Diffstat (limited to 'boost/atomic')
-rw-r--r--  boost/atomic/atomic.hpp                       |    1
-rw-r--r--  boost/atomic/capabilities.hpp                 |    2
-rw-r--r--  boost/atomic/detail/atomic_template.hpp       |  118
-rw-r--r--  boost/atomic/detail/caps_gcc_arm.hpp          |   19
-rw-r--r--  boost/atomic/detail/caps_gcc_atomic.hpp       |   11
-rw-r--r--  boost/atomic/detail/caps_gcc_ppc.hpp          |    3
-rw-r--r--  boost/atomic/detail/caps_gcc_sync.hpp         |   11
-rw-r--r--  boost/atomic/detail/caps_gcc_x86.hpp          |   37
-rw-r--r--  boost/atomic/detail/config.hpp                |   26
-rw-r--r--  boost/atomic/detail/extra_operations.hpp      |   27
-rw-r--r--  boost/atomic/detail/extra_operations_fwd.hpp  |   35
-rw-r--r--  boost/atomic/detail/extra_ops_gcc_arm.hpp     |  336
-rw-r--r--  boost/atomic/detail/extra_ops_gcc_ppc.hpp     |  274
-rw-r--r--  boost/atomic/detail/extra_ops_gcc_x86.hpp     | 1382
-rw-r--r--  boost/atomic/detail/extra_ops_generic.hpp     |  162
-rw-r--r--  boost/atomic/detail/extra_ops_msvc_arm.hpp    |  106
-rw-r--r--  boost/atomic/detail/extra_ops_msvc_x86.hpp    |  866
-rw-r--r--  boost/atomic/detail/hwcaps_gcc_arm.hpp        |   67
-rw-r--r--  boost/atomic/detail/hwcaps_gcc_ppc.hpp        |   42
-rw-r--r--  boost/atomic/detail/hwcaps_gcc_x86.hpp        |   58
-rw-r--r--  boost/atomic/detail/interlocked.hpp           |   36
-rw-r--r--  boost/atomic/detail/operations_lockfree.hpp   |    2
-rw-r--r--  boost/atomic/detail/ops_emulated.hpp          |    3
-rw-r--r--  boost/atomic/detail/ops_gcc_alpha.hpp         |    7
-rw-r--r--  boost/atomic/detail/ops_gcc_arm.hpp           |  661
-rw-r--r--  boost/atomic/detail/ops_gcc_arm_common.hpp    |  133
-rw-r--r--  boost/atomic/detail/ops_gcc_atomic.hpp        |   56
-rw-r--r--  boost/atomic/detail/ops_gcc_ppc.hpp           |  516
-rw-r--r--  boost/atomic/detail/ops_gcc_ppc_common.hpp    |   69
-rw-r--r--  boost/atomic/detail/ops_gcc_sparc.hpp         |    7
-rw-r--r--  boost/atomic/detail/ops_gcc_sync.hpp          |   26
-rw-r--r--  boost/atomic/detail/ops_gcc_x86.hpp           |  141
-rw-r--r--  boost/atomic/detail/ops_gcc_x86_dcas.hpp      |   64
-rw-r--r--  boost/atomic/detail/ops_linux_arm.hpp         |    4
-rw-r--r--  boost/atomic/detail/ops_msvc_arm.hpp          |   13
-rw-r--r--  boost/atomic/detail/ops_msvc_x86.hpp          |   25
-rw-r--r--  boost/atomic/detail/ops_windows.hpp           |    4
-rw-r--r--  boost/atomic/detail/platform.hpp              |  123
-rw-r--r--  boost/atomic/detail/storage_type.hpp          |    4
39 files changed, 5141 insertions, 336 deletions
diff --git a/boost/atomic/atomic.hpp b/boost/atomic/atomic.hpp
index 8b0bdd11c1..2793ddef0d 100644
--- a/boost/atomic/atomic.hpp
+++ b/boost/atomic/atomic.hpp
@@ -21,6 +21,7 @@
#include <boost/atomic/atomic_flag.hpp>
#include <boost/atomic/detail/atomic_template.hpp>
#include <boost/atomic/detail/operations.hpp>
+#include <boost/atomic/detail/extra_operations.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
diff --git a/boost/atomic/capabilities.hpp b/boost/atomic/capabilities.hpp
index 05bbb0fd93..7e5205d219 100644
--- a/boost/atomic/capabilities.hpp
+++ b/boost/atomic/capabilities.hpp
@@ -19,7 +19,7 @@
#include <boost/atomic/detail/int_sizes.hpp>
#if !defined(BOOST_ATOMIC_EMULATED)
-#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/caps_)
+#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/caps_)
#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
diff --git a/boost/atomic/detail/atomic_template.hpp b/boost/atomic/detail/atomic_template.hpp
index 7bbc1fffad..28de879d40 100644
--- a/boost/atomic/detail/atomic_template.hpp
+++ b/boost/atomic/detail/atomic_template.hpp
@@ -22,6 +22,7 @@
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/bitwise_cast.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/is_signed.hpp>
#include <boost/atomic/detail/type_traits/is_integral.hpp>
#include <boost/atomic/detail/type_traits/is_function.hpp>
@@ -231,6 +232,7 @@ public:
protected:
typedef atomics::detail::operations< storage_size_of< value_type >::value, boost::atomics::detail::is_signed< T >::value > operations;
+ typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
typedef value_type value_arg_type;
public:
@@ -243,6 +245,7 @@ public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
+ // Standard methods
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
@@ -332,6 +335,96 @@ public:
return static_cast< value_type >(operations::fetch_xor(m_storage.value, static_cast< storage_type >(v), order));
}
+ // Boost.Atomic extensions
+ BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(extra_operations::fetch_negate(m_storage.value, order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return static_cast< value_type >(extra_operations::fetch_complement(m_storage.value, order));
+ }
+
+ BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_add(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_sub(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_negate(m_storage.value, order);
+ }
+
+ BOOST_FORCEINLINE void opaque_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_and(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_or(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_xor(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_complement(m_storage.value, order);
+ }
+
+ BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::add_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::sub_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool and_and_test(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::and_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool or_and_test(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::or_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool xor_and_test(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::xor_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool bit_test_and_set(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
+ return extra_operations::bit_test_and_set(m_storage.value, bit_number, order);
+ }
+
+ BOOST_FORCEINLINE bool bit_test_and_reset(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
+ return extra_operations::bit_test_and_reset(m_storage.value, bit_number, order);
+ }
+
+ BOOST_FORCEINLINE bool bit_test_and_complement(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(bit_number < sizeof(value_type) * 8u);
+ return extra_operations::bit_test_and_complement(m_storage.value, bit_number, order);
+ }
+
+ // Operators
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
{
return fetch_add(1);
@@ -402,6 +495,7 @@ public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
+ // Standard methods
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
@@ -481,6 +575,7 @@ public:
protected:
typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+ typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
typedef value_type value_arg_type;
public:
@@ -495,6 +590,7 @@ public:
{
}
+ // Standard methods
BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
@@ -569,6 +665,28 @@ public:
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
+ // Boost.Atomic extensions
+ BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_add(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_operations::opaque_sub(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::add_and_test(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::sub_and_test(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ }
+
+ // Operators
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
{
return fetch_add(1);
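
The hunks above add the full set of Boost.Atomic extension methods to the integral base_atomic specialization, and a pointer-arithmetic subset to the pointer specialization. A minimal usage sketch follows; the variable names are hypothetical, and judging by the setz / "=@ccz" outputs in the x86 backend further below, the *_and_test methods in this revision return true when the resulting value is zero, while the bit_test_and_* methods return the previous value of the tested bit:

#include <boost/atomic.hpp>

boost::atomic< unsigned int > counter(16u);
boost::atomic< unsigned int > flags(0u);

void example()
{
    counter.opaque_add(4u);                          // atomic add, result discarded
    if (counter.sub_and_test(20u))                   // true here: 16 + 4 - 20 == 0
    {
        // counter reached zero
    }
    bool bit3_was_set = flags.bit_test_and_set(3u);  // previous value of bit 3
    flags.opaque_complement();                       // atomic bitwise NOT, result discarded
    unsigned int old = counter.fetch_negate();       // returns the pre-negation value
    (void)bit3_was_set; (void)old;
}
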
diff --git a/boost/atomic/detail/caps_gcc_arm.hpp b/boost/atomic/detail/caps_gcc_arm.hpp
index b827c648d7..a26ea56ee5 100644
--- a/boost/atomic/detail/caps_gcc_arm.hpp
+++ b/boost/atomic/detail/caps_gcc_arm.hpp
@@ -19,29 +19,12 @@
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_ARM_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__))
-// ARMv7 and later have dmb instruction
-#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
-#endif
-
-#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
-// ARMv6k and ARMv7 have 8 and 16 ldrex/strex variants
-#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
-#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
-#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7M__))
-// ARMv6k and ARMv7 except ARMv7-M have 64-bit ldrex/strex variants.
-// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not allocate register pairs properly when targeting ARMv6k Thumb,
-// which is required for ldrexd/strexd instructions, so we disable 64-bit support. When targeting ARMv6k ARM
-// or ARMv7 (both ARM and Thumb 2) it works as expected.
-#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
-#endif
-#endif
-
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
diff --git a/boost/atomic/detail/caps_gcc_atomic.hpp b/boost/atomic/detail/caps_gcc_atomic.hpp
index f4e7a7023e..d6221fd134 100644
--- a/boost/atomic/detail/caps_gcc_atomic.hpp
+++ b/boost/atomic/detail/caps_gcc_atomic.hpp
@@ -16,19 +16,14 @@
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
-#endif
-
-#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
-#endif
-
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
#define BOOST_ATOMIC_INT128_LOCK_FREE 2
#else
diff --git a/boost/atomic/detail/caps_gcc_ppc.hpp b/boost/atomic/detail/caps_gcc_ppc.hpp
index ee2346081b..3e20fdee45 100644
--- a/boost/atomic/detail/caps_gcc_ppc.hpp
+++ b/boost/atomic/detail/caps_gcc_ppc.hpp
@@ -17,6 +17,7 @@
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_PPC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -25,7 +26,7 @@
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
-#if defined(__powerpc64__) || defined(__PPC64__)
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
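
With this change, 64-bit lock-freedom on PowerPC follows the hwcaps macro (i.e. ldarx/stdcx. availability) instead of a hard-coded architecture test. Client code observes the result through the public capability macro; a minimal compile-time sketch:

#include <boost/atomic/capabilities.hpp>

#if BOOST_ATOMIC_INT64_LOCK_FREE == 2
// native 64-bit LL/SC is available: 64-bit boost::atomic is always lock-free here
#else
// no ldarx/stdcx. on this target: Boost.Atomic falls back to a lock-based implementation
#endif
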
diff --git a/boost/atomic/detail/caps_gcc_sync.hpp b/boost/atomic/detail/caps_gcc_sync.hpp
index 7fac07a130..d797e5af64 100644
--- a/boost/atomic/detail/caps_gcc_sync.hpp
+++ b/boost/atomic/detail/caps_gcc_sync.hpp
@@ -17,19 +17,14 @@
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(__i386__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
-#endif
-
-#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
-#endif
-
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)\
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)\
diff --git a/boost/atomic/detail/caps_gcc_x86.hpp b/boost/atomic/detail/caps_gcc_x86.hpp
index 7485b010c7..70c64628af 100644
--- a/boost/atomic/detail/caps_gcc_x86.hpp
+++ b/boost/atomic/detail/caps_gcc_x86.hpp
@@ -17,47 +17,12 @@
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_X86_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(__GNUC__)
-
-#if defined(__i386__) &&\
- (\
- defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
- defined(__i586__) || defined(__i686__) || defined(__pentium4__) || defined(__nocona__) || defined(__core2__) || defined(__corei7__) ||\
- defined(__k6__) || defined(__athlon__) || defined(__k8__) || defined(__amdfam10__) || defined(__bdver1__) || defined(__bdver2__) || defined(__bdver3__) || defined(__btver1__) || defined(__btver2__)\
- )
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
-#endif
-
-#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
-#endif
-
-#if defined(__x86_64__) || defined(__SSE2__)
-// Use mfence only if SSE2 is available
-#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
-#endif
-
-#else // defined(__GNUC__)
-
-#if defined(__i386__) && !defined(BOOST_ATOMIC_NO_CMPXCHG8B)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
-#endif
-
-#if defined(__x86_64__) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
-#endif
-
-#if !defined(BOOST_ATOMIC_NO_MFENCE)
-#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
-#endif
-
-#endif // defined(__GNUC__)
-
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
diff --git a/boost/atomic/detail/config.hpp b/boost/atomic/detail/config.hpp
index 7a43e23cbc..aee674084f 100644
--- a/boost/atomic/detail/config.hpp
+++ b/boost/atomic/detail/config.hpp
@@ -61,6 +61,7 @@
#endif
#if ((defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)) && (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) < 403)) ||\
+ (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000) /* gcc 7 emits assembler warnings when zero displacement is implied */ ||\
defined(__SUNPRO_CC)
// This macro indicates we're using older binutils that don't support implied zero displacements for memory operands,
// making code like this invalid:
@@ -85,18 +86,27 @@
// and does not require an explicit markup for types that may alias, we still don't
// enable the optimization for this compiler because at least MSVC-8 and 9 are known
// to generate broken code sometimes when casts are used.
-#if defined(__GNUC__) && (!defined(BOOST_INTEL_CXX_VERSION) || (BOOST_INTEL_CXX_VERSION+0) >= 1300)
-#define BOOST_ATOMIC_DETAIL_MAY_ALIAS __attribute__((__may_alias__))
-#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS
-#elif defined(__has_attribute)
-#if __has_attribute(__may_alias__)
-#define BOOST_ATOMIC_DETAIL_MAY_ALIAS __attribute__((__may_alias__))
+#define BOOST_ATOMIC_DETAIL_MAY_ALIAS BOOST_MAY_ALIAS
+#if !defined(BOOST_NO_MAY_ALIAS)
#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS
#endif
+
+#if defined(__GCC_ASM_FLAG_OUTPUTS__)
+// The compiler supports output values in flag registers.
+// See: https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html, Section 6.44.3.
+#define BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS
+#endif
+
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_constant_p)
+#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
+#endif
+#elif defined(__GNUC__)
+#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) __builtin_constant_p(x)
#endif
-#if !defined(BOOST_ATOMIC_DETAIL_MAY_ALIAS)
-#define BOOST_ATOMIC_DETAIL_MAY_ALIAS
+#if !defined(BOOST_ATOMIC_DETAIL_IS_CONSTANT)
+#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) false
#endif
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
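
The two new detection macros feed the x86 extra_ops backends below. A standalone sketch of the features they detect, assuming an x86 target (illustration only, not library code): with __GCC_ASM_FLAG_OUTPUTS__ the compiler can receive a condition flag directly as an asm output ("=@ccz" names ZF), saving the setz instruction and the extra register that older compilers need.

#include <cstdio>

static inline bool locked_dec_is_zero(int volatile& value)
{
    bool zero;
#if defined(__GCC_ASM_FLAG_OUTPUTS__)
    // ZF is the output operand itself; no flag-to-register move is emitted
    __asm__ __volatile__("lock; decl %[storage]"
        : [storage] "+m" (value), [result] "=@ccz" (zero) : : "memory");
#else
    // form for older GCC: materialize ZF with setz into a byte register
    __asm__ __volatile__("lock; decl %[storage]\n\tsetz %[result]"
        : [storage] "+m" (value), [result] "=q" (zero) : : "cc", "memory");
#endif
    return zero;
}

int main()
{
    int volatile refcount = 1;
    if (locked_dec_is_zero(refcount))
        std::puts("dropped to zero");
}

Likewise, BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1 is how the x86 backends below choose the shorter lock; inc/dec encodings over lock; add/sub when the argument is a literal 1.
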
diff --git a/boost/atomic/detail/extra_operations.hpp b/boost/atomic/detail/extra_operations.hpp
new file mode 100644
index 0000000000..4335f48bf4
--- /dev/null
+++ b/boost/atomic/detail/extra_operations.hpp
@@ -0,0 +1,27 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_operations.hpp
+ *
+ * This header defines extra atomic operations, including the generic version.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+
+#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC)
+#include BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(boost/atomic/detail/extra_ops_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
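
The computed include above selects one platform file at preprocessing time. BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER is defined in platform.hpp, whose hunk is not shown in full here; presumably it token-pastes a backend name onto the given prefix, along these lines (the EXAMPLE_-prefixed macros are hypothetical):

// hypothetical sketch of the computed-include technique
#define EXAMPLE_JOIN_(x, y) x ## y
#define EXAMPLE_JOIN(x, y) EXAMPLE_JOIN_(x, y)
#define EXAMPLE_EXTRA_BACKEND gcc_x86
#define EXAMPLE_EXTRA_BACKEND_HEADER(prefix) <EXAMPLE_JOIN(prefix, EXAMPLE_EXTRA_BACKEND).hpp>

// #include EXAMPLE_EXTRA_BACKEND_HEADER(boost/atomic/detail/extra_ops_)
// would then expand to:
// #include <boost/atomic/detail/extra_ops_gcc_x86.hpp>

When no specialized backend exists for the target, platform.hpp presumably defines BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC instead, and only extra_ops_generic.hpp is used, as the #if guard above shows.
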
diff --git a/boost/atomic/detail/extra_operations_fwd.hpp b/boost/atomic/detail/extra_operations_fwd.hpp
new file mode 100644
index 0000000000..8c258f6dc2
--- /dev/null
+++ b/boost/atomic/detail/extra_operations_fwd.hpp
@@ -0,0 +1,35 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_operations_fwd.hpp
+ *
+ * This header contains forward declaration of the \c extra_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base, std::size_t Size, bool Signed >
+struct extra_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_FWD_HPP_INCLUDED_
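
Only the declaration lives here: atomic_template.hpp merely needs the name to form its extra_operations typedef. The definitions come later; extra_ops_generic.hpp presumably supplies the primary template built on compare-exchange loops, and the platform headers below partially specialize it per storage size. Note that the specializations in this diff inherit generic_extra_operations, so any operation they do not hand-code falls back to the generic implementation; the asm versions simply shadow the inherited statics. The pattern in miniature (the example namespace and comments are illustrative):

#include <cstddef>

namespace example {

template< typename Base, std::size_t Size, bool Signed >
struct extra_operations;    // forward declaration, as in this header

// primary definition (sketch of what extra_ops_generic.hpp would supply)
template< typename Base, std::size_t Size, bool Signed >
struct extra_operations : public Base
{
    // portable compare-exchange-loop implementations
};

// a backend then specializes the sizes it can implement with dedicated asm
template< typename Base, bool Signed >
struct extra_operations< Base, 4u, Signed > : public Base
{
    // hand-written 32-bit fetch_negate, fetch_complement, ...
};

} // namespace example
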
diff --git a/boost/atomic/detail/extra_ops_gcc_arm.hpp b/boost/atomic/detail/extra_ops_gcc_arm.hpp
new file mode 100644
index 0000000000..9bd4829bd8
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_gcc_arm.hpp
@@ -0,0 +1,336 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_arm.hpp
+ *
+ * This header contains implementation of the extra atomic operations for ARM.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed > :
+ public generic_extra_operations< Base, 1u, Signed >
+{
+ typedef generic_extra_operations< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed > :
+ public generic_extra_operations< Base, 2u, Signed >
+{
+ typedef generic_extra_operations< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed > :
+ public generic_extra_operations< Base, 4u, Signed >
+{
+ typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed > :
+ public generic_extra_operations< Base, 8u, Signed >
+{
+ typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "adds %2, %2, #1\n" // result = result + 1
+ "adc %H2, %H2, #0\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_ARM_HPP_INCLUDED_
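
Each ARM operation above is a classic LL/SC retry loop: ldrex{b,h,,d} loads the value and sets an exclusive monitor, strex{b,h,,d} stores only if the monitor is still held (tmp == 0 on success), and teq/bne retries otherwise. Note that the 64-bit fetch_negate has no rsb form for a register pair, so it synthesizes two's complement as mvn followed by adds #1 / adc #0. The generic fallback these specializations inherit presumably expresses the same loop portably; a sketch of that shape, where Ops stands in for the size-matched operations backend:

#include <boost/memory_order.hpp>

template< typename Ops, typename Storage >
inline Storage fetch_negate_sketch(Storage volatile& storage, boost::memory_order order) noexcept
{
    Storage old_val = Ops::load(storage, boost::memory_order_relaxed);
    // compare_exchange_weak refreshes old_val on failure, so each retry
    // negates the latest observed value -- the same guarantee ldrex/strex give.
    while (!Ops::compare_exchange_weak(storage, old_val,
        static_cast< Storage >(-old_val), order, boost::memory_order_relaxed))
    {
    }
    return old_val;
}
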
diff --git a/boost/atomic/detail/extra_ops_gcc_ppc.hpp b/boost/atomic/detail/extra_ops_gcc_ppc.hpp
new file mode 100644
index 0000000000..cc32e4960b
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_gcc_ppc.hpp
@@ -0,0 +1,274 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_ppc.hpp
+ *
+ * This header contains implementation of the extra atomic operations for PowerPC.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed > :
+ public generic_extra_operations< Base, 1u, Signed >
+{
+ typedef generic_extra_operations< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed > :
+ public generic_extra_operations< Base, 2u, Signed >
+{
+ typedef generic_extra_operations< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed > :
+ public generic_extra_operations< Base, 4u, Signed >
+{
+ typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed > :
+ public generic_extra_operations< Base, 8u, Signed >
+{
+ typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, tmp;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_PPC_HPP_INCLUDED_
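
The PowerPC loops follow the same LL/SC pattern with l{b,h,w,d}arx / st{b,h,w,d}cx.; the "Z" constraint combined with the %y operand modifier makes GCC emit the indexed (register+register) address form those instructions require, and the "=&b" outputs keep r0 out of the base-register slot. The fence_before/fence_after helpers are defined in ops_gcc_ppc_common.hpp, whose hunk is not shown in this diff; assuming the conventional PowerPC mapping, they would look roughly like this hedged sketch:

#include <boost/memory_order.hpp>

// hedged sketch only -- the real helpers live in ops_gcc_ppc_common.hpp
static inline void fence_before_sketch(boost::memory_order order) noexcept
{
    if (order == boost::memory_order_seq_cst)
        __asm__ __volatile__ ("sync" ::: "memory");     // full barrier
    else if (order == boost::memory_order_release || order == boost::memory_order_acq_rel)
        __asm__ __volatile__ ("lwsync" ::: "memory");   // lightweight release barrier
}

static inline void fence_after_sketch(boost::memory_order order) noexcept
{
    if (order == boost::memory_order_acquire || order == boost::memory_order_acq_rel ||
        order == boost::memory_order_seq_cst)
        __asm__ __volatile__ ("isync" ::: "memory");    // acquire: discard speculative loads
}
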
diff --git a/boost/atomic/detail/extra_ops_gcc_x86.hpp b/boost/atomic/detail/extra_ops_gcc_x86.hpp
new file mode 100644
index 0000000000..59b39064d2
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_gcc_x86.hpp
@@ -0,0 +1,1382 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2015 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_gcc_x86.hpp
+ *
+ * This header contains implementation of the extra atomic operations for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base >
+struct gcc_x86_extra_operations_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; bts %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" (bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; bts %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" (bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btr %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" (bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btr %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" (bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; btc %[bit_number], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccc" (res)
+ : [bit_number] "Kq" (bit_number)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; btc %[bit_number], %[storage]\n\t"
+ "setc %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [bit_number] "Kq" (bit_number)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
+ temp_storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: movzbl %[res], %2\n\t"\
+ op " %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negb", res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notb", res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notb %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incb %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incb %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addb %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decb %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decb %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subb %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andb %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orb %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorb %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorb %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
+ temp_storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: movzwl %[res], %2\n\t"\
+ op " %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negw", res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notw", res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notw %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incw %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incw %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addw %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decw %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decw %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subw %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andw %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orw %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorw %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "iq" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorw %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "iq" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
+ storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[res], %[new_val]\n\t"\
+ op " %[new_val]\n\t"\
+ "lock; cmpxchgl %[new_val], %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negl", res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notl", res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notl %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incl %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incl %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addl %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decl %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decl %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subl %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andl %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orl %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorl %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "ir" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorl %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "ir" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+#if defined(__x86_64__)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed > :
+ public gcc_x86_extra_operations_common< Base >
+{
+ typedef gcc_x86_extra_operations_common< Base > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
+ storage_type new_val;\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[res], %[new_val]\n\t"\
+ op " %[new_val]\n\t"\
+ "lock; cmpxchgq %[new_val], %[storage]\n\t"\
+ "jne 1b"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : \
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negq", res);
+ return res;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type res = storage;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notq", res);
+ return res;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; negq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; andq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; orq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; xorq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ __asm__ __volatile__
+ (
+ "lock; notq %[storage]\n\t"
+ : [storage] "+m" (storage)
+ :
+ : "memory"
+ );
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incq %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; incq %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; addq %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decq %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ :
+ : "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+ }
+#else
+ if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
+ {
+ __asm__ __volatile__
+ (
+ "lock; decq %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lock; subq %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ }
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; andq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; andq %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; orq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; orq %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ bool res;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; xorq %[argument], %[storage]\n\t"
+ : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [argument] "er" (v)
+ : "memory"
+ );
+#else
+ __asm__ __volatile__
+ (
+ "lock; xorq %[argument], %[storage]\n\t"
+ "setz %[result]\n\t"
+ : [storage] "+m" (storage), [result] "=q" (res)
+ : [argument] "er" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif
+ return res;
+ }
+};
+
+#endif // defined(__x86_64__)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GCC_X86_HPP_INCLUDED_
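For reference, the retry idiom generated by the BOOST_ATOMIC_DETAIL_CAS_LOOP macro above can be written out as a standalone function. This is a minimal sketch assuming GCC extended asm on x86; the function name and the fixed 32-bit width are illustrative, not part of the library:

#include <cstdint>

// Sketch of the cmpxchg retry loop behind BOOST_ATOMIC_DETAIL_CAS_LOOP.
// cmpxchgl compares eax with the memory operand; on failure it reloads the
// currently stored value into eax, so the loop recomputes and retries.
inline std::uint32_t fetch_negate_sketch(volatile std::uint32_t& storage)
{
    std::uint32_t old_val = storage, new_val;
    __asm__ __volatile__
    (
        "1: movl %[res], %[new_val]\n\t"
        "negl %[new_val]\n\t"
        "lock; cmpxchgl %[new_val], %[storage]\n\t"
        "jne 1b"
        : [res] "+a" (old_val), [storage] "+m" (storage), [new_val] "=&r" (new_val)
        :
        : "cc", "memory"
    );
    return old_val; // the value observed before the negation
}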
diff --git a/boost/atomic/detail/extra_ops_generic.hpp b/boost/atomic/detail/extra_ops_generic.hpp
new file mode 100644
index 0000000000..00930e9a81
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_generic.hpp
@@ -0,0 +1,162 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2015 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_generic.hpp
+ *
+ * This header contains a generic implementation of the extra atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// unary minus operator applied to unsigned type, result still unsigned
+#pragma warning(disable: 4146)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Generic implementation of extra operations
+template< typename Base, std::size_t Size, bool Signed >
+struct generic_extra_operations :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;
+ typedef typename make_storage_type< Size, false >::type unsigned_emulated_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(-static_cast< emulated_storage_type >(old_val)), order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return Base::fetch_xor(storage, static_cast< storage_type >(~static_cast< emulated_storage_type >(0)), order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::fetch_add(storage, v, order);
+ emulated_storage_type new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
+ return !new_val;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val = Base::fetch_sub(storage, v, order);
+ emulated_storage_type new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);
+ return !new_val;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !(Base::fetch_and(storage, v, order) & v);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !(Base::fetch_or(storage, v, order) | v);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !(Base::fetch_xor(storage, v, order) ^ v);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
+ storage_type old_val = Base::fetch_or(storage, mask, order);
+ return !!(old_val & mask);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
+ storage_type old_val = Base::fetch_and(storage, ~mask, order);
+ return !!(old_val & mask);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
+ storage_type old_val = Base::fetch_xor(storage, mask, order);
+ return !!(old_val & mask);
+ }
+};
+
+// Default extra_operations template definition will be used unless specialized for a specific platform
+template< typename Base, std::size_t Size, bool Signed >
+struct extra_operations :
+ public generic_extra_operations< Base, Size, Signed >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_GENERIC_HPP_INCLUDED_
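The *_and_test helpers above avoid a second load by reconstructing the new value from the old value returned by the fetch operation. A portable sketch of the same reasoning, using std::atomic rather than the Boost internals (names are illustrative):

#include <atomic>
#include <cassert>

// fetch_and returns the previous value; (old & v) is exactly the value that
// was stored, so "result became zero" can be tested without re-reading.
inline bool and_and_test_sketch(std::atomic<unsigned>& a, unsigned v)
{
    return (a.fetch_and(v) & v) == 0u;
}

int main()
{
    std::atomic<unsigned> a(0x0Fu);
    assert(and_and_test_sketch(a, 0xF0u)); // 0x0F & 0xF0 == 0
    assert(a.load() == 0u);
    return 0;
}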
diff --git a/boost/atomic/detail/extra_ops_msvc_arm.hpp b/boost/atomic/detail/extra_ops_msvc_arm.hpp
new file mode 100644
index 0000000000..5989d6677f
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_msvc_arm.hpp
@@ -0,0 +1,106 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_msvc_arm.hpp
+ *
+ * This header contains an implementation of the extra atomic operations for ARM.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed > :
+ public generic_extra_operations< Base, 4u, Signed >
+{
+ typedef generic_extra_operations< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE)
+ bool result;
+ switch (order)
+ {
+ case memory_order_relaxed:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(&storage, bit_number);
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(&storage, bit_number);
+ break;
+ case memory_order_release:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(&storage, bit_number);
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+ break;
+ }
+ return result;
+#else
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+#endif
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE) && defined(BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE)
+ bool result;
+ switch (order)
+ {
+ case memory_order_relaxed:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(&storage, bit_number);
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(&storage, bit_number);
+ break;
+ case memory_order_release:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(&storage, bit_number);
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ default:
+ result = !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+ break;
+ }
+ return result;
+#else
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+#endif
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_ARM_HPP_INCLUDED_
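The switch statements above select the weakest interlocked variant that still satisfies the requested ordering: consume is promoted to acquire, and acq_rel/seq_cst fall back to the fully fenced intrinsic. Semantically, bit_test_and_set reduces to this portable sketch (illustrative, not the library code):

#include <atomic>
#include <cstdint>

// Atomically set one bit and report whether it was already set; the
// memory_order is forwarded instead of being dispatched by hand.
inline bool bit_test_and_set_sketch(std::atomic<std::uint32_t>& a, unsigned int bit_number, std::memory_order order)
{
    const std::uint32_t mask = std::uint32_t(1u) << bit_number;
    return (a.fetch_or(mask, order) & mask) != 0u;
}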
diff --git a/boost/atomic/detail/extra_ops_msvc_x86.hpp b/boost/atomic/detail/extra_ops_msvc_x86.hpp
new file mode 100644
index 0000000000..6d95dbed74
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_msvc_x86.hpp
@@ -0,0 +1,866 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_msvc_x86.hpp
+ *
+ * This header contains an implementation of the extra atomic operations for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/capabilities.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// frame pointer register 'ebx' modified by inline assembly code
+#pragma warning(disable: 4731)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
+
+template< typename Base, std::size_t Size, bool Signed >
+struct msvc_x86_extra_operations_common :
+ public generic_extra_operations< Base, Size, Signed >
+{
+ typedef generic_extra_operations< Base, Size, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS)
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS(&storage, bit_number);
+ }
+#else
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock bts [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTR)
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR(&storage, bit_number);
+ }
+#else
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btr [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, bit_number
+ lock btc [edx], eax
+ setc result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed > :
+ public msvc_x86_extra_operations_common< Base, 1u, Signed >
+{
+ typedef msvc_x86_extra_operations_common< Base, 1u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov old_val, al
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov old_val, al
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock neg byte ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor byte ptr [edx], al
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock not byte ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add byte ptr [edx], al
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub byte ptr [edx], al
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and byte ptr [edx], al
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or byte ptr [edx], al
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor byte ptr [edx], al
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif // defined(_M_IX86)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed > :
+ public msvc_x86_extra_operations_common< Base, 2u, Signed >
+{
+ typedef msvc_x86_extra_operations_common< Base, 2u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov old_val, ax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov old_val, ax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock neg word ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor word ptr [edx], ax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock not word ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock add word ptr [edx], ax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock sub word ptr [edx], ax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock and word ptr [edx], ax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock or word ptr [edx], ax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ movzx eax, v
+ lock xor word ptr [edx], ax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif // defined(_M_IX86)
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed > :
+ public msvc_x86_extra_operations_common< Base, 4u, Signed >
+{
+ typedef msvc_x86_extra_operations_common< Base, 4u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+#if defined(_M_IX86)
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov old_val, eax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type old_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov old_val, eax
+ };
+ base_type::fence_after(order);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock add dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock sub dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock neg dword ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock and dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock or dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock xor dword ptr [edx], eax
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edx, storage
+ lock not dword ptr [edx]
+ };
+ base_type::fence_after(order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock add dword ptr [edx], eax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock sub dword ptr [edx], eax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock and dword ptr [edx], eax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock or dword ptr [edx], eax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov edx, storage
+ mov eax, v
+ lock xor dword ptr [edx], eax
+ setz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+#endif // defined(_M_IX86)
+};
+
+#endif // defined(_M_IX86) || (defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR))
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_BTS64) && defined(BOOST_ATOMIC_INTERLOCKED_BTR64)
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed > :
+ public generic_extra_operations< Base, 8u, Signed >
+{
+ typedef generic_extra_operations< Base, 8u, Signed > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTS64(&storage, bit_number);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
+ {
+ return !!BOOST_ATOMIC_INTERLOCKED_BTR64(&storage, bit_number);
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_BTS64) && defined(BOOST_ATOMIC_INTERLOCKED_BTR64)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_MSVC_X86_HPP_INCLUDED_
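The __asm blocks above spell out compare-and-swap retry loops, with fence_before/fence_after supplying the ordering that the raw instructions leave unspecified. A portable sketch of the 8-bit fetch_complement, assuming only the standard library (the name is illustrative):

#include <atomic>
#include <cstdint>

// cmpxchg reloads the observed value on failure, which is exactly what
// compare_exchange_weak does with old_val here.
inline std::uint8_t fetch_complement_sketch(std::atomic<std::uint8_t>& a, std::memory_order order)
{
    std::uint8_t old_val = a.load(std::memory_order_relaxed);
    while (!a.compare_exchange_weak(old_val, static_cast<std::uint8_t>(~old_val), order, std::memory_order_relaxed))
    {
    }
    return old_val;
}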
diff --git a/boost/atomic/detail/hwcaps_gcc_arm.hpp b/boost/atomic/detail/hwcaps_gcc_arm.hpp
new file mode 100644
index 0000000000..6d0c338622
--- /dev/null
+++ b/boost/atomic/detail/hwcaps_gcc_arm.hpp
@@ -0,0 +1,67 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/hwcaps_gcc_arm.hpp
+ *
+ * This header defines hardware capability macros for ARM.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/platform.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+
+#if BOOST_ATOMIC_DETAIL_ARM_ARCH > 6
+// ARMv7 and later have the dmb instruction
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_DMB 1
+#endif
+
+#if defined(__ARM_FEATURE_LDREX)
+
+#if (__ARM_FEATURE_LDREX & 1)
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
+#endif
+#if (__ARM_FEATURE_LDREX & 2)
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
+#endif
+#if (__ARM_FEATURE_LDREX & 8)
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
+#endif
+
+#else // defined(__ARM_FEATURE_LDREX)
+
+#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
+
+// ARMv6k and ARMv7 have 8- and 16-bit ldrex/strex variants, but at least GCC 4.7 fails to compile them; GCC 4.9 is known to work.
+#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB 1
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH 1
+#endif
+
+#if !(((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7M__))
+// ARMv6k and ARMv7 (except ARMv7-M) have 64-bit ldrex/strex variants.
+// Unfortunately, GCC (at least 4.7.3 on Ubuntu) does not properly allocate the register pairs that
+// ldrexd/strexd require when targeting ARMv6k Thumb, so we disable 64-bit support there. When targeting ARMv6k ARM
+// or ARMv7 (both ARM and Thumb 2) it works as expected.
+#define BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD 1
+#endif
+
+#endif // !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__))
+
+#endif // defined(__ARM_FEATURE_LDREX)
+
+#endif // defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
+
+#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_ARM_HPP_INCLUDED_
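A hypothetical consumer of BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB would gate its 8-bit LL/SC code on the macro, roughly as below (the helper is illustrative; the real users are the ops_gcc_arm.hpp backends):

#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
// strexb writes 0 to tmp on success, so the loop retries until the
// store-exclusive wins the reservation.
inline unsigned char exchange8_sketch(volatile unsigned char* p, unsigned char v)
{
    unsigned char old_val;
    unsigned int tmp;
    __asm__ __volatile__
    (
        "1:\n\t"
        "ldrexb %[old_val], [%[p]]\n\t"
        "strexb %[tmp], %[v], [%[p]]\n\t"
        "teq %[tmp], #0\n\t"
        "bne 1b\n\t"
        : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
        : [p] "r" (p), [v] "r" (v)
        : "cc", "memory"
    );
    return old_val;
}
#endif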
diff --git a/boost/atomic/detail/hwcaps_gcc_ppc.hpp b/boost/atomic/detail/hwcaps_gcc_ppc.hpp
new file mode 100644
index 0000000000..2ec1e327a7
--- /dev/null
+++ b/boost/atomic/detail/hwcaps_gcc_ppc.hpp
@@ -0,0 +1,42 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/hwcaps_gcc_ppc.hpp
+ *
+ * This header defines hardware capability macros for PowerPC.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__POWERPC__) || defined(__PPC__)
+
+#if defined(_ARCH_PWR8)
+// Power8 and later architectures have 8- and 16-bit load-reserve/store-conditional instructions
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX
+#endif
+
+#if defined(__powerpc64__) || defined(__PPC64__)
+// Power7 and later architectures in 64-bit mode have 64-bit load-reserve/store-conditional instructions
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX
+#if defined(_ARCH_PWR8)
+// Power8 also has 128-bit lqarx/stqcx. instructions
+#define BOOST_ATOMIC_DETAIL_PPC_HAS_LQARX_STQCX
+#endif
+#endif
+
+#endif // defined(__POWERPC__) || defined(__PPC__)
+
+#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_PPC_HPP_INCLUDED_
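As with the ARM header, these macros only advertise capabilities; a hypothetical 8-bit exchange gated on lbarx/stbcx. would look roughly like this (the real call sites are the ops_gcc_ppc.hpp backends):

#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
// stbcx. records success in cr0; bne- retries until the store-conditional
// succeeds.
inline unsigned char exchange8_sketch(volatile unsigned char* p, unsigned char v)
{
    unsigned char old_val;
    __asm__ __volatile__
    (
        "1:\n\t"
        "lbarx %0, 0, %2\n\t"
        "stbcx. %3, 0, %2\n\t"
        "bne- 1b\n\t"
        : "=&r" (old_val), "+m" (*p)
        : "b" (p), "r" (v)
        : "cr0", "memory"
    );
    return old_val;
}
#endif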
diff --git a/boost/atomic/detail/hwcaps_gcc_x86.hpp b/boost/atomic/detail/hwcaps_gcc_x86.hpp
new file mode 100644
index 0000000000..91a1aee3aa
--- /dev/null
+++ b/boost/atomic/detail/hwcaps_gcc_x86.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/hwcaps_gcc_x86.hpp
+ *
+ * This header defines hardware capability macros for x86.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__GNUC__)
+
+#if defined(__i386__) &&\
+ (\
+ defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) ||\
+ defined(__i586__) || defined(__i686__) || defined(__SSE__)\
+ )
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if defined(__x86_64__) || defined(__SSE2__)
+// Use mfence only if SSE2 is available
+#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
+#endif
+
+#else // defined(__GNUC__)
+
+#if defined(__i386__) && !defined(BOOST_ATOMIC_NO_CMPXCHG8B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B 1
+#endif
+
+#if defined(__x86_64__) && !defined(BOOST_ATOMIC_NO_CMPXCHG16B)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B 1
+#endif
+
+#if !defined(BOOST_ATOMIC_NO_MFENCE)
+#define BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE 1
+#endif
+
+#endif // defined(__GNUC__)
+
+#endif // BOOST_ATOMIC_DETAIL_HWCAPS_GCC_X86_HPP_INCLUDED_
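One illustration of what BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE enables (a sketch; the real consumers are the thread fence implementations): issue mfence when SSE2 is known to be present, otherwise fall back to a lock-prefixed read-modify-write, which is also a full barrier on x86:

inline void full_fence_sketch()
{
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
    __asm__ __volatile__ ("mfence" : : : "memory");
#else
    // Any locked RMW acts as a full fence on x86.
    unsigned int tmp = 0u;
    __asm__ __volatile__ ("lock; addl $0, %[tmp]" : [tmp] "+m" (tmp) : : "cc", "memory");
#endif
}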
diff --git a/boost/atomic/detail/interlocked.hpp b/boost/atomic/detail/interlocked.hpp
index 82b6d3a0dc..774354fb7f 100644
--- a/boost/atomic/detail/interlocked.hpp
+++ b/boost/atomic/detail/interlocked.hpp
@@ -2,7 +2,7 @@
#define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
// Copyright (c) 2009 Helge Bahmann
-// Copyright (c) 2012 - 2014 Andrey Semashev
+// Copyright (c) 2012 - 2014, 2017 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
@@ -73,6 +73,8 @@ extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
#pragma intrinsic(_InterlockedAnd)
#pragma intrinsic(_InterlockedOr)
#pragma intrinsic(_InterlockedXor)
+#pragma intrinsic(_interlockedbittestandset)
+#pragma intrinsic(_interlockedbittestandreset)
#endif
#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
@@ -81,6 +83,18 @@ extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTS(dest, arg) _interlockedbittestandset((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR(dest, arg) _interlockedbittestandreset((long*)(dest), (long)(arg))
+
+#if defined(_M_AMD64)
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_interlockedbittestandset64)
+#pragma intrinsic(_interlockedbittestandreset64)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_BTS64(dest, arg) _interlockedbittestandset64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR64(dest, arg) _interlockedbittestandreset64((__int64*)(dest), (__int64)(arg))
+#endif // defined(_M_AMD64)
#if (defined(_M_IX86) && _M_IX86 >= 500) || defined(_M_AMD64) || defined(_M_IA64)
#if defined(BOOST_MSVC)
@@ -394,6 +408,26 @@ extern "C" long __cdecl _InterlockedExchange( long volatile *, long );
#define BOOST_ATOMIC_INTERLOCKED_XOR64_ACQUIRE(dest, arg) _InterlockedXor64_acq((__int64*)(dest), (__int64)(arg))
#define BOOST_ATOMIC_INTERLOCKED_XOR64_RELEASE(dest, arg) _InterlockedXor64_rel((__int64*)(dest), (__int64)(arg))
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_interlockedbittestandset_nf)
+#pragma intrinsic(_interlockedbittestandset_acq)
+#pragma intrinsic(_interlockedbittestandset_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_BTS_RELAXED(dest, arg) _interlockedbittestandset_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTS_ACQUIRE(dest, arg) _interlockedbittestandset_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTS_RELEASE(dest, arg) _interlockedbittestandset_rel((long*)(dest), (long)(arg))
+
+#if defined(BOOST_MSVC)
+#pragma intrinsic(_interlockedbittestandreset_nf)
+#pragma intrinsic(_interlockedbittestandreset_acq)
+#pragma intrinsic(_interlockedbittestandreset_rel)
+#endif
+
+#define BOOST_ATOMIC_INTERLOCKED_BTR_RELAXED(dest, arg) _interlockedbittestandreset_nf((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR_ACQUIRE(dest, arg) _interlockedbittestandreset_acq((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_BTR_RELEASE(dest, arg) _interlockedbittestandreset_rel((long*)(dest), (long)(arg))
+
#endif // _MSC_VER >= 1700 && defined(_M_ARM)
#endif // _MSC_VER < 1400
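
As a usage sketch (the helper name is ours, not a Boost.Atomic API), the new BTS/BTR wrappers correspond to the following MSVC intrinsic calls:

#include <intrin.h>

// Atomically set bit 'bit' of *flags and return its previous value;
// on x86 this is a single "lock bts" instruction. The matching
// BOOST_ATOMIC_INTERLOCKED_BTR macro wraps _interlockedbittestandreset
// ("lock btr") in the same way.
inline bool bit_test_and_set(long volatile* flags, long bit)
{
    return _interlockedbittestandset(flags, bit) != 0;
}
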
diff --git a/boost/atomic/detail/operations_lockfree.hpp b/boost/atomic/detail/operations_lockfree.hpp
index b465403a6d..62b45836b5 100644
--- a/boost/atomic/detail/operations_lockfree.hpp
+++ b/boost/atomic/detail/operations_lockfree.hpp
@@ -18,7 +18,7 @@
#include <boost/atomic/detail/platform.hpp>
#if !defined(BOOST_ATOMIC_EMULATED)
-#include BOOST_ATOMIC_DETAIL_HEADER(boost/atomic/detail/ops_)
+#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/ops_)
#else
#include <boost/atomic/detail/operations_fwd.hpp>
#endif
diff --git a/boost/atomic/detail/ops_emulated.hpp b/boost/atomic/detail/ops_emulated.hpp
index e15f37a680..437b62f311 100644
--- a/boost/atomic/detail/ops_emulated.hpp
+++ b/boost/atomic/detail/ops_emulated.hpp
@@ -149,6 +149,9 @@ struct operations :
public emulated_operations< typename make_storage_type< Size, Signed >::type >
{
typedef typename make_storage_type< Size, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
} // namespace detail
diff --git a/boost/atomic/detail/ops_gcc_alpha.hpp b/boost/atomic/detail/ops_gcc_alpha.hpp
index 5a9deb42ea..71846a8647 100644
--- a/boost/atomic/detail/ops_gcc_alpha.hpp
+++ b/boost/atomic/detail/ops_gcc_alpha.hpp
@@ -16,6 +16,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ALPHA_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -92,6 +93,9 @@ struct operations< 4u, Signed > :
typedef typename make_storage_type< 4u, Signed >::type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
@@ -599,6 +603,9 @@ struct operations< 8u, Signed > :
typedef typename make_storage_type< 8u, Signed >::type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
diff --git a/boost/atomic/detail/ops_gcc_arm.hpp b/boost/atomic/detail/ops_gcc_arm.hpp
index 86a75f5b25..0cea16bc18 100644
--- a/boost/atomic/detail/ops_gcc_arm.hpp
+++ b/boost/atomic/detail/ops_gcc_arm.hpp
@@ -16,12 +16,13 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_HPP_INCLUDED_
+#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
-#include <boost/atomic/detail/ops_extending_cas_based.hpp>
+#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
@@ -53,105 +54,6 @@ namespace detail {
// (Actually it looks like these are available from version 6k onwards.)
// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
// I think you can supply an immediate offset to the address.
-//
-// A memory barrier is effected using a "co-processor 15" instruction,
-// though a separate assembler mnemonic is available for it in v7.
-//
-// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
-// doesn't include all instructions and in particular it doesn't include the co-processor
-// instruction used for the memory barrier or the load-locked/store-conditional
-// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
-// asm blocks with code to temporarily change to ARM mode.
-//
-// You can only change between ARM and Thumb modes when branching using the bx instruction.
-// bx takes an address specified in a register. The least significant bit of the address
-// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
-// A temporary register is needed for the address and is passed as an argument to these
-// macros. It must be one of the "low" registers accessible to Thumb code, specified
-// using the "l" attribute in the asm statement.
-//
-// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
-// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
-// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
-// So in v7 we don't need to change to ARM mode; we can write "universal
-// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
-// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
-// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
-// so they can always be present.
-
-// A note about memory_order_consume. Technically, this architecture allows to avoid
-// unnecessary memory barrier after consume load since it supports data dependency ordering.
-// However, some compiler optimizations may break a seemingly valid code relying on data
-// dependency tracking by injecting bogus branches to aid out of order execution.
-// This may happen not only in Boost.Atomic code but also in user's code, which we have no
-// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
-// For this reason we promote memory_order_consume to memory_order_acquire.
-
-#if defined(__thumb__) && !defined(__thumb2__)
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8:\n"
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9:\n"
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
-#else
-// The tmpreg may be wasted in this case, which is non-optimal.
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
-#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&r" (var)
-#endif
-
-struct gcc_arm_operations_base
-{
- static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
-
- static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
- {
- if ((order & memory_order_release) != 0)
- hardware_full_fence();
- }
-
- static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
- {
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
- hardware_full_fence();
- }
-
- static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
- {
- if (order == memory_order_seq_cst)
- hardware_full_fence();
- }
-
- static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
- {
-#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
- // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
- // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
- // Since we cannot detect binutils version at compile time, we'll have to always use this hack.
- __asm__ __volatile__
- (
-#if defined(__thumb2__)
- ".short 0xF3BF, 0x8F5B\n" // dmb ish
-#else
- ".word 0xF57FF05B\n" // dmb ish
-#endif
- :
- :
- : "memory"
- );
-#else
- int tmp;
- __asm__ __volatile__
- (
- BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
- "mcr\tp15, 0, r0, c7, c10, 5\n"
- BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
- : "=&l" (tmp)
- :
- : "memory"
- );
-#endif
- }
-};
-
template< bool Signed >
struct operations< 4u, Signed > :
@@ -160,6 +62,9 @@ struct operations< 4u, Signed > :
typedef typename make_storage_type< 4u, Signed >::type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
@@ -217,7 +122,7 @@ struct operations< 4u, Signed > :
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [expected] "r" (expected), // %4
+ : [expected] "Ir" (expected), // %4
[desired] "r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -253,7 +158,7 @@ struct operations< 4u, Signed > :
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [expected] "r" (expected), // %4
+ : [expected] "Ir" (expected), // %4
[desired] "r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -284,7 +189,7 @@ struct operations< 4u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -310,7 +215,7 @@ struct operations< 4u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -336,7 +241,7 @@ struct operations< 4u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -362,7 +267,7 @@ struct operations< 4u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -388,7 +293,7 @@ struct operations< 4u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -406,6 +311,266 @@ struct operations< 4u, Signed > :
}
};
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 1u, Signed >::type storage_type;
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ extended_storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // load the original value
+ "strexb %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
+ : [value] "r" ((extended_storage_type)v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexbeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" ((extended_storage_type)expected), // %4
+ [desired] "r" ((extended_storage_type)desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strexb %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" ((extended_storage_type)expected), // %4
+ [desired] "r" ((extended_storage_type)desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
template< >
struct operations< 1u, false > :
@@ -434,7 +599,7 @@ struct operations< 1u, false > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -461,7 +626,7 @@ struct operations< 1u, false > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -496,7 +661,7 @@ struct operations< 1u, true > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -523,7 +688,7 @@ struct operations< 1u, true > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -531,6 +696,268 @@ struct operations< 1u, true > :
}
};
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
+
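
In effect, the specialization above hand-writes the load-exclusive/store-exclusive retry loop that GCC's __atomic builtins expand to on ARMv6k and later. A hedged, portable equivalent of its exchange() (exchange_byte is our name, not part of the patch):

#include <cstdint>

// Same semantics as the ldrexb/strexb loop above: load-exclusive the
// byte, attempt a store-exclusive of the new value, and retry if the
// exclusive store failed because another core intervened.
inline std::uint8_t exchange_byte(std::uint8_t volatile* p, std::uint8_t v)
{
    return __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST);
}
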
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_arm_operations_base
+{
+ typedef typename make_storage_type< 2u, Signed >::type storage_type;
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ extended_storage_type original;
+ fence_before(order);
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // load the original value
+ "strexh %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
+ "teq %[tmp], #0\n" // check if store succeeded
+ "bne 1b\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
+ : [value] "r" ((extended_storage_type)v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
+ "strexheq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
+ "eoreq %[success], %[success], #1\n" // if (flags.equal) success ^= 1 (i.e. make it 1 if store succeeded)
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" ((extended_storage_type)expected), // %4
+ [desired] "r" ((extended_storage_type)desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ uint32_t success;
+ uint32_t tmp;
+ extended_storage_type original;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "mov %[success], #0\n" // success = 0
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "cmp %[original], %[expected]\n" // flags = original==expected
+ "bne 2f\n" // if (!flags.equal) goto end
+ "strexh %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
+ "eors %[success], %[success], #1\n" // success ^= 1 (i.e. make it 1 if store succeeded); flags.equal = success == 0
+ "beq 1b\n" // if (flags.equal) goto retry
+ "2:\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [success] "=&r" (success), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [expected] "Ir" ((extended_storage_type)expected), // %4
+ [desired] "r" ((extended_storage_type)desired) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ expected = static_cast< storage_type >(original);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" ((extended_storage_type)v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return static_cast< storage_type >(original);
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
template< >
struct operations< 2u, false > :
@@ -559,7 +986,7 @@ struct operations< 2u, false > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -586,7 +1013,7 @@ struct operations< 2u, false > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -621,7 +1048,7 @@ struct operations< 2u, true > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -648,7 +1075,7 @@ struct operations< 2u, true > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "r" (v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -656,6 +1083,7 @@ struct operations< 2u, true > :
}
};
+#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
@@ -677,6 +1105,9 @@ struct operations< 8u, Signed > :
typedef typename make_storage_type< 8u, Signed >::type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
exchange(storage, v, order);
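
Note that the 8-byte specialization implements store() in terms of exchange(): a plain 64-bit store is not guaranteed to be single-copy atomic on 32-bit ARM, so an ldrexd/strexd loop is required. A portable sketch of the same requirement, assuming GCC-style builtins:

#include <cstdint>

// On ARMv7, gcc lowers this to an ldrexd/strexd retry loop rather than
// a plain strd, for the same reason store() above defers to exchange().
inline void store64(std::uint64_t volatile* p, std::uint64_t v)
{
    __atomic_store_n(p, v, __ATOMIC_SEQ_CST);
}
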
diff --git a/boost/atomic/detail/ops_gcc_arm_common.hpp b/boost/atomic/detail/ops_gcc_arm_common.hpp
new file mode 100644
index 0000000000..9ac08ee5b6
--- /dev/null
+++ b/boost/atomic/detail/ops_gcc_arm_common.hpp
@@ -0,0 +1,133 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_arm_common.hpp
+ *
+ * This header contains basic utilities for gcc ARM backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
+
+#include <boost/cstdint.hpp>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
+//
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" attribute in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. (Actually, there was an extension of v6 called v6T2 which supported
+// "Thumb 2" mode, but its architecture manual is no longer available, referring to v7.)
+// So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+// A note about memory_order_consume. Technically, this architecture allows avoiding the
+// memory barrier after a consume load, since it supports data dependency ordering.
+// However, some compiler optimizations may break seemingly valid code that relies on data
+// dependency tracking by injecting bogus branches to aid out-of-order execution.
+// This may happen not only in Boost.Atomic code but also in the user's code, which we have
+// no control over. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+#if defined(__thumb__) && !defined(__thumb2__)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 8f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "8:\n"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 9f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "9:\n"
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&l" (var)
+#else
+// The tmpreg may be wasted in this case, which is non-optimal.
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_START(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_END(TMPREG)
+#define BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(var) "=&r" (var)
+#endif
+
+struct gcc_arm_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & memory_order_release) != 0)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ hardware_full_fence();
+ }
+
+ static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_DMB)
+ // Older binutils (supposedly, older than 2.21.1) didn't support symbolic or numeric arguments of the "dmb" instruction such as "ish" or "#11".
+ // As a workaround we have to inject encoded bytes of the instruction. There are two encodings for the instruction: ARM and Thumb. See ARM Architecture Reference Manual, A8.8.43.
+ // Since we cannot detect binutils version at compile time, we'll have to always use this hack.
+ __asm__ __volatile__
+ (
+#if defined(__thumb2__)
+ ".short 0xF3BF, 0x8F5B\n" // dmb ish
+#else
+ ".word 0xF57FF05B\n" // dmb ish
+#endif
+ :
+ :
+ : "memory"
+ );
+#else
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "mcr\tp15, 0, r0, c7, c10, 5\n"
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : "=&l" (tmp)
+ :
+ : "memory"
+ );
+#endif
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ARM_COMMON_HPP_INCLUDED_
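
A hedged sketch of the fence discipline this base class encodes; raw_exchange stands in for an ldrex/strex loop and is hypothetical. Release-class orders fence before the hardware operation, acquire/consume-class orders fence after it:

#include <boost/memory_order.hpp>
#include <boost/atomic/detail/ops_gcc_arm_common.hpp>

template< typename T, typename RawExchange >
T exchange_with_fences(T volatile& storage, T v,
                       boost::memory_order order, RawExchange raw_exchange)
{
    typedef boost::atomics::detail::gcc_arm_operations_base base;
    base::fence_before(order);         // dmb ish if a release order was requested
    T old = raw_exchange(storage, v);  // relaxed ldrex/strex retry loop
    base::fence_after(order);          // dmb ish if an acquire/consume order
    return old;
}
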
diff --git a/boost/atomic/detail/ops_gcc_atomic.hpp b/boost/atomic/detail/ops_gcc_atomic.hpp
index 4e1adae86a..b32f8933b0 100644
--- a/boost/atomic/detail/ops_gcc_atomic.hpp
+++ b/boost/atomic/detail/ops_gcc_atomic.hpp
@@ -14,12 +14,13 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
-#if defined(__clang__) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
+#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif
@@ -164,14 +165,18 @@ struct gcc_atomic_operations
};
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
-#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
+#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+// A similar problem exists with gcc 7 as well, as it requires linking with libatomic to use 16-byte intrinsics:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
template< bool Signed >
struct operations< 16u, Signed > :
public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
@@ -181,6 +186,9 @@ struct operations< 16u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >
{
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -195,6 +203,8 @@ template< bool Signed >
struct operations< 8u, Signed > :
public cas_based_operations< gcc_dcas_x86< Signed > >
{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
@@ -210,6 +220,9 @@ struct operations< 8u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
{
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
@@ -219,6 +232,9 @@ struct operations< 8u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >
{
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -240,6 +256,9 @@ struct operations< 4u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
{
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -249,6 +268,9 @@ struct operations< 4u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
{
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -260,6 +282,9 @@ struct operations< 4u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >
{
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -281,6 +306,9 @@ struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
{
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -290,6 +318,9 @@ struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
{
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
@@ -299,6 +330,9 @@ struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
{
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -310,6 +344,9 @@ struct operations< 2u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >
{
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -331,6 +368,9 @@ struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
{
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
@@ -340,6 +380,9 @@ struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
{
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -349,6 +392,9 @@ struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
{
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
@@ -358,6 +404,9 @@ struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
{
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -369,6 +418,9 @@ struct operations< 1u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >
{
typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
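
The motivation for steering gcc 7+ to the cmpxchg16b backend can be observed with a small probe (ours, not part of the patch): once the 16-byte builtins are routed through libatomic they are no longer unconditionally lock-free.

#include <cstdio>

int main()
{
    // Reports 0 on gcc 7+ targets where 16-byte __atomic operations
    // become libatomic calls; Boost.Atomic therefore prefers its own
    // cmpxchg16b-based implementation when the instruction is available.
    std::printf("16-byte always lock-free: %d\n",
                (int)__atomic_always_lock_free(16, (void*)0));
    return 0;
}
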
diff --git a/boost/atomic/detail/ops_gcc_ppc.hpp b/boost/atomic/detail/ops_gcc_ppc.hpp
index 76eae4e232..29e7ddf249 100644
--- a/boost/atomic/detail/ops_gcc_ppc.hpp
+++ b/boost/atomic/detail/ops_gcc_ppc.hpp
@@ -16,10 +16,12 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
+#include <boost/atomic/detail/ops_gcc_ppc_common.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
@@ -57,7 +59,9 @@ namespace detail {
	cycles, while "sync" has a cost of about 50 clock cycles, the small
penalty to atomic loads more than compensates for this.
- Byte- and halfword-sized atomic values are realized by encoding the
+	Byte- and halfword-sized atomic values are implemented in two ways.
+	When 8- and 16-bit instructions are available (Power8 and later),
+	they are used. Otherwise, operations are realized by encoding the
value to be represented into a word, performing sign/zero extension
as appropriate. This means that after add/sub operations the value
needs fixing up to accurately preserve the wrap-around semantic of
@@ -75,39 +79,6 @@ namespace detail {
to pose a problem.
*/
-// A note about memory_order_consume. Technically, this architecture allows to avoid
-// unnecessary memory barrier after consume load since it supports data dependency ordering.
-// However, some compiler optimizations may break a seemingly valid code relying on data
-// dependency tracking by injecting bogus branches to aid out of order execution.
-// This may happen not only in Boost.Atomic code but also in user's code, which we have no
-// control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
-// For this reason we promote memory_order_consume to memory_order_acquire.
-
-struct gcc_ppc_operations_base
-{
- static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
-
- static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
- {
-#if defined(__powerpc64__) || defined(__PPC64__)
- if (order == memory_order_seq_cst)
- __asm__ __volatile__ ("sync" ::: "memory");
- else if ((order & memory_order_release) != 0)
- __asm__ __volatile__ ("lwsync" ::: "memory");
-#else
- if ((order & memory_order_release) != 0)
- __asm__ __volatile__ ("sync" ::: "memory");
-#endif
- }
-
- static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
- {
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
- __asm__ __volatile__ ("isync" ::: "memory");
- }
-};
-
-
template< bool Signed >
struct operations< 4u, Signed > :
public gcc_ppc_operations_base
@@ -115,6 +86,9 @@ struct operations< 4u, Signed > :
typedef typename make_storage_type< 4u, Signed >::type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
@@ -333,6 +307,238 @@ struct operations< 4u, Signed > :
}
};
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
+template< bool Signed >
+struct operations< 1u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 1u, Signed >::type storage_type;
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 1u, false >::type unsigned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "stb %1, %0\n\t"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ {
+ __asm__ __volatile__
+ (
+ "lbz %0, %1\n\t"
+ "cmpw %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lbz %0, %1\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ );
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y1\n\t"
+ "stbcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "lbarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stbcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "0: lbarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stbcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
template< >
struct operations< 1u, false > :
@@ -430,6 +636,240 @@ struct operations< 1u, true > :
}
};
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
+
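
For reference, the lbarx/stbcx. sequences above are the Power8 reservation-loop form of a byte-wide CAS; a hedged, portable expression of the same operation (cas_byte is our name):

#include <cstdint>

// On Power8 and later, gcc expands this into a lbarx/cmpw/stbcx. retry
// loop much like the asm above, with lwsync/isync fences inserted
// according to the given memory orders.
inline bool cas_byte(std::uint8_t volatile* p, std::uint8_t& expected,
                     std::uint8_t desired)
{
    return __atomic_compare_exchange_n(p, &expected, desired, false,
                                       __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}
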
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
+
+template< bool Signed >
+struct operations< 2u, Signed > :
+ public gcc_ppc_operations_base
+{
+ typedef typename make_storage_type< 2u, Signed >::type storage_type;
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 2u, false >::type unsigned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "sth %1, %0\n\t"
+ : "+m" (storage)
+ : "r" (v)
+ );
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v;
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ {
+ __asm__ __volatile__
+ (
+ "lhz %0, %1\n\t"
+ "cmpw %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lhz %0, %1\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ );
+ }
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y1\n\t"
+ "sthcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "+Z" (storage)
+ : "b" (v)
+ : "cr0"
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "lharx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "sthcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "li %1, 0\n\t"
+ "0: lharx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "sthcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
+ : "=&b" (expected), "=&b" (success), "+Z" (storage)
+ : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "cr0"
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+#else // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< >
struct operations< 2u, false > :
@@ -527,8 +967,9 @@ struct operations< 2u, true > :
}
};
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
-#if defined(__powerpc64__) || defined(__PPC64__)
+#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
template< bool Signed >
struct operations< 8u, Signed > :
@@ -537,6 +978,9 @@ struct operations< 8u, Signed > :
typedef typename make_storage_type< 8u, Signed >::type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
@@ -755,7 +1199,7 @@ struct operations< 8u, Signed > :
}
};
-#endif // defined(__powerpc64__) || defined(__PPC64__)
+#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
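For readers less familiar with PowerPC load-reserve/store-conditional sequences, here is a minimal portable sketch of what the lharx/sthcx. fetch_add loop above computes; the function name is hypothetical and this is an illustration, not code from the patch:

#include <atomic>
#include <cstdint>

// Hypothetical illustration: the effect of the lharx/sthcx. fetch_add loop,
// modeled with a portable compare-exchange retry loop.
inline std::uint16_t fetch_add_sketch(std::atomic<std::uint16_t>& storage, std::uint16_t v)
{
    std::uint16_t old_val = storage.load(std::memory_order_relaxed);
    // lharx takes a reservation on the address; sthcx. succeeds only if the
    // reservation still holds, otherwise the loop retries - the same retry
    // condition that compare_exchange_weak expresses portably.
    while (!storage.compare_exchange_weak(
        old_val, static_cast<std::uint16_t>(old_val + v), std::memory_order_relaxed))
    {
        // old_val is refreshed with the currently stored value on failure.
    }
    return old_val;
}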
diff --git a/boost/atomic/detail/ops_gcc_ppc_common.hpp b/boost/atomic/detail/ops_gcc_ppc_common.hpp
new file mode 100644
index 0000000000..3aa4e5f15f
--- /dev/null
+++ b/boost/atomic/detail/ops_gcc_ppc_common.hpp
@@ -0,0 +1,69 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/ops_gcc_ppc_common.hpp
+ *
+ * This header contains basic utilities for gcc PowerPC backend.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// The implementation below uses information from this document:
+// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html
+
+// A note about memory_order_consume. Technically, this architecture allows one to avoid
+// an otherwise unnecessary memory barrier after a consume load, since it supports data
+// dependency ordering. However, some compiler optimizations may break seemingly valid code
+// that relies on data dependency tracking by injecting bogus branches to aid out-of-order
+// execution. This may happen not only in Boost.Atomic code but also in the user's code,
+// over which we have no control. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
+// For this reason we promote memory_order_consume to memory_order_acquire.
+
+struct gcc_ppc_operations_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(__powerpc64__) || defined(__PPC64__)
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ else if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+#else
+ if ((order & memory_order_release) != 0)
+ __asm__ __volatile__ ("sync" ::: "memory");
+#endif
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ __asm__ __volatile__ ("isync" ::: "memory");
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
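As a rough sketch of the barrier selection that fence_before/fence_after implement above (an illustration of the same rules, not additional patch code; the helper names are hypothetical):

#include <boost/memory_order.hpp>

// Hypothetical helpers restating the fence logic above as data.
inline const char* ppc_fence_before_sketch(boost::memory_order order)
{
    if (order == boost::memory_order_seq_cst)
        return "sync";    // full barrier for seq_cst
    if ((order & boost::memory_order_release) != 0)
        return "lwsync";  // cheaper release barrier on PPC64; 32-bit targets use "sync"
    return "";            // relaxed/acquire/consume: nothing before the access
}

inline const char* ppc_fence_after_sketch(boost::memory_order order)
{
    if ((order & (boost::memory_order_consume | boost::memory_order_acquire)) != 0)
        return "isync";   // orders subsequent accesses after the atomic access
    return "";
}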
diff --git a/boost/atomic/detail/ops_gcc_sparc.hpp b/boost/atomic/detail/ops_gcc_sparc.hpp
index 08492ac69a..9191c13255 100644
--- a/boost/atomic/detail/ops_gcc_sparc.hpp
+++ b/boost/atomic/detail/ops_gcc_sparc.hpp
@@ -16,6 +16,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SPARC_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -66,6 +67,9 @@ struct gcc_sparc_cas32 :
typedef typename make_storage_type< 4u, Signed >::type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
@@ -147,6 +151,9 @@ struct gcc_sparc_cas64 :
typedef typename make_storage_type< 8u, Signed >::type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
diff --git a/boost/atomic/detail/ops_gcc_sync.hpp b/boost/atomic/detail/ops_gcc_sync.hpp
index a9a9ae2f72..2a075bcf9f 100644
--- a/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/boost/atomic/detail/ops_gcc_sync.hpp
@@ -16,6 +16,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_SYNC_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -164,15 +165,22 @@ struct operations< 1u, Signed > :
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
#else
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
#endif
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -191,13 +199,19 @@ struct operations< 2u, Signed > :
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
#else
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
#endif
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -214,11 +228,16 @@ struct operations< 4u, Signed > :
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
#else
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
#endif
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -233,9 +252,13 @@ struct operations< 8u, Signed > :
{
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
#else
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
#endif
+
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -245,6 +268,9 @@ struct operations< 16u, Signed > :
public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >
{
typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
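The storage_size constants added above record the width the __sync backend actually operates on: when the compiler lacks a native CAS at the requested size, storage is promoted to the next width that has one. A minimal sketch of that selection rule for a 1-byte atomic (the typedef name is hypothetical):

// Hypothetical sketch of the promotion rule for a 1-byte atomic.
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
typedef unsigned char sync_storage_sketch_t;      // storage_size == 1
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
typedef unsigned short sync_storage_sketch_t;     // storage_size == 2
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
typedef unsigned int sync_storage_sketch_t;       // storage_size == 4
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
typedef unsigned long long sync_storage_sketch_t; // storage_size == 8
#endif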
diff --git a/boost/atomic/detail/ops_gcc_x86.hpp b/boost/atomic/detail/ops_gcc_x86.hpp
index 74e45c1f61..baf4d5757d 100644
--- a/boost/atomic/detail/ops_gcc_x86.hpp
+++ b/boost/atomic/detail/ops_gcc_x86.hpp
@@ -16,6 +16,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -30,12 +31,6 @@
#pragma once
#endif
-#if defined(__x86_64__)
-#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "rdx"
-#else
-#define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "edx"
-#endif
-
namespace boost {
namespace atomics {
namespace detail {
@@ -57,11 +52,11 @@ struct gcc_x86_operations_base
}
};
-template< typename T, typename Derived >
+template< std::size_t Size, bool Signed, typename Derived >
struct gcc_x86_operations :
public gcc_x86_operations_base
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size, Signed >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -108,11 +103,15 @@ struct gcc_x86_operations :
template< bool Signed >
struct operations< 1u, Signed > :
- public gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+ public gcc_x86_operations< 1u, Signed, operations< 1u, Signed > >
{
- typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef gcc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -143,6 +142,15 @@ struct operations< 1u, Signed > :
{
storage_type previous = expected;
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgb %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchgb %3, %1\n\t"
@@ -151,22 +159,23 @@ struct operations< 1u, Signed > :
: "q" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ temp_storage_type new_val;\
__asm__ __volatile__\
(\
- "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
- "1: movb %[arg], %%dl\n\t"\
- op " %%al, %%dl\n\t"\
- "lock; cmpxchgb %%dl, %[storage]\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%al, %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage)\
- : [arg] "q" (argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -195,11 +204,15 @@ struct operations< 1u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
- public gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+ public gcc_x86_operations< 2u, Signed, operations< 2u, Signed > >
{
- typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef gcc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -230,6 +243,15 @@ struct operations< 2u, Signed > :
{
storage_type previous = expected;
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgw %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "q" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchgw %3, %1\n\t"
@@ -238,22 +260,23 @@ struct operations< 2u, Signed > :
: "q" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ temp_storage_type new_val;\
__asm__ __volatile__\
(\
- "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
- "1: movw %[arg], %%dx\n\t"\
- op " %%ax, %%dx\n\t"\
- "lock; cmpxchgw %%dx, %[storage]\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%ax, %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage)\
- : [arg] "q" (argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -282,12 +305,15 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct operations< 4u, Signed > :
- public gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+ public gcc_x86_operations< 4u, Signed, operations< 4u, Signed > >
{
- typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef gcc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
@@ -317,6 +343,15 @@ struct operations< 4u, Signed > :
{
storage_type previous = expected;
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgl %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchgl %3, %1\n\t"
@@ -325,22 +360,23 @@ struct operations< 4u, Signed > :
: "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ storage_type new_val;\
__asm__ __volatile__\
(\
- "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
- "1: movl %[arg], %%edx\n\t"\
- op " %%eax, %%edx\n\t"\
- "lock; cmpxchgl %%edx, %[storage]\n\t"\
+ "1: mov %[arg], %[new_val]\n\t"\
+ op " %%eax, %[new_val]\n\t"\
+ "lock; cmpxchgl %[new_val], %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage)\
- : [arg] "r" (argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [arg] "ir" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -373,18 +409,23 @@ template< bool Signed >
struct operations< 8u, Signed > :
public cas_based_operations< gcc_dcas_x86< Signed > >
{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif defined(__x86_64__)
template< bool Signed >
struct operations< 8u, Signed > :
- public gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
+ public gcc_x86_operations< 8u, Signed, operations< 8u, Signed > >
{
- typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
+ typedef gcc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
__asm__ __volatile__
@@ -414,6 +455,15 @@ struct operations< 8u, Signed > :
{
storage_type previous = expected;
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchgq %3, %1"
+ : "+a" (previous), "+m" (storage), "=@ccz" (success)
+ : "r" (desired)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchgq %3, %1\n\t"
@@ -422,22 +472,23 @@ struct operations< 8u, Signed > :
: "r" (desired)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
expected = previous;
return success;
}
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
+ storage_type new_val;\
__asm__ __volatile__\
(\
- "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
".align 16\n\t"\
- "1: movq %[arg], %%rdx\n\t"\
- op " %%rax, %%rdx\n\t"\
- "lock; cmpxchgq %%rdx, %[storage]\n\t"\
+ "1: movq %[arg], %[new_val]\n\t"\
+ op " %%rax, %[new_val]\n\t"\
+ "lock; cmpxchgq %[new_val], %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage)\
+ : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
: [arg] "r" (argument)\
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -472,6 +523,8 @@ template< bool Signed >
struct operations< 16u, Signed > :
public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
@@ -506,6 +559,4 @@ BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
} // namespace atomics
} // namespace boost
-#undef BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER
-
#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_
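Two of the changes above are easy to miss: the fixed edx/rdx scratch register in the CAS loops is replaced by a compiler-allocated temporary, and compare_exchange_strong gains a flag-output variant. A self-contained sketch of the flag-output form for the 4-byte case (requires GCC 6 style flag output operands, which is what BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS gates; the function name is hypothetical):

#include <cstdint>

// Hypothetical standalone version of the "=@ccz" compare-exchange above:
// the compiler consumes ZF directly instead of materializing it with sete.
inline bool cmpxchg32_sketch(std::uint32_t volatile& storage, std::uint32_t& expected, std::uint32_t desired)
{
    std::uint32_t previous = expected;
    bool success;
    __asm__ __volatile__
    (
        "lock; cmpxchgl %3, %1"
        : "+a" (previous), "+m" (storage), "=@ccz" (success)
        : "r" (desired)
        : "memory"
    );
    expected = previous;
    return success;
}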
diff --git a/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/boost/atomic/detail/ops_gcc_x86_dcas.hpp
index 7f3962199a..28cbc225e3 100644
--- a/boost/atomic/detail/ops_gcc_x86_dcas.hpp
+++ b/boost/atomic/detail/ops_gcc_x86_dcas.hpp
@@ -204,11 +204,14 @@ struct gcc_dcas_x86
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
#if defined(__clang__)
+
// Clang cannot allocate eax:edx register pairs but it has sync intrinsics
storage_type old_expected = expected;
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
+
#elif defined(__PIC__)
+
// Make sure ebx is saved and restored properly in case
// of position independent code. To make this work
// setup register constraints such that ebx can not be
@@ -222,25 +225,50 @@ struct gcc_dcas_x86
uint32_t scratch;
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"movl %%ebx, %[scratch]\n\t"
"movl %[desired_lo], %%ebx\n\t"
- "lock; cmpxchg8b %[dest]\n\t"
+ "lock; cmpxchg8b (%[dest])\n\t"
+ "movl %[scratch], %%ebx\n\t"
+ : "+A" (expected), [scratch] "=m" (scratch), [success] "=@ccz" (success)
+ : [desired_lo] "Sm" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %[scratch]\n\t"
+ "movl %[desired_lo], %%ebx\n\t"
+ "lock; cmpxchg8b (%[dest])\n\t"
"movl %[scratch], %%ebx\n\t"
"sete %[success]\n\t"
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
- : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32))
+ : "+A,A,A,A,A,A" (expected), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
+ : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32)), [dest] "D,D,S,S,D,D" (&storage)
#else
- : "+A" (expected), [dest] "+m" (storage), [scratch] "=m" (scratch), [success] "=q" (success)
- : [desired_lo] "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32))
+ : "+A" (expected), [scratch] "=m" (scratch), [success] "=q" (success)
+ : [desired_lo] "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
#endif
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+
return success;
-#else
+
+#else // defined(__PIC__)
+
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg8b %[dest]\n\t"
+ : "+A" (expected), [dest] "+m" (storage), [success] "=@ccz" (success)
+ : "b" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchg8b %[dest]\n\t"
@@ -254,8 +282,11 @@ struct gcc_dcas_x86
#endif
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+
return success;
-#endif
+
+#endif // defined(__PIC__)
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
@@ -461,11 +492,14 @@ struct gcc_dcas_x86_64
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
#if defined(__clang__)
+
// Clang cannot allocate rax:rdx register pairs but it has sync intrinsics
storage_type old_expected = expected;
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
+
#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+
// GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
uint64_t const* p_desired = (uint64_t const*)&desired;
const uint64_t desired_lo = p_desired[0], desired_hi = p_desired[1];
@@ -499,10 +533,21 @@ struct gcc_dcas_x86_64
#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
return success;
+
#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+
uint64_t const* p_desired = (uint64_t const*)&desired;
const uint64_t desired_lo = p_desired[0], desired_hi = p_desired[1];
bool success;
+#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+ __asm__ __volatile__
+ (
+ "lock; cmpxchg16b %[dest]\n\t"
+ : "+A" (expected), [dest] "+m" (storage), [success] "=@ccz" (success)
+ : "b" (desired_lo), "c" (desired_hi)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchg16b %[dest]\n\t"
@@ -516,8 +561,11 @@ struct gcc_dcas_x86_64
#endif
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
+#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
+
return success;
-#endif
+
+#endif // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
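The PIC branches above stage %ebx by hand because, in 32-bit position-independent code, %ebx holds the GOT pointer and must not be handed to the register allocator. Schematically, the emitted sequence is:

    movl %ebx, scratch        # save the GOT pointer
    movl desired_lo, %ebx     # cmpxchg8b takes the low half of the new value in %ebx
    lock; cmpxchg8b (dest)    # compare edx:eax against the destination
    movl scratch, %ebx        # restore the GOT pointer before returning to compiled code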
diff --git a/boost/atomic/detail/ops_linux_arm.hpp b/boost/atomic/detail/ops_linux_arm.hpp
index c26bc2c07b..840c125a7b 100644
--- a/boost/atomic/detail/ops_linux_arm.hpp
+++ b/boost/atomic/detail/ops_linux_arm.hpp
@@ -18,6 +18,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_LINUX_ARM_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -91,6 +92,9 @@ struct linux_arm_cas :
typedef typename make_storage_type< 4u, Signed >::type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before_store(order);
diff --git a/boost/atomic/detail/ops_msvc_arm.hpp b/boost/atomic/detail/ops_msvc_arm.hpp
index e0a709c991..a0cfe81afc 100644
--- a/boost/atomic/detail/ops_msvc_arm.hpp
+++ b/boost/atomic/detail/ops_msvc_arm.hpp
@@ -17,6 +17,7 @@
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_ARM_HPP_INCLUDED_
#include <intrin.h>
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
@@ -134,6 +135,9 @@ struct operations< 1u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
@@ -300,6 +304,9 @@ struct operations< 2u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
@@ -466,6 +473,9 @@ struct operations< 4u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
@@ -632,6 +642,9 @@ struct operations< 8u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before_store(order);
diff --git a/boost/atomic/detail/ops_msvc_x86.hpp b/boost/atomic/detail/ops_msvc_x86.hpp
index 623662468d..85bed28ad1 100644
--- a/boost/atomic/detail/ops_msvc_x86.hpp
+++ b/boost/atomic/detail/ops_msvc_x86.hpp
@@ -16,6 +16,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_MSVC_X86_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
@@ -164,6 +165,9 @@ struct operations< 4u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&storage, v));
@@ -236,6 +240,9 @@ struct operations< 1u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&storage, v));
@@ -281,6 +288,9 @@ struct operations< 1u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -422,6 +432,9 @@ struct operations< 2u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&storage, v));
@@ -467,6 +480,9 @@ struct operations< 2u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -609,6 +625,9 @@ struct msvc_dcas_x86
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
//
// The Pentium processor (and newer processors since) guarantees that the following additional memory operations will always be carried out atomically:
@@ -812,6 +831,9 @@ struct operations< 8u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
return static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&storage, v));
@@ -859,6 +881,9 @@ struct msvc_dcas_x86_64
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
storage_type value = const_cast< storage_type& >(storage);
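The Intel manual guarantee quoted above is what makes the 64-bit msvc_dcas_x86 load and store lock-free on 32-bit targets. Where such a guarantee is unavailable, a compare-exchange that never changes the value can serve as a load; a minimal sketch (the function name is hypothetical, the intrinsic is the documented MSVC one):

#include <intrin.h>

// Hypothetical sketch: a 64-bit atomic load on 32-bit Windows via a
// compare-exchange that leaves the stored value unchanged.
inline __int64 load64_sketch(__int64 volatile& storage)
{
    // Returns the observed value; exchanging 0 for 0 writes nothing unless
    // the value already was 0, in which case the write is a no-op anyway.
    return _InterlockedCompareExchange64(&storage, 0, 0);
}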
diff --git a/boost/atomic/detail/ops_windows.hpp b/boost/atomic/detail/ops_windows.hpp
index 50d951dabf..29bd3809d2 100644
--- a/boost/atomic/detail/ops_windows.hpp
+++ b/boost/atomic/detail/ops_windows.hpp
@@ -23,6 +23,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_WINDOWS_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/interlocked.hpp>
@@ -109,6 +110,9 @@ struct operations< 4u, Signed > :
typedef typename base_type::storage_type storage_type;
typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
diff --git a/boost/atomic/detail/platform.hpp b/boost/atomic/detail/platform.hpp
index 786b1f1971..117dff27f4 100644
--- a/boost/atomic/detail/platform.hpp
+++ b/boost/atomic/detail/platform.hpp
@@ -21,50 +21,48 @@
#pragma once
#endif
-#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
-
-// Compiler-based backends
-#if (defined(__ibmxl__) || defined(__IBMCPP__)) && defined(__PPC__)
+#if defined(__GNUC__) && defined(__arm__)
+
+// Newer gcc versions define __ARM_ARCH. Older ones don't, so we have to deduce the ARM architecture version from a set of version-specific macros.
+#if defined(__ARM_ARCH)
+#define BOOST_ATOMIC_DETAIL_ARM_ARCH __ARM_ARCH
+#elif defined(__ARM_ARCH_8A__)
+#define BOOST_ATOMIC_DETAIL_ARM_ARCH 8
+#elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
+ defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)
+#define BOOST_ATOMIC_DETAIL_ARM_ARCH 7
+#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\
+ defined(__ARM_ARCH_6ZK__)
+#define BOOST_ATOMIC_DETAIL_ARM_ARCH 6
+#else
+// We are not interested in older versions - they don't support the atomic instructions (ldrex/strex) this backend relies on
+#define BOOST_ATOMIC_DETAIL_ARM_ARCH 0
+#endif
-// IBM XL C++ Compiler has to be checked before GCC/Clang as it pretends to be one but does not support __atomic* intrinsics.
-// It does support GCC inline assembler though.
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
+#endif // defined(__GNUC__) && defined(__arm__)
-#elif ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
- (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
- (\
- (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\
- (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\
- )
+#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_atomic
+// Determine the target platform.
+// The target platform describes the compiler and target architecture. It can be used by more generic backends, such as the ones
+// based on compiler intrinsics, to implement specialized operations in a non-generic way.
-#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && (defined(__i386__) || defined(__x86_64__))
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_x86
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_x86
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_ppc
-// This list of ARM architecture versions comes from Apple's arm/arch.h header.
-// I don't know how complete it is.
-#elif defined(__GNUC__) &&\
- (\
- defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
- defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\
- defined(__ARM_ARCH_6ZK__) ||\
- defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
- defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
- defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__) ||\
- defined(__ARM_ARCH_8A__)\
- )
+#elif defined(__GNUC__) && defined(__arm__) && (BOOST_ATOMIC_DETAIL_ARM_ARCH+0) >= 6
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_arm
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND gcc_arm
#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && (defined(__sparcv8plus) || defined(__sparc_v9__))
@@ -74,6 +72,38 @@
#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_alpha
+#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
+
+#elif defined(_MSC_VER) && _MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64))
+
+#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm
+
+#endif
+
+// Compiler-based backends
+
+// The IBM XL C++ compiler has to be checked before GCC/Clang, as it pretends to be one of them but does not support the __atomic* intrinsics.
+// It does support GCC inline assembler, though.
+#if !(defined(__ibmxl__) || defined(__IBMCPP__)) &&\
+ ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
+ (defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
+ (\
+ (__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_CHAR_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_SHORT_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_INT_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_LONG_LOCK_FREE + 0) == 2 ||\
+ (__GCC_ATOMIC_LLONG_LOCK_FREE + 0) == 2\
+ )
+
+#define BOOST_ATOMIC_DETAIL_BACKEND gcc_atomic
+
+#elif defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+
+#define BOOST_ATOMIC_DETAIL_BACKEND BOOST_ATOMIC_DETAIL_PLATFORM
+
#elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 401) &&\
(\
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) ||\
@@ -83,40 +113,39 @@
defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16)\
)
-#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_sync
-
-#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
-
-#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_x86
-
-#elif defined(_MSC_VER) && _MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64))
-
-#define BOOST_ATOMIC_DETAIL_PLATFORM msvc_arm
+#define BOOST_ATOMIC_DETAIL_BACKEND gcc_sync
#endif
// OS-based backends
-#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+
+#if !defined(BOOST_ATOMIC_DETAIL_BACKEND)
#if defined(__linux__) && defined(__arm__)
-#define BOOST_ATOMIC_DETAIL_PLATFORM linux_arm
+#define BOOST_ATOMIC_DETAIL_BACKEND linux_arm
#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
-#define BOOST_ATOMIC_DETAIL_PLATFORM windows
+#define BOOST_ATOMIC_DETAIL_BACKEND windows
#endif
-#endif // !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
+#endif // !defined(BOOST_ATOMIC_DETAIL_BACKEND)
#endif // !defined(BOOST_ATOMIC_FORCE_FALLBACK)
-#if !defined(BOOST_ATOMIC_DETAIL_PLATFORM)
-#define BOOST_ATOMIC_DETAIL_PLATFORM emulated
+#if !defined(BOOST_ATOMIC_DETAIL_BACKEND)
+#define BOOST_ATOMIC_DETAIL_BACKEND emulated
#define BOOST_ATOMIC_EMULATED
#endif
-#define BOOST_ATOMIC_DETAIL_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_PLATFORM).hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_BACKEND)
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND generic
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC
+#endif
+
+#define BOOST_ATOMIC_DETAIL_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_BACKEND).hpp>
+#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_EXTRA_BACKEND).hpp>
#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
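The renamed macros are consumed at include sites elsewhere in the library; assuming the selected backend is gcc_x86, the expansion looks like this (illustration only):

#include BOOST_ATOMIC_DETAIL_BACKEND_HEADER(boost/atomic/detail/ops_)
// expands to: #include <boost/atomic/detail/ops_gcc_x86.hpp>
#include BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(boost/atomic/detail/extra_ops_)
// expands to: #include <boost/atomic/detail/extra_ops_gcc_x86.hpp>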
diff --git a/boost/atomic/detail/storage_type.hpp b/boost/atomic/detail/storage_type.hpp
index 59e6901c26..d4d07f2273 100644
--- a/boost/atomic/detail/storage_type.hpp
+++ b/boost/atomic/detail/storage_type.hpp
@@ -224,7 +224,9 @@ struct make_storage_type< 16u, true >
struct BOOST_ATOMIC_DETAIL_MAY_ALIAS storage128_t
{
- boost::uint64_t data[2];
+ typedef boost::uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS element_type;
+
+ element_type data[2];
BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
{