summaryrefslogtreecommitdiff
path: root/boost/atomic/detail
diff options
context:
space:
mode:
authorDongHun Kwak <dh0128.kwak@samsung.com>2016-10-06 10:33:54 +0900
committerDongHun Kwak <dh0128.kwak@samsung.com>2016-10-06 10:36:09 +0900
commitd9ec475d945d3035377a0d89ed42e382d8988891 (patch)
tree34aff2cee4b209906243ab5499d61f3edee2982f /boost/atomic/detail
parent71d216b90256936a9638f325af9bc69d720e75de (diff)
downloadboost-d9ec475d945d3035377a0d89ed42e382d8988891.tar.gz
boost-d9ec475d945d3035377a0d89ed42e382d8988891.tar.bz2
boost-d9ec475d945d3035377a0d89ed42e382d8988891.zip
Imported Upstream version 1.60.0
Change-Id: Ie709530d6d5841088ceaba025cbe175a4ef43050
Signed-off-by: DongHun Kwak <dh0128.kwak@samsung.com>
Diffstat (limited to 'boost/atomic/detail')
-rw-r--r--boost/atomic/detail/atomic_flag.hpp6
-rw-r--r--boost/atomic/detail/atomic_template.hpp128
-rw-r--r--boost/atomic/detail/bitwise_cast.hpp (renamed from boost/atomic/detail/casts.hpp)31
-rw-r--r--boost/atomic/detail/caps_gcc_ppc.hpp2
-rw-r--r--boost/atomic/detail/config.hpp36
-rw-r--r--boost/atomic/detail/int_sizes.hpp4
-rw-r--r--boost/atomic/detail/operations_fwd.hpp3
-rw-r--r--boost/atomic/detail/ops_cas_based.hpp40
-rw-r--r--boost/atomic/detail/ops_emulated.hpp16
-rw-r--r--boost/atomic/detail/ops_extending_cas_based.hpp9
-rw-r--r--boost/atomic/detail/ops_gcc_alpha.hpp2
-rw-r--r--boost/atomic/detail/ops_gcc_arm.hpp2
-rw-r--r--boost/atomic/detail/ops_gcc_atomic.hpp15
-rw-r--r--boost/atomic/detail/ops_gcc_ppc.hpp379
-rw-r--r--boost/atomic/detail/ops_gcc_sparc.hpp27
-rw-r--r--boost/atomic/detail/ops_gcc_sync.hpp33
-rw-r--r--boost/atomic/detail/ops_gcc_x86.hpp4
-rw-r--r--boost/atomic/detail/ops_gcc_x86_dcas.hpp306
-rw-r--r--boost/atomic/detail/ops_linux_arm.hpp7
-rw-r--r--boost/atomic/detail/ops_msvc_arm.hpp4
-rw-r--r--boost/atomic/detail/ops_msvc_x86.hpp57
-rw-r--r--boost/atomic/detail/ops_windows.hpp1
-rw-r--r--boost/atomic/detail/platform.hpp8
-rw-r--r--boost/atomic/detail/storage_type.hpp138
24 files changed, 924 insertions, 334 deletions
diff --git a/boost/atomic/detail/atomic_flag.hpp b/boost/atomic/detail/atomic_flag.hpp
index 6a6667d8eb..7fb44cdb1a 100644
--- a/boost/atomic/detail/atomic_flag.hpp
+++ b/boost/atomic/detail/atomic_flag.hpp
@@ -42,7 +42,7 @@ struct atomic_flag
typedef atomics::detail::operations< 1u, false > operations;
typedef operations::storage_type storage_type;
- storage_type m_storage;
+ operations::aligned_storage_type m_storage;
BOOST_FORCEINLINE BOOST_CONSTEXPR atomic_flag() BOOST_NOEXCEPT : m_storage(0)
{
@@ -50,14 +50,14 @@ struct atomic_flag
BOOST_FORCEINLINE bool test_and_set(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return operations::test_and_set(m_storage, order);
+ return operations::test_and_set(m_storage.value, order);
}
BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::clear(m_storage, order);
+ operations::clear(m_storage.value, order);
}
BOOST_DELETED_FUNCTION(atomic_flag(atomic_flag const&))
diff --git a/boost/atomic/detail/atomic_template.hpp b/boost/atomic/detail/atomic_template.hpp
index bc3922a711..2deaded62f 100644
--- a/boost/atomic/detail/atomic_template.hpp
+++ b/boost/atomic/detail/atomic_template.hpp
@@ -22,7 +22,7 @@
#include <boost/type_traits/is_signed.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <boost/atomic/detail/config.hpp>
-#include <boost/atomic/detail/casts.hpp>
+#include <boost/atomic/detail/bitwise_cast.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
@@ -87,7 +87,7 @@ public:
typedef typename operations::storage_type storage_type;
protected:
- storage_type m_storage;
+ typename operations::aligned_storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
@@ -99,7 +99,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage, static_cast< storage_type >(v), order);
+ operations::store(m_storage.value, static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -107,22 +107,22 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return static_cast< value_type >(operations::load(m_storage, order));
+ return static_cast< value_type >(operations::load(m_storage.value, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order));
+ return static_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order));
+ return static_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::exchange(m_storage, static_cast< storage_type >(v), order));
+ return static_cast< value_type >(operations::exchange(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -132,7 +132,7 @@ public:
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = static_cast< value_type >(old_value);
return res;
}
@@ -149,7 +149,7 @@ public:
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = static_cast< value_type >(old_value);
return res;
}
@@ -161,22 +161,22 @@ public:
BOOST_FORCEINLINE value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_and(m_storage, static_cast< storage_type >(v), order));
+ return static_cast< value_type >(operations::fetch_and(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_or(m_storage, static_cast< storage_type >(v), order));
+ return static_cast< value_type >(operations::fetch_or(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_xor(m_storage, static_cast< storage_type >(v), order));
+ return static_cast< value_type >(operations::fetch_xor(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
- return operations::is_lock_free(m_storage);
+ return operations::is_lock_free(m_storage.value);
}
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
@@ -243,7 +243,7 @@ public:
typedef operations::storage_type storage_type;
protected:
- storage_type m_storage;
+ operations::aligned_storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
@@ -255,7 +255,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage, static_cast< storage_type >(v), order);
+ operations::store(m_storage.value, static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -263,12 +263,12 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return !!operations::load(m_storage, order);
+ return !!operations::load(m_storage.value, order);
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return !!operations::exchange(m_storage, static_cast< storage_type >(v), order);
+ return !!operations::exchange(m_storage.value, static_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -278,7 +278,7 @@ public:
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
}
@@ -295,7 +295,7 @@ public:
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
}
@@ -307,7 +307,7 @@ public:
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
- return operations::is_lock_free(m_storage);
+ return operations::is_lock_free(m_storage.value);
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
@@ -330,10 +330,10 @@ public:
typedef typename operations::storage_type storage_type;
protected:
- storage_type m_storage;
+ typename operations::aligned_storage_type m_storage;
public:
- BOOST_FORCEINLINE explicit base_atomic(value_type const& v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::memcpy_cast< storage_type >(v))
+ BOOST_FORCEINLINE explicit base_atomic(value_type const& v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
{
}
@@ -343,7 +343,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage, atomics::detail::memcpy_cast< storage_type >(v), order);
+ operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -351,12 +351,12 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::memcpy_cast< value_type >(operations::load(m_storage, order));
+ return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::memcpy_cast< value_type >(operations::exchange(m_storage, atomics::detail::memcpy_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -365,9 +365,9 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- storage_type old_value = atomics::detail::memcpy_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::memcpy_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::memcpy_cast< value_type >(old_value);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -382,9 +382,9 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- storage_type old_value = atomics::detail::memcpy_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::memcpy_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::memcpy_cast< value_type >(old_value);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -395,7 +395,7 @@ public:
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
- return operations::is_lock_free(m_storage);
+ return operations::is_lock_free(m_storage.value);
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
@@ -419,11 +419,11 @@ public:
typedef typename operations::storage_type storage_type;
protected:
- storage_type m_storage;
+ typename operations::aligned_storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
- BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
+ BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
{
}
@@ -433,7 +433,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
+ operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -441,22 +441,22 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
+ return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v * sizeof(T)), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v * sizeof(T)), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -465,9 +465,9 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::union_cast< value_type >(old_value);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -482,9 +482,9 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::union_cast< value_type >(old_value);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -495,7 +495,7 @@ public:
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
- return operations::is_lock_free(m_storage);
+ return operations::is_lock_free(m_storage.value);
}
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
@@ -549,11 +549,11 @@ public:
typedef operations::storage_type storage_type;
protected:
- storage_type m_storage;
+ operations::aligned_storage_type m_storage;
public:
BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
- BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::union_cast< storage_type >(v))
+ BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
{
}
@@ -563,7 +563,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage, atomics::detail::union_cast< storage_type >(v), order);
+ operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -571,22 +571,22 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::union_cast< value_type >(operations::load(m_storage, order));
+ return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::union_cast< value_type >(operations::fetch_add(m_storage, static_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::union_cast< value_type >(operations::fetch_sub(m_storage, static_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::union_cast< value_type >(operations::exchange(m_storage, atomics::detail::union_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -595,9 +595,9 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::union_cast< value_type >(old_value);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -612,9 +612,9 @@ public:
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
- storage_type old_value = atomics::detail::union_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage, old_value, atomics::detail::union_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::union_cast< value_type >(old_value);
+ storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -625,7 +625,7 @@ public:
BOOST_FORCEINLINE bool is_lock_free() const volatile BOOST_NOEXCEPT
{
- return operations::is_lock_free(m_storage);
+ return operations::is_lock_free(m_storage.value);
}
BOOST_FORCEINLINE value_type operator++(int) volatile BOOST_NOEXCEPT
@@ -696,10 +696,10 @@ public:
return this->load();
}
- BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage; }
- BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage; }
- BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage; }
- BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage; }
+ BOOST_FORCEINLINE storage_type& storage() BOOST_NOEXCEPT { return this->m_storage.value; }
+ BOOST_FORCEINLINE storage_type volatile& storage() volatile BOOST_NOEXCEPT { return this->m_storage.value; }
+ BOOST_FORCEINLINE storage_type const& storage() const BOOST_NOEXCEPT { return this->m_storage.value; }
+ BOOST_FORCEINLINE storage_type const volatile& storage() const volatile BOOST_NOEXCEPT { return this->m_storage.value; }
BOOST_DELETED_FUNCTION(atomic(atomic const&))
BOOST_DELETED_FUNCTION(atomic& operator= (atomic const&))
diff --git a/boost/atomic/detail/casts.hpp b/boost/atomic/detail/bitwise_cast.hpp
index db28bc25ff..8654d10b95 100644
--- a/boost/atomic/detail/casts.hpp
+++ b/boost/atomic/detail/bitwise_cast.hpp
@@ -8,16 +8,18 @@
* Copyright (c) 2013 - 2014 Andrey Semashev
*/
/*!
- * \file atomic/detail/casts.hpp
+ * \file atomic/detail/bitwise_cast.hpp
*
- * This header defines \c union_cast and \c memcpy_cast used to convert between storage and value types
+ * This header defines \c bitwise_cast used to convert between storage and value types
*/
-#ifndef BOOST_ATOMIC_DETAIL_CASTS_HPP_INCLUDED_
-#define BOOST_ATOMIC_DETAIL_CASTS_HPP_INCLUDED_
+#ifndef BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
-#include <cstring>
#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
+#include <cstring>
+#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -28,27 +30,14 @@ namespace atomics {
namespace detail {
template< typename To, typename From >
-BOOST_FORCEINLINE To union_cast(From const& from) BOOST_NOEXCEPT
-{
- union
- {
- To as_to;
- From as_from;
- }
- caster = {};
- caster.as_from = from;
- return caster.as_to;
-}
-
-template< typename To, typename From >
-BOOST_FORCEINLINE To memcpy_cast(From const& from) BOOST_NOEXCEPT
+BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
{
struct
{
To to;
}
value = {};
- std::memcpy
+ BOOST_ATOMIC_DETAIL_MEMCPY
(
&reinterpret_cast< char& >(value.to),
&reinterpret_cast< const char& >(from),
@@ -61,4 +50,4 @@ BOOST_FORCEINLINE To memcpy_cast(From const& from) BOOST_NOEXCEPT
} // namespace atomics
} // namespace boost
-#endif // BOOST_ATOMIC_DETAIL_CASTS_HPP_INCLUDED_
+#endif // BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
diff --git a/boost/atomic/detail/caps_gcc_ppc.hpp b/boost/atomic/detail/caps_gcc_ppc.hpp
index 6dbdde826d..ee2346081b 100644
--- a/boost/atomic/detail/caps_gcc_ppc.hpp
+++ b/boost/atomic/detail/caps_gcc_ppc.hpp
@@ -25,7 +25,7 @@
#define BOOST_ATOMIC_INT8_LOCK_FREE 2
#define BOOST_ATOMIC_INT16_LOCK_FREE 2
#define BOOST_ATOMIC_INT32_LOCK_FREE 2
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined(__PPC64__)
#define BOOST_ATOMIC_INT64_LOCK_FREE 2
#endif
#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
diff --git a/boost/atomic/detail/config.hpp b/boost/atomic/detail/config.hpp
index 6b0e418693..489281c2b4 100644
--- a/boost/atomic/detail/config.hpp
+++ b/boost/atomic/detail/config.hpp
@@ -21,6 +21,30 @@
#pragma once
#endif
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_memcpy)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY
+#endif
+#if __has_builtin(__builtin_memcmp)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP
+#endif
+#elif defined(BOOST_GCC)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
+#define BOOST_ATOMIC_DETAIL_MEMCPY __builtin_memcpy
+#else
+#define BOOST_ATOMIC_DETAIL_MEMCPY std::memcpy
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP)
+#define BOOST_ATOMIC_DETAIL_MEMCMP __builtin_memcmp
+#else
+#define BOOST_ATOMIC_DETAIL_MEMCMP std::memcmp
+#endif
+
#if defined(__CUDACC__)
// nvcc does not support alternatives in asm statement constraints
#define BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES
@@ -36,4 +60,16 @@
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA
#endif
#if (defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)) && (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) < 403)
+// This macro indicates we're using older binutils that don't support implied zero displacements for memory operands,
+// making code like this invalid:
+// movl 4+(%%edx), %%eax
+#define BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS
+#endif
+
+#if defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40500)
+// This macro indicates that the compiler does not support allocating rax:rdx register pairs ("A") in asm blocks
+#define BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS
+#endif
+
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
diff --git a/boost/atomic/detail/int_sizes.hpp b/boost/atomic/detail/int_sizes.hpp
index d06ed42a11..eada4fff07 100644
--- a/boost/atomic/detail/int_sizes.hpp
+++ b/boost/atomic/detail/int_sizes.hpp
@@ -117,8 +117,8 @@
#include <wchar.h>
#include <boost/cstdint.hpp>
-#if defined(_MSC_VER) && _MSC_VER <= 1310
-// MSVC 7.1 defines WCHAR_MAX to a value not suitable for constant expressions
+#if defined(_MSC_VER) && (_MSC_VER <= 1310 || defined(UNDER_CE) && _MSC_VER <= 1500)
+// MSVC 7.1 and MSVC 8 (arm) define WCHAR_MAX to a value not suitable for constant expressions
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
#elif (WCHAR_MAX + 0) == 0xff || (WCHAR_MAX + 0) == 0x7f
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 1
diff --git a/boost/atomic/detail/operations_fwd.hpp b/boost/atomic/detail/operations_fwd.hpp
index 69049e4630..efd4970747 100644
--- a/boost/atomic/detail/operations_fwd.hpp
+++ b/boost/atomic/detail/operations_fwd.hpp
@@ -14,6 +14,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPERATIONS_FWD_HPP_INCLUDED_
+#include <cstddef>
#include <boost/atomic/detail/config.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
@@ -24,7 +25,7 @@ namespace boost {
namespace atomics {
namespace detail {
-template< unsigned int Size, bool Signed >
+template< std::size_t Size, bool Signed >
struct operations;
} // namespace detail
diff --git a/boost/atomic/detail/ops_cas_based.hpp b/boost/atomic/detail/ops_cas_based.hpp
index 7f8d288f7f..504cedb70f 100644
--- a/boost/atomic/detail/ops_cas_based.hpp
+++ b/boost/atomic/detail/ops_cas_based.hpp
@@ -16,6 +16,7 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -26,6 +27,21 @@ namespace atomics {
namespace detail {
template< typename Base >
+struct cas_based_exchange :
+ public Base
+{
+ typedef typename Base::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
+ return old_val;
+ }
+};
+
+template< typename Base >
struct cas_based_operations :
public Base
{
@@ -33,49 +49,47 @@ struct cas_based_operations :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val + v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val - v, order, memory_order_relaxed)) {}
return old_val;
}
- static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
- {
- storage_type old_val = Base::load(storage, memory_order_relaxed);
- while (!Base::compare_exchange_weak(storage, old_val, v, order, memory_order_relaxed)) {}
- return old_val;
- }
-
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val & v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val | v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
while (!Base::compare_exchange_weak(storage, old_val, old_val ^ v, order, memory_order_relaxed)) {}
return old_val;
}
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- return !!exchange(storage, (storage_type)1, order);
+ return !!Base::exchange(storage, (storage_type)1, order);
}
static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
diff --git a/boost/atomic/detail/ops_emulated.hpp b/boost/atomic/detail/ops_emulated.hpp
index 597490f2d7..0dc4e6828a 100644
--- a/boost/atomic/detail/ops_emulated.hpp
+++ b/boost/atomic/detail/ops_emulated.hpp
@@ -14,6 +14,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_EMULATED_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -90,7 +91,17 @@ struct emulated_operations
static BOOST_FORCEINLINE bool compare_exchange_weak(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
- return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
+ // Note: This function is the exact copy of compare_exchange_strong. The reason we're not just forwarding the call
+ // is that MSVC-12 ICEs in this case.
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ const bool res = old_val == expected;
+ if (res)
+ s = desired;
+ expected = old_val;
+
+ return res;
}
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
@@ -136,10 +147,11 @@ struct emulated_operations
}
};
-template< unsigned int Size, bool Signed >
+template< std::size_t Size, bool Signed >
struct operations :
public emulated_operations< typename make_storage_type< Size, Signed >::type >
{
+ typedef typename make_storage_type< Size, Signed >::aligned aligned_storage_type;
};
} // namespace detail
diff --git a/boost/atomic/detail/ops_extending_cas_based.hpp b/boost/atomic/detail/ops_extending_cas_based.hpp
index d7f3c5f5de..3f21031f12 100644
--- a/boost/atomic/detail/ops_extending_cas_based.hpp
+++ b/boost/atomic/detail/ops_extending_cas_based.hpp
@@ -14,6 +14,7 @@
#ifndef BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_EXTENDING_CAS_BASED_HPP_INCLUDED_
+#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
@@ -26,7 +27,7 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename Base, unsigned int Size, bool Signed >
+template< typename Base, std::size_t Size, bool Signed >
struct extending_cas_based_operations :
public Base
{
@@ -35,7 +36,8 @@ struct extending_cas_based_operations :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
emulated_storage_type new_val;
do
{
@@ -47,7 +49,8 @@ struct extending_cas_based_operations :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::load(storage, memory_order_relaxed);
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
emulated_storage_type new_val;
do
{
diff --git a/boost/atomic/detail/ops_gcc_alpha.hpp b/boost/atomic/detail/ops_gcc_alpha.hpp
index 6978c7f1c4..3c0e258ceb 100644
--- a/boost/atomic/detail/ops_gcc_alpha.hpp
+++ b/boost/atomic/detail/ops_gcc_alpha.hpp
@@ -88,6 +88,7 @@ struct operations< 4u, Signed > :
public gcc_alpha_operations_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -599,6 +600,7 @@ struct operations< 8u, Signed > :
public gcc_alpha_operations_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/ops_gcc_arm.hpp b/boost/atomic/detail/ops_gcc_arm.hpp
index a28da6919d..d2c2f39a2c 100644
--- a/boost/atomic/detail/ops_gcc_arm.hpp
+++ b/boost/atomic/detail/ops_gcc_arm.hpp
@@ -156,6 +156,7 @@ struct operations< 4u, Signed > :
public gcc_arm_operations_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -677,6 +678,7 @@ struct operations< 8u, Signed > :
public gcc_arm_operations_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/ops_gcc_atomic.hpp b/boost/atomic/detail/ops_gcc_atomic.hpp
index 2e4c37bec7..573a695d08 100644
--- a/boost/atomic/detail/ops_gcc_atomic.hpp
+++ b/boost/atomic/detail/ops_gcc_atomic.hpp
@@ -180,6 +180,7 @@ template< bool Signed >
struct operations< 16u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >
{
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#endif
@@ -208,6 +209,7 @@ template< bool Signed >
struct operations< 8u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
{
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#else
@@ -216,6 +218,7 @@ template< bool Signed >
struct operations< 8u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >
{
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};
#endif
@@ -236,6 +239,7 @@ template< bool Signed >
struct operations< 4u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
{
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};
#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -244,6 +248,7 @@ template< bool Signed >
struct operations< 4u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
{
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -254,6 +259,7 @@ template< bool Signed >
struct operations< 4u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >
{
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
};
#endif
@@ -274,6 +280,7 @@ template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
{
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -282,6 +289,7 @@ template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
{
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};
#else
@@ -290,6 +298,7 @@ template< bool Signed >
struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
{
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#endif
@@ -300,6 +309,7 @@ template< bool Signed >
struct operations< 2u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >
{
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
};
#endif
@@ -320,6 +330,7 @@ template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
{
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
@@ -328,6 +339,7 @@ template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
{
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -336,6 +348,7 @@ template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
{
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
};
#else
@@ -344,6 +357,7 @@ template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
{
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#endif
@@ -354,6 +368,7 @@ template< bool Signed >
struct operations< 1u, Signed > :
public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >
{
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
};
#endif
diff --git a/boost/atomic/detail/ops_gcc_ppc.hpp b/boost/atomic/detail/ops_gcc_ppc.hpp
index 8698ee8d76..9131791193 100644
--- a/boost/atomic/detail/ops_gcc_ppc.hpp
+++ b/boost/atomic/detail/ops_gcc_ppc.hpp
@@ -30,6 +30,9 @@ namespace boost {
namespace atomics {
namespace detail {
+// The implementation below uses information from this document:
+// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html
+
/*
Refer to: Motorola: "Programming Environments Manual for 32-Bit
Implementations of the PowerPC Architecture", Appendix E:
@@ -84,7 +87,7 @@ struct gcc_ppc_operations_base
{
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined(__PPC64__)
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
else if ((order & memory_order_release) != 0)
@@ -100,12 +103,6 @@ struct gcc_ppc_operations_base
if ((order & (memory_order_consume | memory_order_acquire)) != 0)
__asm__ __volatile__ ("isync" ::: "memory");
}
-
- static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
- {
- if (order == memory_order_seq_cst)
- __asm__ __volatile__ ("sync" ::: "memory");
- }
};
@@ -114,33 +111,47 @@ struct operations< 4u, Signed > :
public gcc_ppc_operations_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
__asm__ __volatile__
(
- "stw %1, %0\n"
+ "stw %1, %0\n\t"
: "+m" (storage)
: "r" (v)
);
- fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v;
- __asm__ __volatile__
- (
- "lwz %0, %1\n"
- "cmpw %0, %0\n"
- "bne- 1f\n"
- "1:\n"
- : "=&r" (v)
- : "m" (storage)
- : "cr0"
- );
- fence_after(order);
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ {
+ __asm__ __volatile__
+ (
+ "lwz %0, %1\n\t"
+ "cmpw %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "lwz %0, %1\n\t"
+ : "=&r" (v)
+ : "m" (storage)
+ );
+ }
return v;
}
@@ -150,10 +161,10 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y1\n"
- "stwcx. %2,%y1\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y1\n\t"
+ "stwcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
@@ -169,14 +180,14 @@ struct operations< 4u, Signed > :
fence_before(success_order);
__asm__ __volatile__
(
- "li %1, 0\n"
- "lwarx %0,%y2\n"
- "cmpw %0, %3\n"
- "bne- 1f\n"
- "stwcx. %4,%y2\n"
- "bne- 1f\n"
- "li %1, 1\n"
- "1:"
+ "li %1, 0\n\t"
+ "lwarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stwcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -195,14 +206,14 @@ struct operations< 4u, Signed > :
fence_before(success_order);
__asm__ __volatile__
(
- "li %1, 0\n"
- "0: lwarx %0,%y2\n"
- "cmpw %0, %3\n"
- "bne- 1f\n"
- "stwcx. %4,%y2\n"
- "bne- 0b\n"
- "li %1, 1\n"
- "1:"
+ "li %1, 0\n\t"
+ "0: lwarx %0,%y2\n\t"
+ "cmpw %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stwcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -220,11 +231,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "add %1,%0,%3\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -239,11 +250,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "sub %1,%0,%3\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -258,11 +269,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "and %1,%0,%3\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -277,11 +288,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "or %1,%0,%3\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -296,11 +307,11 @@ struct operations< 4u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "xor %1,%0,%3\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -339,12 +350,12 @@ struct operations< 1u, false > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "add %1,%0,%3\n"
- "rlwinm %1, %1, 0, 0xff\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -359,12 +370,12 @@ struct operations< 1u, false > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "sub %1,%0,%3\n"
- "rlwinm %1, %1, 0, 0xff\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -387,12 +398,12 @@ struct operations< 1u, true > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "add %1,%0,%3\n"
- "extsb %1, %1\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "extsb %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -407,12 +418,12 @@ struct operations< 1u, true > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "sub %1,%0,%3\n"
- "extsb %1, %1\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "extsb %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -436,12 +447,12 @@ struct operations< 2u, false > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "add %1,%0,%3\n"
- "rlwinm %1, %1, 0, 0xffff\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xffff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -456,12 +467,12 @@ struct operations< 2u, false > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "sub %1,%0,%3\n"
- "rlwinm %1, %1, 0, 0xffff\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "rlwinm %1, %1, 0, 0xffff\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -484,12 +495,12 @@ struct operations< 2u, true > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "add %1,%0,%3\n"
- "extsh %1, %1\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "extsh %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -504,12 +515,12 @@ struct operations< 2u, true > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "lwarx %0,%y2\n"
- "sub %1,%0,%3\n"
- "extsh %1, %1\n"
- "stwcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "extsh %1, %1\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -520,40 +531,54 @@ struct operations< 2u, true > :
};
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined(__PPC64__)
template< bool Signed >
struct operations< 8u, Signed > :
public gcc_ppc_operations_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
fence_before(order);
__asm__ __volatile__
(
- "std %1, %0\n"
+ "std %1, %0\n\t"
: "+m" (storage)
: "r" (v)
);
- fence_after_store(order);
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type v;
- __asm__ __volatile__
- (
- "ld %0, %1\n"
- "cmpd %0, %0\n"
- "bne- 1f\n"
- "1:\n"
- : "=&b" (v)
- : "m" (storage)
- : "cr0"
- );
- fence_after(order);
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("sync" ::: "memory");
+ if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ {
+ __asm__ __volatile__
+ (
+ "ld %0, %1\n\t"
+ "cmpd %0, %0\n\t"
+ "bne- 1f\n\t"
+ "1:\n\t"
+ "isync\n\t"
+ : "=&b" (v)
+ : "m" (storage)
+ : "cr0", "memory"
+ );
+ }
+ else
+ {
+ __asm__ __volatile__
+ (
+ "ld %0, %1\n\t"
+ : "=&b" (v)
+ : "m" (storage)
+ );
+ }
return v;
}
@@ -563,10 +588,10 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldarx %0,%y1\n"
- "stdcx. %2,%y1\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "ldarx %0,%y1\n\t"
+ "stdcx. %2,%y1\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "+Z" (storage)
: "b" (v)
: "cr0"
@@ -582,13 +607,13 @@ struct operations< 8u, Signed > :
fence_before(success_order);
__asm__ __volatile__
(
- "li %1, 0\n"
- "ldarx %0,%y2\n"
- "cmpd %0, %3\n"
- "bne- 1f\n"
- "stdcx. %4,%y2\n"
- "bne- 1f\n"
- "li %1, 1\n"
+ "li %1, 0\n\t"
+ "ldarx %0,%y2\n\t"
+ "cmpd %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stdcx. %4,%y2\n\t"
+ "bne- 1f\n\t"
+ "li %1, 1\n\t"
"1:"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
@@ -608,14 +633,14 @@ struct operations< 8u, Signed > :
fence_before(success_order);
__asm__ __volatile__
(
- "li %1, 0\n"
- "0: ldarx %0,%y2\n"
- "cmpd %0, %3\n"
- "bne- 1f\n"
- "stdcx. %4,%y2\n"
- "bne- 0b\n"
- "li %1, 1\n"
- "1:"
+ "li %1, 0\n\t"
+ "0: ldarx %0,%y2\n\t"
+ "cmpd %0, %3\n\t"
+ "bne- 1f\n\t"
+ "stdcx. %4,%y2\n\t"
+ "bne- 0b\n\t"
+ "li %1, 1\n\t"
+ "1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
: "b" (expected), "b" (desired)
: "cr0"
@@ -633,11 +658,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldarx %0,%y2\n"
- "add %1,%0,%3\n"
- "stdcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -652,11 +677,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldarx %0,%y2\n"
- "sub %1,%0,%3\n"
- "stdcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -671,11 +696,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldarx %0,%y2\n"
- "and %1,%0,%3\n"
- "stdcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -690,11 +715,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldarx %0,%y2\n"
- "or %1,%0,%3\n"
- "stdcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -709,11 +734,11 @@ struct operations< 8u, Signed > :
fence_before(order);
__asm__ __volatile__
(
- "1:\n"
- "ldarx %0,%y2\n"
- "xor %1,%0,%3\n"
- "stdcx. %1,%y2\n"
- "bne- 1b\n"
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
: "=&b" (original), "=&b" (tmp), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
@@ -738,7 +763,7 @@ struct operations< 8u, Signed > :
}
};
-#endif // defined(__powerpc64__)
+#endif // defined(__powerpc64__) || defined(__PPC64__)
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
@@ -747,14 +772,12 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
case memory_order_consume:
case memory_order_acquire:
- __asm__ __volatile__ ("isync" ::: "memory");
- break;
case memory_order_release:
-#if defined(__powerpc64__)
+ case memory_order_acq_rel:
+#if defined(__powerpc64__) || defined(__PPC64__)
__asm__ __volatile__ ("lwsync" ::: "memory");
break;
#endif
- case memory_order_acq_rel:
case memory_order_seq_cst:
__asm__ __volatile__ ("sync" ::: "memory");
break;
@@ -765,7 +788,11 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
if (order != memory_order_relaxed)
+#if defined(__ibmxl__) || defined(__IBMCPP__)
+ __fence();
+#else
__asm__ __volatile__ ("" ::: "memory");
+#endif
}
} // namespace detail
diff --git a/boost/atomic/detail/ops_gcc_sparc.hpp b/boost/atomic/detail/ops_gcc_sparc.hpp
index ea6df91dc6..020882bbfa 100644
--- a/boost/atomic/detail/ops_gcc_sparc.hpp
+++ b/boost/atomic/detail/ops_gcc_sparc.hpp
@@ -62,6 +62,7 @@ struct gcc_sparc_cas32 :
public gcc_sparc_cas_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -104,19 +105,6 @@ struct gcc_sparc_cas32 :
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
- static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
- {
- return true;
- }
-};
-
-template< bool Signed >
-struct operations< 4u, Signed > :
- public cas_based_operations< gcc_sparc_cas32< Signed > >
-{
- typedef cas_based_operations< gcc_sparc_cas32< Signed > > base_type;
- typedef typename base_type::storage_type storage_type;
-
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -131,13 +119,19 @@ struct operations< 4u, Signed > :
return v;
}
- static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
- return !!exchange(storage, (storage_type)1, order);
+ return true;
}
};
template< bool Signed >
+struct operations< 4u, Signed > :
+ public cas_based_operations< gcc_sparc_cas32< Signed > >
+{
+};
+
+template< bool Signed >
struct operations< 1u, Signed > :
public extending_cas_based_operations< operations< 4u, Signed >, 1u, Signed >
{
@@ -154,6 +148,7 @@ struct gcc_sparc_cas64 :
public gcc_sparc_cas_base
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -204,7 +199,7 @@ struct gcc_sparc_cas64 :
template< bool Signed >
struct operations< 8u, Signed > :
- public cas_based_operations< gcc_sparc_cas64< Signed > >
+ public cas_based_operations< cas_based_exchange< gcc_sparc_cas64< Signed > > >
{
};
diff --git a/boost/atomic/detail/ops_gcc_sync.hpp b/boost/atomic/detail/ops_gcc_sync.hpp
index f4fc3331c6..87f2f53029 100644
--- a/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/boost/atomic/detail/ops_gcc_sync.hpp
@@ -165,6 +165,17 @@ struct operations< 1u, Signed > :
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
#endif
{
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+#else
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+#endif
};
#endif
@@ -181,6 +192,15 @@ struct operations< 2u, Signed > :
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
#endif
{
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+#else
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+#endif
};
#endif
@@ -195,6 +215,13 @@ struct operations< 4u, Signed > :
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
#endif
{
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+#else
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+#endif
};
#endif
@@ -207,6 +234,11 @@ struct operations< 8u, Signed > :
public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
#endif
{
+#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+#else
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+#endif
};
#endif
@@ -215,6 +247,7 @@ template< bool Signed >
struct operations< 16u, Signed > :
public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >
{
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
};
#endif
diff --git a/boost/atomic/detail/ops_gcc_x86.hpp b/boost/atomic/detail/ops_gcc_x86.hpp
index 6e600457aa..f68125c491 100644
--- a/boost/atomic/detail/ops_gcc_x86.hpp
+++ b/boost/atomic/detail/ops_gcc_x86.hpp
@@ -115,6 +115,7 @@ struct operations< 1u, Signed > :
{
typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -201,6 +202,7 @@ struct operations< 2u, Signed > :
{
typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -287,6 +289,7 @@ struct operations< 4u, Signed > :
{
typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -383,6 +386,7 @@ struct operations< 8u, Signed > :
{
typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/boost/atomic/detail/ops_gcc_x86_dcas.hpp
index a6109f926c..f7a84f79ce 100644
--- a/boost/atomic/detail/ops_gcc_x86_dcas.hpp
+++ b/boost/atomic/detail/ops_gcc_x86_dcas.hpp
@@ -36,6 +36,7 @@ template< bool Signed >
struct gcc_dcas_x86
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -68,6 +69,37 @@ struct gcc_dcas_x86
}
else
{
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+#if defined(__PIC__)
+ uint32_t scratch;
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %[scratch]\n\t"
+ "movl %[value_lo], %%ebx\n\t"
+ "movl %[dest], %%eax\n\t"
+ "movl 4+%[dest], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest]\n\t"
+ "jne 1b\n\t"
+ "movl %[scratch], %%ebx\n\t"
+ : [scratch] "=m" (scratch), [dest] "=o" (storage)
+ : [value_lo] "a" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "edx", "memory"
+ );
+#else // defined(__PIC__)
+ __asm__ __volatile__
+ (
+ "movl %[dest], %%eax\n\t"
+ "movl 4+%[dest], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest]\n\t"
+ "jne 1b\n\t"
+ : [dest] "=o" (storage)
+ : [value_lo] "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "eax", "edx", "memory"
+ );
+#endif // defined(__PIC__)
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
#if defined(__PIC__)
uint32_t scratch;
__asm__ __volatile__
@@ -79,7 +111,7 @@ struct gcc_dcas_x86
".align 16\n\t"
"1: lock; cmpxchg8b 0(%[dest])\n\t"
"jne 1b\n\t"
- "movl %[scratch], %%ebx"
+ "movl %[scratch], %%ebx\n\t"
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
: [scratch] "=m,m" (scratch)
: [value_lo] "a,a" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
@@ -89,7 +121,7 @@ struct gcc_dcas_x86
#endif
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "edx", "memory"
);
-#else
+#else // defined(__PIC__)
__asm__ __volatile__
(
"movl 0(%[dest]), %%eax\n\t"
@@ -105,7 +137,8 @@ struct gcc_dcas_x86
#endif
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "eax", "edx", "memory"
);
-#endif
+#endif // defined(__PIC__)
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
}
}
@@ -152,7 +185,7 @@ struct gcc_dcas_x86
(
"movl %%ebx, %%eax\n\t"
"movl %%ecx, %%edx\n\t"
- "lock; cmpxchg8b %[storage]"
+ "lock; cmpxchg8b %[storage]\n\t"
: "=&A" (value)
: [storage] "m" (storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -191,7 +224,7 @@ struct gcc_dcas_x86
"movl %[desired_lo], %%ebx\n\t"
"lock; cmpxchg8b %[dest]\n\t"
"movl %[scratch], %%ebx\n\t"
- "sete %[success]"
+ "sete %[success]\n\t"
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
: "+A,A,A,A,A,A" (expected), [dest] "+m,m,m,m,m,m" (storage), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
: [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32))
@@ -207,7 +240,7 @@ struct gcc_dcas_x86
__asm__ __volatile__
(
"lock; cmpxchg8b %[dest]\n\t"
- "sete %[success]"
+ "sete %[success]\n\t"
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
: "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
: "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32))
@@ -227,6 +260,97 @@ struct gcc_dcas_x86
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+ // Clang cannot allocate eax:edx register pairs but it has sync intrinsics
+ storage_type old_val = storage;
+ while (true)
+ {
+ storage_type val = __sync_val_compare_and_swap(&storage, old_val, v);
+ if (val == old_val)
+ return val;
+ old_val = val;
+ }
+#elif !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+#if defined(__PIC__)
+ uint32_t scratch;
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %[scratch]\n\t"
+ "movl %%eax, %%ebx\n\t"
+ "movl %%edx, %%ecx\n\t"
+ "movl %[dest], %%eax\n\t"
+ "movl 4+%[dest], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest]\n\t"
+ "jne 1b\n\t"
+ "movl %[scratch], %%ebx\n\t"
+ : "+A" (v), [scratch] "=m" (scratch), [dest] "+o" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "ecx", "memory"
+ );
+ return v;
+#else // defined(__PIC__)
+ __asm__ __volatile__
+ (
+ "movl %[dest], %%eax\n\t"
+ "movl 4+%[dest], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest]\n\t"
+ "jne 1b\n\t"
+ : "=A" (v), [dest] "+o" (storage)
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+#endif // defined(__PIC__)
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+#if defined(__PIC__)
+ uint32_t scratch;
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %[scratch]\n\t"
+ "movl %%eax, %%ebx\n\t"
+ "movl %%edx, %%ecx\n\t"
+ "movl 0(%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "jne 1b\n\t"
+ "movl %[scratch], %%ebx\n\t"
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
+ : "+A,A" (v), [scratch] "=m,m" (scratch)
+ : [dest] "D,S" (&storage)
+#else
+ : "+A" (v), [scratch] "=m" (scratch)
+ : [dest] "D" (&storage)
+#endif
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "ecx", "memory"
+ );
+ return v;
+#else // defined(__PIC__)
+ __asm__ __volatile__
+ (
+ "movl 0(%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "jne 1b\n\t"
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
+ : "=A,A" (v)
+ : "b,b" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
+#else
+ : "=A" (v)
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
+#endif
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return v;
+#endif // defined(__PIC__)
+#endif
+ }
+
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
@@ -241,21 +365,36 @@ template< bool Signed >
struct gcc_dcas_x86_64
{
typedef typename make_storage_type< 16u, Signed >::type storage_type;
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
uint64_t const* p_value = (uint64_t const*)&v;
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq %[dest], %%rax\n\t"
+ "movq 8+%[dest], %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b %[dest]\n\t"
+ "jne 1b\n\t"
+ : [dest] "=o" (storage)
+ : "b" (p_value[0]), "c" (p_value[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "rax", "rdx", "memory"
+ );
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
__asm__ __volatile__
(
"movq 0(%[dest]), %%rax\n\t"
"movq 8(%[dest]), %%rdx\n\t"
".align 16\n\t"
"1: lock; cmpxchg16b 0(%[dest])\n\t"
- "jne 1b"
+ "jne 1b\n\t"
:
: "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "rax", "rdx", "memory"
);
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
@@ -264,16 +403,49 @@ struct gcc_dcas_x86_64
// Clang cannot allocate rax:rdx register pairs but it has sync intrinsics
storage_type value = storage_type();
return __sync_val_compare_and_swap(&storage, value, value);
-#else
+#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+ // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
storage_type value;
// We don't care for comparison result here; the previous value will be stored into value anyway.
// Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
__asm__ __volatile__
(
"movq %%rbx, %%rax\n\t"
"movq %%rcx, %%rdx\n\t"
- "lock; cmpxchg16b %[storage]"
+ "lock; cmpxchg16b %[storage]\n\t"
+ "movq %%rax, %[value]\n\t"
+ "movq %%rdx, 8+%[value]\n\t"
+ : [value] "=o" (value)
+ : [storage] "m" (storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ );
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq %%rbx, %%rax\n\t"
+ "movq %%rcx, %%rdx\n\t"
+ "lock; cmpxchg16b %[storage]\n\t"
+ "movq %%rax, 0(%[value])\n\t"
+ "movq %%rdx, 8(%[value])\n\t"
+ :
+ : [storage] "m" (storage), [value] "r" (&value)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ );
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+
+ return value;
+#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+ storage_type value;
+
+ // We don't care for comparison result here; the previous value will be stored into value anyway.
+ // Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.
+ __asm__ __volatile__
+ (
+ "movq %%rbx, %%rax\n\t"
+ "movq %%rcx, %%rdx\n\t"
+ "lock; cmpxchg16b %[storage]\n\t"
: "=&A" (value)
: [storage] "m" (storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -291,13 +463,46 @@ struct gcc_dcas_x86_64
storage_type old_expected = expected;
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
-#else
+#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+ // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
+ uint64_t const* p_desired = (uint64_t const*)&desired;
+ bool success;
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq %[expected], %%rax\n\t"
+ "movq 8+%[expected], %%rdx\n\t"
+ "lock; cmpxchg16b %[dest]\n\t"
+ "sete %[success]\n\t"
+ "movq %%rax, %[expected]\n\t"
+ "movq %%rdx, 8+%[expected]\n\t"
+ : [dest] "+m" (storage), [expected] "+o" (expected), [success] "=q" (success)
+ : "b" (p_desired[0]), "c" (p_desired[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ );
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq 0(%[expected]), %%rax\n\t"
+ "movq 8(%[expected]), %%rdx\n\t"
+ "lock; cmpxchg16b %[dest]\n\t"
+ "sete %[success]\n\t"
+ "movq %%rax, 0(%[expected])\n\t"
+ "movq %%rdx, 8(%[expected])\n\t"
+ : [dest] "+m" (storage), [success] "=q" (success)
+ : "b" (p_desired[0]), "c" (p_desired[1]), [expected] "r" (&expected)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ );
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+
+ return success;
+#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
uint64_t const* p_desired = (uint64_t const*)&desired;
bool success;
__asm__ __volatile__
(
"lock; cmpxchg16b %[dest]\n\t"
- "sete %[success]"
+ "sete %[success]\n\t"
#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
: "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
: "b,b" (p_desired[0]), "c,c" (p_desired[1])
@@ -317,6 +522,85 @@ struct gcc_dcas_x86_64
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+#if defined(__clang__)
+ // Clang cannot allocate rax:rdx register pairs but it has sync intrinsics
+ storage_type old_val = storage;
+ while (true)
+ {
+ storage_type val = __sync_val_compare_and_swap(&storage, old_val, v);
+ if (val == old_val)
+ return val;
+ old_val = val;
+ }
+#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+ // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
+ storage_type old_value;
+ uint64_t const* p_value = (uint64_t const*)&v;
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq %[dest], %%rax\n\t"
+ "movq 8+%[dest], %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b %[dest]\n\t"
+ "jne 1b\n\t"
+ "movq %%rax, %[old_value]\n\t"
+ "movq %%rdx, 8+%[old_value]\n\t"
+ : [dest] "+o" (storage), [old_value] "=o" (old_value)
+ : "b" (p_value[0]), "c" (p_value[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ );
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq 0(%[dest]), %%rax\n\t"
+ "movq 8(%[dest]), %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b 0(%[dest])\n\t"
+ "jne 1b\n\t"
+ "movq %%rax, 0(%[old_value])\n\t"
+ "movq %%rdx, 8(%[old_value])\n\t"
+ :
+ : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage), [old_value] "r" (&old_value)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ );
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+
+ return old_value;
+#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+ uint64_t const* p_value = (uint64_t const*)&v;
+#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq %[dest], %%rax\n\t"
+ "movq 8+%[dest], %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b %[dest]\n\t"
+ "jne 1b\n\t"
+ : "=&A" (v), [dest] "+o" (storage)
+ : "b" (p_value[0]), "c" (p_value[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ __asm__ __volatile__
+ (
+ "movq 0(%[dest]), %%rax\n\t"
+ "movq 8(%[dest]), %%rdx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg16b 0(%[dest])\n\t"
+ "jne 1b\n\t"
+ : "=&A" (v)
+ : "b" (p_value[0]), "c" (p_value[1]), [dest] "r" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+
+ return v;
+#endif
+ }
+
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
diff --git a/boost/atomic/detail/ops_linux_arm.hpp b/boost/atomic/detail/ops_linux_arm.hpp
index 25167b1974..41713a35f5 100644
--- a/boost/atomic/detail/ops_linux_arm.hpp
+++ b/boost/atomic/detail/ops_linux_arm.hpp
@@ -87,6 +87,7 @@ struct linux_arm_cas :
public linux_arm_cas_base
{
typedef typename make_storage_type< 4u, Signed >::type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -142,19 +143,19 @@ struct linux_arm_cas :
template< bool Signed >
struct operations< 1u, Signed > :
- public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 1u, Signed >
+ public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 1u, Signed >
{
};
template< bool Signed >
struct operations< 2u, Signed > :
- public extending_cas_based_operations< cas_based_operations< linux_arm_cas< Signed > >, 2u, Signed >
+ public extending_cas_based_operations< cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >, 2u, Signed >
{
};
template< bool Signed >
struct operations< 4u, Signed > :
- public cas_based_operations< linux_arm_cas< Signed > >
+ public cas_based_operations< cas_based_exchange< linux_arm_cas< Signed > > >
{
};
diff --git a/boost/atomic/detail/ops_msvc_arm.hpp b/boost/atomic/detail/ops_msvc_arm.hpp
index 349f7a5ae8..ff953d67e3 100644
--- a/boost/atomic/detail/ops_msvc_arm.hpp
+++ b/boost/atomic/detail/ops_msvc_arm.hpp
@@ -135,6 +135,7 @@ struct operations< 1u, Signed > :
{
typedef msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -300,6 +301,7 @@ struct operations< 2u, Signed > :
{
typedef msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -465,6 +467,7 @@ struct operations< 4u, Signed > :
{
typedef msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -630,6 +633,7 @@ struct operations< 8u, Signed > :
{
typedef msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/ops_msvc_x86.hpp b/boost/atomic/detail/ops_msvc_x86.hpp
index 501d9c622d..589c029864 100644
--- a/boost/atomic/detail/ops_msvc_x86.hpp
+++ b/boost/atomic/detail/ops_msvc_x86.hpp
@@ -166,6 +166,7 @@ struct operations< 4u, Signed > :
{
typedef msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -237,6 +238,7 @@ struct operations< 1u, Signed > :
{
typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -281,6 +283,7 @@ struct operations< 1u, Signed > :
{
typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -421,6 +424,7 @@ struct operations< 2u, Signed > :
{
typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -465,6 +469,7 @@ struct operations< 2u, Signed > :
{
typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -604,6 +609,7 @@ template< bool Signed >
struct msvc_dcas_x86
{
typedef typename make_storage_type< 8u, Signed >::type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
// Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, 8.1.1. Guaranteed Atomic Operations:
//
@@ -611,10 +617,12 @@ struct msvc_dcas_x86
// * Reading or writing a quadword aligned on a 64-bit boundary
//
// Luckily, the memory is almost always 8-byte aligned in our case because atomic<> uses 64 bit native types for storage and dynamic memory allocations
- // have at least 8 byte alignment. The only unfortunate case is when atomic is placeod on the stack and it is not 8-byte aligned (like on 32 bit Windows).
+ // have at least 8 byte alignment. The only unfortunate case is when atomic is placed on the stack and it is not 8-byte aligned (like on 32 bit Windows).
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
storage_type volatile* p = &storage;
if (((uint32_t)p & 0x00000007) == 0)
{
@@ -661,10 +669,14 @@ struct msvc_dcas_x86
mov ebx, backup
};
}
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
storage_type const volatile* p = &storage;
storage_type value;
@@ -710,18 +722,23 @@ struct msvc_dcas_x86
};
}
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
return value;
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
{
+ // MSVC-11 in 32-bit mode sometimes generates messed up code without compiler barriers,
+ // even though the _InterlockedCompareExchange64 intrinsic already provides one.
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
storage_type volatile* p = &storage;
#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
const storage_type old_val = (storage_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(p, desired, expected);
const bool result = (old_val == expected);
expected = old_val;
- return result;
#else
bool result;
int backup;
@@ -740,8 +757,10 @@ struct msvc_dcas_x86
mov ebx, backup
sete result
};
- return result;
#endif
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ return result;
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
@@ -750,6 +769,34 @@ struct msvc_dcas_x86
return compare_exchange_strong(storage, expected, desired, success_order, failure_order);
}
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ storage_type volatile* p = &storage;
+ int backup;
+ __asm
+ {
+ mov backup, ebx
+ mov edi, p
+ mov ebx, dword ptr [v]
+ mov ecx, dword ptr [v + 4]
+ mov eax, dword ptr [edi]
+ mov edx, dword ptr [edi + 4]
+ align 16
+ again:
+ lock cmpxchg8b qword ptr [edi]
+ jne again
+ mov ebx, backup
+ mov dword ptr [v], eax
+ mov dword ptr [v + 4], edx
+ };
+
+ BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
+
+ return v;
+ }
+
static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
{
return true;
@@ -770,6 +817,7 @@ struct operations< 8u, Signed > :
{
typedef msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -814,6 +862,7 @@ template< bool Signed >
struct msvc_dcas_x86_64
{
typedef typename make_storage_type< 16u, Signed >::type storage_type;
+ typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -848,7 +897,7 @@ struct msvc_dcas_x86_64
template< bool Signed >
struct operations< 16u, Signed > :
- public cas_based_operations< msvc_dcas_x86_64< Signed > >
+ public cas_based_operations< cas_based_exchange< msvc_dcas_x86_64< Signed > > >
{
};
diff --git a/boost/atomic/detail/ops_windows.hpp b/boost/atomic/detail/ops_windows.hpp
index 1b4b04c8bc..191eb84d0a 100644
--- a/boost/atomic/detail/ops_windows.hpp
+++ b/boost/atomic/detail/ops_windows.hpp
@@ -110,6 +110,7 @@ struct operations< 4u, Signed > :
{
typedef windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/platform.hpp b/boost/atomic/detail/platform.hpp
index 76ad4ebb8f..b6c48ef0f9 100644
--- a/boost/atomic/detail/platform.hpp
+++ b/boost/atomic/detail/platform.hpp
@@ -24,7 +24,13 @@
#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
// Compiler-based backends
-#if ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
+#if (defined(__ibmxl__) || defined(__IBMCPP__)) && defined(__PPC__)
+
+// IBM XL C++ Compiler has to be checked before GCC/Clang as it pretends to be one but does not support __atomic* intrinsics.
+// It does support GCC inline assembler though.
+#define BOOST_ATOMIC_DETAIL_PLATFORM gcc_ppc
+
+#elif ((defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)) ||\
(defined(BOOST_CLANG) && ((__clang_major__ * 100 + __clang_minor__) >= 302))) &&\
(\
(__GCC_ATOMIC_BOOL_LOCK_FREE + 0) == 2 ||\
diff --git a/boost/atomic/detail/storage_type.hpp b/boost/atomic/detail/storage_type.hpp
index a024f1d327..63a7cef581 100644
--- a/boost/atomic/detail/storage_type.hpp
+++ b/boost/atomic/detail/storage_type.hpp
@@ -16,9 +16,12 @@
#ifndef BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_STORAGE_TYPE_HPP_INCLUDED_
-#include <cstring>
+#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
+#include <cstring>
+#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -28,84 +31,163 @@ namespace boost {
namespace atomics {
namespace detail {
-template< unsigned int Size >
+template< typename T >
+BOOST_FORCEINLINE void non_atomic_load(T const volatile& from, T& to) BOOST_NOEXCEPT
+{
+ to = from;
+}
+
+template< std::size_t Size >
struct buffer_storage
{
- unsigned char data[Size];
+ BOOST_ALIGNMENT(16) unsigned char data[Size];
BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
{
- bool result = true;
- for (unsigned int i = 0; i < Size && result; ++i)
- {
- result &= data[i] == 0;
- }
- return result;
+ return (data[0] == 0u && BOOST_ATOMIC_DETAIL_MEMCMP(data, data + 1, Size - 1) == 0);
}
BOOST_FORCEINLINE bool operator== (buffer_storage const& that) const BOOST_NOEXCEPT
{
- return std::memcmp(data, that.data, Size) == 0;
+ return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) == 0;
}
BOOST_FORCEINLINE bool operator!= (buffer_storage const& that) const BOOST_NOEXCEPT
{
- return std::memcmp(data, that.data, Size) != 0;
+ return BOOST_ATOMIC_DETAIL_MEMCMP(data, that.data, Size) != 0;
}
};
-template< unsigned int Size, bool Signed >
+template< std::size_t Size >
+BOOST_FORCEINLINE void non_atomic_load(buffer_storage< Size > const volatile& from, buffer_storage< Size >& to) BOOST_NOEXCEPT
+{
+ BOOST_ATOMIC_DETAIL_MEMCPY(to.data, const_cast< unsigned char const* >(from.data), Size);
+}
+
+template< std::size_t Size, bool Signed >
struct make_storage_type
{
typedef buffer_storage< Size > type;
+
+ struct aligned
+ {
+ type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type const& v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 1u, false >
{
typedef boost::uint8_t type;
+
+ struct aligned
+ {
+ type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 1u, true >
{
typedef boost::int8_t type;
+
+ struct aligned
+ {
+ type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 2u, false >
{
typedef boost::uint16_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(2) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 2u, true >
{
typedef boost::int16_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(2) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 4u, false >
{
typedef boost::uint32_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(4) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 4u, true >
{
typedef boost::int32_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(4) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 8u, false >
{
typedef boost::uint64_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(8) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 8u, true >
{
typedef boost::int64_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(8) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
#if defined(BOOST_HAS_INT128)
@@ -114,17 +196,33 @@ template< >
struct make_storage_type< 16u, false >
{
typedef boost::uint128_type type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(16) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
template< >
struct make_storage_type< 16u, true >
{
typedef boost::int128_type type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(16) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
+ };
};
#elif !defined(BOOST_NO_ALIGNMENT)
-struct BOOST_ALIGNMENT(16) storage128_t
+struct storage128_t
{
boost::uint64_t data[2];
@@ -143,10 +241,24 @@ BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const&
return !(left == right);
}
+BOOST_FORCEINLINE void non_atomic_load(storage128_t const volatile& from, storage128_t& to) BOOST_NOEXCEPT
+{
+ to.data[0] = from.data[0];
+ to.data[1] = from.data[1];
+}
+
template< bool Signed >
struct make_storage_type< 16u, Signed >
{
typedef storage128_t type;
+
+ struct aligned
+ {
+ BOOST_ALIGNMENT(16) type value;
+
+ BOOST_DEFAULTED_FUNCTION(aligned(), {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type const& v) BOOST_NOEXCEPT : value(v) {}
+ };
};
#endif