summaryrefslogtreecommitdiff
path: root/boost/atomic/detail
diff options
context:
space:
mode:
Diffstat (limited to 'boost/atomic/detail')
-rw-r--r--boost/atomic/detail/addressof.hpp58
-rw-r--r--boost/atomic/detail/atomic_flag.hpp1
-rw-r--r--boost/atomic/detail/atomic_template.hpp711
-rw-r--r--boost/atomic/detail/bitwise_cast.hpp56
-rw-r--r--boost/atomic/detail/bitwise_fp_cast.hpp86
-rw-r--r--boost/atomic/detail/caps_gcc_atomic.hpp8
-rw-r--r--boost/atomic/detail/caps_gcc_sync.hpp4
-rw-r--r--boost/atomic/detail/config.hpp112
-rw-r--r--boost/atomic/detail/extra_fp_operations.hpp28
-rw-r--r--boost/atomic/detail/extra_fp_operations_fwd.hpp35
-rw-r--r--boost/atomic/detail/extra_fp_ops_emulated.hpp107
-rw-r--r--boost/atomic/detail/extra_fp_ops_generic.hpp189
-rw-r--r--boost/atomic/detail/extra_operations.hpp1
-rw-r--r--boost/atomic/detail/extra_operations_fwd.hpp2
-rw-r--r--boost/atomic/detail/extra_ops_emulated.hpp238
-rw-r--r--boost/atomic/detail/extra_ops_gcc_arm.hpp845
-rw-r--r--boost/atomic/detail/extra_ops_gcc_ppc.hpp658
-rw-r--r--boost/atomic/detail/extra_ops_gcc_x86.hpp486
-rw-r--r--boost/atomic/detail/extra_ops_generic.hpp292
-rw-r--r--boost/atomic/detail/extra_ops_msvc_arm.hpp2
-rw-r--r--boost/atomic/detail/extra_ops_msvc_x86.hpp473
-rw-r--r--boost/atomic/detail/float_sizes.hpp142
-rw-r--r--boost/atomic/detail/fp_operations.hpp28
-rw-r--r--boost/atomic/detail/fp_operations_fwd.hpp35
-rw-r--r--boost/atomic/detail/fp_ops_emulated.hpp72
-rw-r--r--boost/atomic/detail/fp_ops_generic.hpp83
-rw-r--r--boost/atomic/detail/int_sizes.hpp6
-rw-r--r--boost/atomic/detail/integral_extend.hpp105
-rw-r--r--boost/atomic/detail/ops_cas_based.hpp2
-rw-r--r--boost/atomic/detail/ops_emulated.hpp15
-rw-r--r--boost/atomic/detail/ops_extending_cas_based.hpp15
-rw-r--r--boost/atomic/detail/ops_gcc_alpha.hpp13
-rw-r--r--boost/atomic/detail/ops_gcc_arm.hpp93
-rw-r--r--boost/atomic/detail/ops_gcc_arm_common.hpp5
-rw-r--r--boost/atomic/detail/ops_gcc_atomic.hpp109
-rw-r--r--boost/atomic/detail/ops_gcc_ppc.hpp164
-rw-r--r--boost/atomic/detail/ops_gcc_ppc_common.hpp7
-rw-r--r--boost/atomic/detail/ops_gcc_sparc.hpp13
-rw-r--r--boost/atomic/detail/ops_gcc_sync.hpp105
-rw-r--r--boost/atomic/detail/ops_gcc_x86.hpp21
-rw-r--r--boost/atomic/detail/ops_gcc_x86_dcas.hpp566
-rw-r--r--boost/atomic/detail/ops_linux_arm.hpp9
-rw-r--r--boost/atomic/detail/ops_msvc_arm.hpp48
-rw-r--r--boost/atomic/detail/ops_msvc_x86.hpp127
-rw-r--r--boost/atomic/detail/ops_windows.hpp17
-rw-r--r--boost/atomic/detail/platform.hpp12
-rw-r--r--boost/atomic/detail/storage_type.hpp101
-rw-r--r--boost/atomic/detail/string_ops.hpp61
-rw-r--r--boost/atomic/detail/type_traits/conditional.hpp4
-rw-r--r--boost/atomic/detail/type_traits/integral_constant.hpp46
-rw-r--r--boost/atomic/detail/type_traits/is_floating_point.hpp42
-rw-r--r--boost/atomic/detail/type_traits/is_function.hpp4
-rw-r--r--boost/atomic/detail/type_traits/is_iec559.hpp47
-rw-r--r--boost/atomic/detail/type_traits/is_integral.hpp4
-rw-r--r--boost/atomic/detail/type_traits/is_signed.hpp4
-rw-r--r--boost/atomic/detail/type_traits/is_trivially_default_constructible.hpp46
-rw-r--r--boost/atomic/detail/type_traits/make_signed.hpp4
-rw-r--r--boost/atomic/detail/type_traits/make_unsigned.hpp43
58 files changed, 5258 insertions, 1352 deletions
diff --git a/boost/atomic/detail/addressof.hpp b/boost/atomic/detail/addressof.hpp
new file mode 100644
index 0000000000..38e876e317
--- /dev/null
+++ b/boost/atomic/detail/addressof.hpp
@@ -0,0 +1,58 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/addressof.hpp
+ *
+ * This header defines \c addressof helper function. It is similar to \c boost::addressof but it is more
+ * lightweight and also contains a workaround for some compiler warnings.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// Detection logic is based on boost/core/addressof.hpp
+#if defined(BOOST_MSVC_FULL_VER) && BOOST_MSVC_FULL_VER >= 190024215
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
+#elif defined(BOOST_GCC) && BOOST_GCC >= 70000
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
+#elif defined(__has_builtin)
+#if __has_builtin(__builtin_addressof)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF
+#endif
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T >
+BOOST_FORCEINLINE T* addressof(T& value) BOOST_NOEXCEPT
+{
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_ADDRESSOF)
+ return __builtin_addressof(value);
+#else
+ // Note: The point of using a local struct as the intermediate type instead of char is to avoid gcc warnings
+ // if T is a const volatile char*:
+ // warning: casting 'const volatile char* const' to 'const volatile char&' does not dereference pointer
+ // The local struct makes sure T is not related to the cast target type.
+ struct opaque_type;
+ return reinterpret_cast< T* >(&const_cast< opaque_type& >(reinterpret_cast< const volatile opaque_type& >(value)));
+#endif
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_ADDRESSOF_HPP_INCLUDED_
diff --git a/boost/atomic/detail/atomic_flag.hpp b/boost/atomic/detail/atomic_flag.hpp
index 7fb44cdb1a..6f5fc8acc3 100644
--- a/boost/atomic/detail/atomic_flag.hpp
+++ b/boost/atomic/detail/atomic_flag.hpp
@@ -55,6 +55,7 @@ struct atomic_flag
BOOST_FORCEINLINE void clear(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
+ BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
operations::clear(m_storage.value, order);
diff --git a/boost/atomic/detail/atomic_template.hpp b/boost/atomic/detail/atomic_template.hpp
index 28de879d40..fb0a8f58f0 100644
--- a/boost/atomic/detail/atomic_template.hpp
+++ b/boost/atomic/detail/atomic_template.hpp
@@ -20,13 +20,23 @@
#include <boost/cstdint.hpp>
#include <boost/assert.hpp>
#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
#include <boost/atomic/detail/bitwise_cast.hpp>
+#include <boost/atomic/detail/integral_extend.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/detail/type_traits/is_signed.hpp>
#include <boost/atomic/detail/type_traits/is_integral.hpp>
#include <boost/atomic/detail/type_traits/is_function.hpp>
+#include <boost/atomic/detail/type_traits/is_floating_point.hpp>
+#include <boost/atomic/detail/type_traits/is_trivially_default_constructible.hpp>
#include <boost/atomic/detail/type_traits/conditional.hpp>
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+#include <boost/atomic/detail/bitwise_fp_cast.hpp>
+#include <boost/atomic/detail/fp_operations_fwd.hpp>
+#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
+#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -56,10 +66,10 @@ BOOST_FORCEINLINE BOOST_CONSTEXPR bool cas_failure_order_must_not_be_stronger_th
{
// 15 == (memory_order_seq_cst | memory_order_consume), see memory_order.hpp
// Given the enum values we can test the strength of memory order requirements with this single condition.
- return (failure_order & 15u) <= (success_order & 15u);
+ return (static_cast< unsigned int >(failure_order) & 15u) <= (static_cast< unsigned int >(success_order) & 15u);
}
-template< typename T, bool IsFunction = boost::atomics::detail::is_function< T >::value >
+template< typename T, bool IsFunction = atomics::detail::is_function< T >::value >
struct classify_pointer
{
typedef void* type;
@@ -71,64 +81,126 @@ struct classify_pointer< T, true >
typedef void type;
};
-template< typename T, bool IsInt = boost::atomics::detail::is_integral< T >::value >
+template< typename T, bool IsInt = atomics::detail::is_integral< T >::value, bool IsFloat = atomics::detail::is_floating_point< T >::value >
struct classify
{
typedef void type;
};
template< typename T >
-struct classify< T, true > { typedef int type; };
+struct classify< T, true, false > { typedef int type; };
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+template< typename T >
+struct classify< T, false, true > { typedef float type; };
+#endif
template< typename T >
-struct classify< T*, false > { typedef typename classify_pointer< T >::type type; };
+struct classify< T*, false, false > { typedef typename classify_pointer< T >::type type; };
template< >
-struct classify< void*, false > { typedef void type; };
+struct classify< void*, false, false > { typedef void type; };
template< >
-struct classify< const void*, false > { typedef void type; };
+struct classify< const void*, false, false > { typedef void type; };
template< >
-struct classify< volatile void*, false > { typedef void type; };
+struct classify< volatile void*, false, false > { typedef void type; };
template< >
-struct classify< const volatile void*, false > { typedef void type; };
+struct classify< const volatile void*, false, false > { typedef void type; };
template< typename T, typename U >
-struct classify< T U::*, false > { typedef void type; };
+struct classify< T U::*, false, false > { typedef void type; };
-template< bool >
-struct boolean_constant {};
-typedef boolean_constant< true > true_constant;
-typedef boolean_constant< false > false_constant;
+#if defined(BOOST_INTEL) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40700) ||\
+ (defined(BOOST_CLANG) && !defined(__apple_build_version__) && ((__clang_major__+0) * 100 + (__clang_minor__+0)) < 302) ||\
+ (defined(__clang__) && defined(__apple_build_version__) && ((__clang_major__+0) * 100 + (__clang_minor__+0)) < 402)
+// Intel compiler (at least 18.0 update 1) breaks if noexcept specification is used in defaulted function declarations:
+// error: the default constructor of "boost::atomics::atomic<T>" cannot be referenced -- it is a deleted function
+// GCC 4.6 doesn't seem to support that either. Clang 3.1 deduces wrong noexcept for the defaulted function and fails as well.
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL BOOST_NOEXCEPT
+#else
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL BOOST_NOEXCEPT
+#define BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL
+#endif
-template< typename T, typename Kind >
-class base_atomic;
+template< typename T, bool IsTriviallyDefaultConstructible = atomics::detail::is_trivially_default_constructible< T >::value >
+class base_atomic_generic;
-//! General template. Implementation for user-defined types, such as structs and enums, and pointers to non-object types
template< typename T >
-class base_atomic< T, void >
+class base_atomic_generic< T, true >
{
public:
typedef T value_type;
protected:
typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
- typedef typename boost::atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
+ typedef typename atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
public:
typedef typename operations::storage_type storage_type;
-private:
- typedef boolean_constant< sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
+protected:
+ typename operations::aligned_storage_type m_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic_generic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE explicit base_atomic_generic(value_arg_type v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
+ {
+ }
+};
+
+template< typename T >
+class base_atomic_generic< T, false >
+{
+public:
+ typedef T value_type;
+
+protected:
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+ typedef typename atomics::detail::conditional< sizeof(value_type) <= sizeof(void*), value_type, value_type const& >::type value_arg_type;
+
+public:
+ typedef typename operations::storage_type storage_type;
protected:
typename operations::aligned_storage_type m_storage;
public:
- BOOST_FORCEINLINE explicit base_atomic(value_arg_type v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
+ BOOST_FORCEINLINE explicit base_atomic_generic(value_arg_type v = value_type()) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
+ {
+ }
+};
+
+
+template< typename T, typename Kind >
+class base_atomic;
+
+//! General template. Implementation for user-defined types, such as structs and enums, and pointers to non-object types
+template< typename T >
+class base_atomic< T, void > :
+ public base_atomic_generic< T >
+{
+private:
+ typedef base_atomic_generic< T > base_type;
+
+public:
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::storage_type storage_type;
+
+protected:
+ typedef typename base_type::operations operations;
+ typedef typename base_type::value_arg_type value_arg_type;
+
+private:
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v)
{
}
@@ -138,7 +210,7 @@ public:
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
+ operations::store(this->m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -146,12 +218,12 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));
+ return atomics::detail::bitwise_cast< value_type >(operations::load(this->m_storage.value, order));
}
BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(operations::exchange(this->m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
}
BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
@@ -186,36 +258,36 @@ public:
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
private:
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, true_constant) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ return operations::compare_exchange_strong(this->m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
#else
- return compare_exchange_strong_impl(expected, desired, success_order, failure_order, false_constant());
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
#endif
}
- BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, false_constant) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = operations::compare_exchange_strong(this->m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, true_constant) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
{
#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ return operations::compare_exchange_weak(this->m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
#else
- return compare_exchange_weak_impl(expected, desired, success_order, failure_order, false_constant());
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
#endif
}
- BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, false_constant) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
{
storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
+ const bool res = operations::compare_exchange_weak(this->m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
expected = atomics::detail::bitwise_cast< value_type >(old_value);
return res;
}
@@ -231,28 +303,31 @@ public:
typedef T difference_type;
protected:
- typedef atomics::detail::operations< storage_size_of< value_type >::value, boost::atomics::detail::is_signed< T >::value > operations;
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, atomics::detail::is_signed< T >::value > operations;
typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
typedef value_type value_arg_type;
public:
typedef typename operations::storage_type storage_type;
+private:
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
+
protected:
typename operations::aligned_storage_type m_storage;
public:
- BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
- BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
+ BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(v) {}
// Standard methods
- BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, static_cast< storage_type >(v), order);
+ operations::store(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -260,100 +335,121 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return static_cast< value_type >(operations::load(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(operations::load(m_storage.value, order));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(operations::fetch_add(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(operations::fetch_sub(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
}
- BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::exchange(m_storage.value, static_cast< storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(operations::exchange(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
}
- BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
-#else
- storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
- expected = static_cast< value_type >(old_value);
- return res;
-#endif
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
}
- BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
- BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
-#else
- storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
- expected = static_cast< value_type >(old_value);
- return res;
-#endif
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
}
- BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
- BOOST_FORCEINLINE value_type fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE value_type fetch_and(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_and(m_storage.value, static_cast< storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(operations::fetch_and(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
}
- BOOST_FORCEINLINE value_type fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE value_type fetch_or(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_or(m_storage.value, static_cast< storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(operations::fetch_or(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
}
- BOOST_FORCEINLINE value_type fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE value_type fetch_xor(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(operations::fetch_xor(m_storage.value, static_cast< storage_type >(v), order));
+ return atomics::detail::integral_truncate< value_type >(operations::fetch_xor(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
}
// Boost.Atomic extensions
BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(extra_operations::fetch_negate(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::fetch_negate(m_storage.value, order));
}
BOOST_FORCEINLINE value_type fetch_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return static_cast< value_type >(extra_operations::fetch_complement(m_storage.value, order));
+ return atomics::detail::integral_truncate< value_type >(extra_operations::fetch_complement(m_storage.value, order));
+ }
+
+ BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::add(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::sub(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::negate(m_storage.value, order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_and(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_and(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_or(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_or(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_xor(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_xor(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE value_type bitwise_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::integral_truncate< value_type >(extra_operations::bitwise_complement(m_storage.value, order));
}
BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_add(m_storage.value, static_cast< storage_type >(v), order);
+ extra_operations::opaque_add(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_sub(m_storage.value, static_cast< storage_type >(v), order);
+ extra_operations::opaque_sub(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -361,19 +457,19 @@ public:
extra_operations::opaque_negate(m_storage.value, order);
}
- BOOST_FORCEINLINE void opaque_and(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE void opaque_and(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_and(m_storage.value, static_cast< storage_type >(v), order);
+ extra_operations::opaque_and(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
- BOOST_FORCEINLINE void opaque_or(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE void opaque_or(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_or(m_storage.value, static_cast< storage_type >(v), order);
+ extra_operations::opaque_or(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
- BOOST_FORCEINLINE void opaque_xor(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE void opaque_xor(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_xor(m_storage.value, static_cast< storage_type >(v), order);
+ extra_operations::opaque_xor(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
BOOST_FORCEINLINE void opaque_complement(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -381,29 +477,44 @@ public:
extra_operations::opaque_complement(m_storage.value, order);
}
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::add_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ return extra_operations::add_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::sub_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ return extra_operations::sub_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool negate_and_test(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::negate_and_test(m_storage.value, order);
}
- BOOST_FORCEINLINE bool and_and_test(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
+ BOOST_FORCEINLINE bool and_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::and_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ return extra_operations::and_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
- BOOST_FORCEINLINE bool or_and_test(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
+ BOOST_FORCEINLINE bool or_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::or_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ return extra_operations::or_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
}
- BOOST_FORCEINLINE bool xor_and_test(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
+ BOOST_FORCEINLINE bool xor_and_test(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::xor_and_test(m_storage.value, static_cast< storage_type >(v), order);
+ return extra_operations::xor_and_test(m_storage.value, atomics::detail::integral_extend< operations::is_signed, storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE bool complement_and_test(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_operations::complement_and_test(m_storage.value, order);
}
BOOST_FORCEINLINE bool bit_test_and_set(unsigned int bit_number, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
@@ -432,7 +543,7 @@ public:
BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
{
- return fetch_add(1) + 1;
+ return add(1);
}
BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
@@ -442,36 +553,71 @@ public:
BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
{
- return fetch_sub(1) - 1;
+ return sub(1);
}
BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
{
- return fetch_add(v) + v;
+ return add(v);
}
BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
{
- return fetch_sub(v) - v;
+ return sub(v);
}
BOOST_FORCEINLINE value_type operator&=(value_type v) volatile BOOST_NOEXCEPT
{
- return fetch_and(v) & v;
+ return bitwise_and(v);
}
BOOST_FORCEINLINE value_type operator|=(value_type v) volatile BOOST_NOEXCEPT
{
- return fetch_or(v) | v;
+ return bitwise_or(v);
}
BOOST_FORCEINLINE value_type operator^=(value_type v) volatile BOOST_NOEXCEPT
{
- return fetch_xor(v) ^ v;
+ return bitwise_xor(v);
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::integral_extend< operations::is_signed, storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::integral_truncate< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::integral_extend< operations::is_signed, storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::integral_extend< operations::is_signed, storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::integral_truncate< value_type >(old_value);
+ return res;
+ }
};
//! Implementation for bool
@@ -488,15 +634,18 @@ protected:
public:
typedef operations::storage_type storage_type;
+private:
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
+
protected:
operations::aligned_storage_type m_storage;
public:
- BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
- BOOST_CONSTEXPR explicit base_atomic(value_type v) BOOST_NOEXCEPT : m_storage(v) {}
+ BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(v) {}
// Standard methods
- BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
@@ -513,57 +662,257 @@ public:
return !!operations::load(m_storage.value, order);
}
- BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return !!operations::exchange(m_storage.value, static_cast< storage_type >(v), order);
}
- BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
+ }
+
+ BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
+ BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
#else
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
storage_type old_value = static_cast< storage_type >(expected);
const bool res = operations::compare_exchange_strong(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
expected = !!old_value;
return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
#endif
}
- BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = static_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
+ expected = !!old_value;
+ return res;
+ }
+};
+
+
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
+//! Implementation for floating point types
+template< typename T >
+class base_atomic< T, float >
+{
+public:
+ typedef T value_type;
+ typedef T difference_type;
+
+protected:
+ typedef atomics::detail::operations< storage_size_of< value_type >::value, false > operations;
+ typedef atomics::detail::extra_operations< operations, operations::storage_size, operations::is_signed > extra_operations;
+ typedef atomics::detail::fp_operations< extra_operations, value_type, operations::storage_size > fp_operations;
+ typedef atomics::detail::extra_fp_operations< fp_operations, value_type, operations::storage_size > extra_fp_operations;
+ typedef value_type value_arg_type;
+
+public:
+ typedef typename operations::storage_type storage_type;
+
+private:
+ typedef atomics::detail::integral_constant< bool, atomics::detail::value_sizeof< value_type >::value == sizeof(storage_type) > value_matches_storage;
+
+protected:
+ typename operations::aligned_storage_type m_storage;
+
+public:
+ BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_fp_cast< storage_type >(v)) {}
+
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_consume);
+ BOOST_ASSERT(order != memory_order_acquire);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ operations::store(m_storage.value, atomics::detail::bitwise_fp_cast< storage_type >(v), order);
+ }
+
+ BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(order != memory_order_release);
+ BOOST_ASSERT(order != memory_order_acq_rel);
+
+ return atomics::detail::bitwise_fp_cast< value_type >(operations::load(m_storage.value, order));
+ }
+
+ BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return fp_operations::fetch_add(m_storage.value, v, order);
+ }
+
+ BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return fp_operations::fetch_sub(m_storage.value, v, order);
+ }
+
+ BOOST_FORCEINLINE value_type exchange(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_fp_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_fp_cast< storage_type >(v), order));
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ {
+ BOOST_ASSERT(failure_order != memory_order_release);
+ BOOST_ASSERT(failure_order != memory_order_acq_rel);
+ BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
+
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
- BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), static_cast< storage_type >(desired), success_order, failure_order);
-#else
- storage_type old_value = static_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, static_cast< storage_type >(desired), success_order, failure_order);
- expected = !!old_value;
- return res;
-#endif
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
}
- BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
+ // Boost.Atomic extensions
+ BOOST_FORCEINLINE value_type fetch_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::fetch_negate(m_storage.value, order);
+ }
+
+ BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::add(m_storage.value, v, order);
+ }
+
+ BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::sub(m_storage.value, v, order);
+ }
+
+ BOOST_FORCEINLINE value_type negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return extra_fp_operations::negate(m_storage.value, order);
+ }
+
+ BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_fp_operations::opaque_add(m_storage.value, v, order);
+ }
+
+ BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_fp_operations::opaque_sub(m_storage.value, v, order);
+ }
+
+ BOOST_FORCEINLINE void opaque_negate(memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ extra_fp_operations::opaque_negate(m_storage.value, order);
+ }
+
+ // Operators
+ BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return add(v);
+ }
+
+ BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
+ {
+ return sub(v);
+ }
+
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_fp_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_fp_cast< value_type >(old_value);
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_fp_cast< storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_fp_cast< storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_fp_cast< value_type >(old_value);
+ return res;
+ }
};
+#endif // !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+
//! Implementation for pointers to object types
template< typename T >
@@ -581,23 +930,34 @@ protected:
public:
typedef typename operations::storage_type storage_type;
+private:
+ typedef atomics::detail::integral_constant< bool, sizeof(value_type) == sizeof(storage_type) > value_matches_storage;
+
+ // uintptr_storage_type is the minimal storage type that is enough to store pointers. The actual storage_type theoretically may be larger,
+ // if the target architecture only supports atomic ops on larger data. Typically, though, they are the same type.
+#if defined(BOOST_HAS_INTPTR_T)
+ typedef uintptr_t uintptr_storage_type;
+#else
+ typedef typename atomics::detail::make_storage_type< sizeof(value_type) >::type uintptr_storage_type;
+#endif
+
protected:
typename operations::aligned_storage_type m_storage;
public:
- BOOST_DEFAULTED_FUNCTION(base_atomic(), {})
- BOOST_FORCEINLINE explicit base_atomic(value_type const& v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< storage_type >(v))
+ BOOST_DEFAULTED_FUNCTION(base_atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE explicit base_atomic(value_arg_type v) BOOST_NOEXCEPT : m_storage(atomics::detail::bitwise_cast< uintptr_storage_type >(v))
{
}
// Standard methods
- BOOST_FORCEINLINE void store(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE void store(value_arg_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(order != memory_order_consume);
BOOST_ASSERT(order != memory_order_acquire);
BOOST_ASSERT(order != memory_order_acq_rel);
- operations::store(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order);
+ operations::store(m_storage.value, atomics::detail::bitwise_cast< uintptr_storage_type >(v), order);
}
BOOST_FORCEINLINE value_type load(memory_order order = memory_order_seq_cst) const volatile BOOST_NOEXCEPT
@@ -605,85 +965,83 @@ public:
BOOST_ASSERT(order != memory_order_release);
BOOST_ASSERT(order != memory_order_acq_rel);
- return atomics::detail::bitwise_cast< value_type >(operations::load(m_storage.value, order));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::load(m_storage.value, order)));
}
BOOST_FORCEINLINE value_type fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(operations::fetch_add(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::fetch_add(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
}
BOOST_FORCEINLINE value_type fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(operations::fetch_sub(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::fetch_sub(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
}
BOOST_FORCEINLINE value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return atomics::detail::bitwise_cast< value_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< storage_type >(v), order));
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(operations::exchange(m_storage.value, atomics::detail::bitwise_cast< uintptr_storage_type >(v), order)));
}
- BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
-#else
- storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::bitwise_cast< value_type >(old_value);
- return res;
-#endif
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, value_matches_storage());
}
- BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_strong(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_strong(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
- BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order) volatile BOOST_NOEXCEPT
{
BOOST_ASSERT(failure_order != memory_order_release);
BOOST_ASSERT(failure_order != memory_order_acq_rel);
BOOST_ASSERT(cas_failure_order_must_not_be_stronger_than_success_order(success_order, failure_order));
-#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
- return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
-#else
- storage_type old_value = atomics::detail::bitwise_cast< storage_type >(expected);
- const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< storage_type >(desired), success_order, failure_order);
- expected = atomics::detail::bitwise_cast< value_type >(old_value);
- return res;
-#endif
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, value_matches_storage());
}
- BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ BOOST_FORCEINLINE bool compare_exchange_weak(value_type& expected, value_arg_type desired, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
return compare_exchange_weak(expected, desired, order, atomics::detail::deduce_failure_order(order));
}
// Boost.Atomic extensions
+ BOOST_FORCEINLINE value_type add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(extra_operations::add(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
+ }
+
+ BOOST_FORCEINLINE value_type sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(extra_operations::sub(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order)));
+ }
+
BOOST_FORCEINLINE void opaque_add(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_add(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ extra_operations::opaque_add(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
BOOST_FORCEINLINE void opaque_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- extra_operations::opaque_sub(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ extra_operations::opaque_sub(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool add_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::add_and_test(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ return extra_operations::add_and_test(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
+ BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
BOOST_FORCEINLINE bool sub_and_test(difference_type v, memory_order order = memory_order_seq_cst) volatile BOOST_NOEXCEPT
{
- return extra_operations::sub_and_test(m_storage.value, static_cast< storage_type >(v * sizeof(T)), order);
+ return extra_operations::sub_and_test(m_storage.value, static_cast< uintptr_storage_type >(v * sizeof(T)), order);
}
// Operators
@@ -694,7 +1052,7 @@ public:
BOOST_FORCEINLINE value_type operator++() volatile BOOST_NOEXCEPT
{
- return fetch_add(1) + 1;
+ return add(1);
}
BOOST_FORCEINLINE value_type operator--(int) volatile BOOST_NOEXCEPT
@@ -704,21 +1062,56 @@ public:
BOOST_FORCEINLINE value_type operator--() volatile BOOST_NOEXCEPT
{
- return fetch_sub(1) - 1;
+ return sub(1);
}
BOOST_FORCEINLINE value_type operator+=(difference_type v) volatile BOOST_NOEXCEPT
{
- return fetch_add(v) + v;
+ return add(v);
}
BOOST_FORCEINLINE value_type operator-=(difference_type v) volatile BOOST_NOEXCEPT
{
- return fetch_sub(v) - v;
+ return sub(v);
}
BOOST_DELETED_FUNCTION(base_atomic(base_atomic const&))
BOOST_DELETED_FUNCTION(base_atomic& operator=(base_atomic const&))
+
+private:
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_strong(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_strong_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_strong_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_cast< uintptr_storage_type >(expected);
+ const bool res = operations::compare_exchange_strong(m_storage.value, old_value, atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(old_value));
+ return res;
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::true_type) volatile BOOST_NOEXCEPT
+ {
+#if defined(BOOST_ATOMIC_DETAIL_STORAGE_TYPE_MAY_ALIAS)
+ return operations::compare_exchange_weak(m_storage.value, reinterpret_cast< storage_type& >(expected), atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
+#else
+ return compare_exchange_weak_impl(expected, desired, success_order, failure_order, atomics::detail::false_type());
+#endif
+ }
+
+ BOOST_FORCEINLINE bool compare_exchange_weak_impl(value_type& expected, value_arg_type desired, memory_order success_order, memory_order failure_order, atomics::detail::false_type) volatile BOOST_NOEXCEPT
+ {
+ storage_type old_value = atomics::detail::bitwise_cast< uintptr_storage_type >(expected);
+ const bool res = operations::compare_exchange_weak(m_storage.value, old_value, atomics::detail::bitwise_cast< uintptr_storage_type >(desired), success_order, failure_order);
+ expected = atomics::detail::bitwise_cast< value_type >(static_cast< uintptr_storage_type >(old_value));
+ return res;
+ }
};
} // namespace detail
@@ -739,13 +1132,14 @@ public:
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = base_type::operations::is_always_lock_free;
public:
- BOOST_DEFAULTED_FUNCTION(atomic(), BOOST_NOEXCEPT {})
+ BOOST_DEFAULTED_FUNCTION(atomic() BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL, BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL {})
+ BOOST_FORCEINLINE BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {}
- // NOTE: The constructor is made explicit because gcc 4.7 complains that
- // operator=(value_arg_type) is considered ambiguous with operator=(atomic const&)
- // in assignment expressions, even though conversion to atomic<> is less preferred
- // than conversion to value_arg_type.
- BOOST_FORCEINLINE explicit BOOST_CONSTEXPR atomic(value_arg_type v) BOOST_NOEXCEPT : base_type(v) {}
+ BOOST_FORCEINLINE value_type operator= (value_arg_type v) BOOST_NOEXCEPT // non-volatile assignment overload; returns the assigned value (not *this), as atomics conventionally do
+ {
+ this->store(v);
+ return v;
+ }
BOOST_FORCEINLINE value_type operator= (value_arg_type v) volatile BOOST_NOEXCEPT
{
@@ -777,6 +1171,9 @@ public:
template< typename T >
BOOST_CONSTEXPR_OR_CONST bool atomic< T >::is_always_lock_free;
+#undef BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_DECL
+#undef BOOST_ATOMIC_DETAIL_DEF_NOEXCEPT_IMPL
+
typedef atomic< char > atomic_char;
typedef atomic< unsigned char > atomic_uchar;
typedef atomic< signed char > atomic_schar;
@@ -827,6 +1224,12 @@ typedef atomic< uint_fast64_t > atomic_uint_fast64_t;
typedef atomic< intmax_t > atomic_intmax_t;
typedef atomic< uintmax_t > atomic_uintmax_t;
+#if !defined(BOOST_ATOMIC_NO_FLOATING_POINT)
+typedef atomic< float > atomic_float_t;
+typedef atomic< double > atomic_double_t;
+typedef atomic< long double > atomic_long_double_t;
+#endif
+
typedef atomic< std::size_t > atomic_size_t;
typedef atomic< std::ptrdiff_t > atomic_ptrdiff_t;
diff --git a/boost/atomic/detail/bitwise_cast.hpp b/boost/atomic/detail/bitwise_cast.hpp
index 4a285ecab2..10d165e7c5 100644
--- a/boost/atomic/detail/bitwise_cast.hpp
+++ b/boost/atomic/detail/bitwise_cast.hpp
@@ -5,7 +5,7 @@
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2013 - 2014 Andrey Semashev
+ * Copyright (c) 2013 - 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/bitwise_cast.hpp
@@ -16,59 +16,53 @@
#ifndef BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
+#include <cstddef>
#include <boost/atomic/detail/config.hpp>
-#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
-#include <cstring>
-#endif
+#include <boost/atomic/detail/addressof.hpp>
+#include <boost/atomic/detail/string_ops.hpp>
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
-#if defined(BOOST_GCC) && (BOOST_GCC+0) >= 40600
-#pragma GCC diagnostic push
-// missing initializer for member var
-#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
-#endif
-
namespace boost {
namespace atomics {
namespace detail {
-template< typename T >
-BOOST_FORCEINLINE T* addressof(T& value) BOOST_NOEXCEPT
+template< std::size_t FromSize, typename To >
+BOOST_FORCEINLINE void clear_padding(To& to, atomics::detail::true_type) BOOST_NOEXCEPT
{
- // Note: The point of using a local struct as the intermediate type instead of char is to avoid gcc warnings
- // if T is a const volatile char*:
- // warning: casting 'const volatile char* const' to 'const volatile char&' does not dereference pointer
- // The local struct makes sure T is not related to the cast target type.
- struct opaque_type;
- return reinterpret_cast< T* >(&const_cast< opaque_type& >(reinterpret_cast< const volatile opaque_type& >(value)));
+ BOOST_ATOMIC_DETAIL_MEMSET(reinterpret_cast< unsigned char* >(atomics::detail::addressof(to)) + FromSize, 0, sizeof(To) - FromSize);
}
-template< typename To, typename From >
+template< std::size_t FromSize, typename To >
+BOOST_FORCEINLINE void clear_padding(To&, atomics::detail::false_type) BOOST_NOEXCEPT
+{
+}
+
+template< typename To, std::size_t FromSize, typename From >
BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
{
- struct
- {
- To to;
- }
- value = {};
+ To to;
BOOST_ATOMIC_DETAIL_MEMCPY
(
- atomics::detail::addressof(value.to),
+ atomics::detail::addressof(to),
atomics::detail::addressof(from),
- (sizeof(From) < sizeof(To) ? sizeof(From) : sizeof(To))
+ (FromSize < sizeof(To) ? FromSize : sizeof(To))
);
- return value.to;
+ atomics::detail::clear_padding< FromSize >(to, atomics::detail::integral_constant< bool, FromSize < sizeof(To) >());
+ return to;
+}
+
+template< typename To, typename From >
+BOOST_FORCEINLINE To bitwise_cast(From const& from) BOOST_NOEXCEPT
+{
+ return atomics::detail::bitwise_cast< To, sizeof(From) >(from);
}
} // namespace detail
} // namespace atomics
} // namespace boost
-#if defined(BOOST_GCC) && (BOOST_GCC+0) >= 40600
-#pragma GCC diagnostic pop
-#endif
-
#endif // BOOST_ATOMIC_DETAIL_BITWISE_CAST_HPP_INCLUDED_
diff --git a/boost/atomic/detail/bitwise_fp_cast.hpp b/boost/atomic/detail/bitwise_fp_cast.hpp
new file mode 100644
index 0000000000..a74b20b972
--- /dev/null
+++ b/boost/atomic/detail/bitwise_fp_cast.hpp
@@ -0,0 +1,86 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/bitwise_fp_cast.hpp
+ *
+ * This header defines \c bitwise_fp_cast used to convert between storage and floating point value types
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/float_sizes.hpp>
+#include <boost/atomic/detail/bitwise_cast.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*!
+ * \brief The type trait returns the size of the value of the specified floating point type
+ *
+ * This size may be less than <tt>sizeof(T)</tt> if the implementation uses padding bytes for a particular FP type. This is
+ * often the case with 80-bit extended double, which is stored in 12 or 16 bytes with padding filled with garbage.
+ */
+template< typename T >
+struct value_sizeof
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t value = sizeof(T); // generic case: assume the value occupies the whole object (no padding)
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
+template< >
+struct value_sizeof< float >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE; // size of the meaningful bytes of float, excluding padding (from float_sizes.hpp)
+};
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
+template< >
+struct value_sizeof< double >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE; // size of the meaningful bytes of double, excluding padding (from float_sizes.hpp)
+};
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
+template< >
+struct value_sizeof< long double >
+{
+ static BOOST_CONSTEXPR_OR_CONST std::size_t value = BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE; // e.g. 10 meaningful bytes for 80-bit extended double stored in 12/16 bytes
+};
+#endif
+
+template< typename T >
+struct value_sizeof< const T > : value_sizeof< T > {}; // ignore cv-qualification
+
+template< typename T >
+struct value_sizeof< volatile T > : value_sizeof< T > {}; // ignore cv-qualification
+
+template< typename T >
+struct value_sizeof< const volatile T > : value_sizeof< T > {}; // ignore cv-qualification
+
+//! Casts the object representation of a floating point value, copying only the value_sizeof< From > meaningful bytes; bitwise_cast zeroes any excess bytes of the result
+template< typename To, typename From >
+BOOST_FORCEINLINE To bitwise_fp_cast(From const& from) BOOST_NOEXCEPT
+{
+ return atomics::detail::bitwise_cast< To, atomics::detail::value_sizeof< From >::value >(from);
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_BITWISE_FP_CAST_HPP_INCLUDED_
diff --git a/boost/atomic/detail/caps_gcc_atomic.hpp b/boost/atomic/detail/caps_gcc_atomic.hpp
index d6221fd134..3b518cf49c 100644
--- a/boost/atomic/detail/caps_gcc_atomic.hpp
+++ b/boost/atomic/detail/caps_gcc_atomic.hpp
@@ -16,9 +16,13 @@
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/int_sizes.hpp>
+#if defined(__i386__) || defined(__x86_64__)
#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+#elif defined(__arm__)
#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+#elif defined(__POWERPC__) || defined(__PPC__)
#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
+#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -30,13 +34,13 @@
#define BOOST_ATOMIC_INT128_LOCK_FREE 0
#endif
-#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2
+#if (__GCC_ATOMIC_LLONG_LOCK_FREE == 2) || (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8)
#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LLONG_LOCK_FREE BOOST_ATOMIC_INT128_LOCK_FREE
#endif
-#if __GCC_ATOMIC_LONG_LOCK_FREE == 2
+#if (__GCC_ATOMIC_LONG_LOCK_FREE == 2) || (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) && BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8)
#define BOOST_ATOMIC_LONG_LOCK_FREE 2
#else
#define BOOST_ATOMIC_LONG_LOCK_FREE BOOST_ATOMIC_LLONG_LOCK_FREE
diff --git a/boost/atomic/detail/caps_gcc_sync.hpp b/boost/atomic/detail/caps_gcc_sync.hpp
index d797e5af64..ffbe605a1a 100644
--- a/boost/atomic/detail/caps_gcc_sync.hpp
+++ b/boost/atomic/detail/caps_gcc_sync.hpp
@@ -17,9 +17,13 @@
#define BOOST_ATOMIC_DETAIL_CAPS_GCC_SYNC_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
+#if defined(__i386__) || defined(__x86_64__)
#include <boost/atomic/detail/hwcaps_gcc_x86.hpp>
+#elif defined(__arm__)
#include <boost/atomic/detail/hwcaps_gcc_arm.hpp>
+#elif defined(__POWERPC__) || defined(__PPC__)
#include <boost/atomic/detail/hwcaps_gcc_ppc.hpp>
+#endif
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
diff --git a/boost/atomic/detail/config.hpp b/boost/atomic/detail/config.hpp
index aee674084f..d2a6afd203 100644
--- a/boost/atomic/detail/config.hpp
+++ b/boost/atomic/detail/config.hpp
@@ -21,32 +21,8 @@
#pragma once
#endif
-#if defined(__has_builtin)
-#if __has_builtin(__builtin_memcpy)
-#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY
-#endif
-#if __has_builtin(__builtin_memcmp)
-#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP
-#endif
-#elif defined(BOOST_GCC)
-#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY
-#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP
-#endif
-
-#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
-#define BOOST_ATOMIC_DETAIL_MEMCPY __builtin_memcpy
-#else
-#define BOOST_ATOMIC_DETAIL_MEMCPY std::memcpy
-#endif
-
-#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP)
-#define BOOST_ATOMIC_DETAIL_MEMCMP __builtin_memcmp
-#else
-#define BOOST_ATOMIC_DETAIL_MEMCMP std::memcmp
-#endif
-
#if defined(__CUDACC__)
-// nvcc does not support alternatives in asm statement constraints
+// nvcc does not support alternatives ("q,m") in asm statement constraints
#define BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES
// nvcc does not support condition code register ("cc") clobber in asm statements
#define BOOST_ATOMIC_DETAIL_NO_ASM_CLOBBER_CC
@@ -60,24 +36,21 @@
#define BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA
#endif
-#if ((defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)) && (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) < 403)) ||\
- (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000) /* gcc 7 emits assembler warnings when zero displacement is implied */ ||\
- defined(__SUNPRO_CC)
-// This macro indicates we're using older binutils that don't support implied zero displacements for memory opereands,
-// making code like this invalid:
-// movl 4+(%%edx), %%eax
-#define BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS
+#if (defined(__i386__) || defined(__x86_64__)) && (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40500) || defined(__SUNPRO_CC))
+// This macro indicates that the compiler does not support allocating eax:edx or rax:rdx register pairs ("A") in asm blocks
+#define BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS
#endif
-#if defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) < 40500) || defined(__SUNPRO_CC)
-// This macro indicates that the compiler does not support allocating rax:rdx register pairs ("A") in asm blocks
-#define BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS
+#if defined(__i386__) && (defined(__PIC__) || defined(__PIE__)) && !(defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 50100))
+// This macro indicates that asm blocks should preserve ebx value unchanged. Some compilers are able to maintain ebx themselves
+// around the asm blocks. For those compilers we don't need to save/restore ebx in asm blocks.
+#define BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX
#endif
#if defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
#if !(defined(BOOST_LIBSTDCXX11) && (BOOST_LIBSTDCXX_VERSION+0) >= 40700) /* libstdc++ from gcc >= 4.7 in C++11 mode */
-// This macro indicates that there is no <type_traits> standard header that is sufficient for Boost.Atomic needs.
-#define BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS
+// This macro indicates that there is not even a basic <type_traits> standard header that is sufficient for most Boost.Atomic needs.
+#define BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS
#endif
#endif // defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
@@ -109,4 +82,69 @@
#define BOOST_ATOMIC_DETAIL_IS_CONSTANT(x) false
#endif
+#if (defined(__BYTE_ORDER__) && defined(__FLOAT_WORD_ORDER__) && (__BYTE_ORDER__+0) == (__FLOAT_WORD_ORDER__+0)) ||\
+ defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
+// This macro indicates that integer and floating point endianness is the same
+#define BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH
+#endif
+
+// Deprecated symbols markup
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && defined(_MSC_VER)
+#if (_MSC_VER) >= 1400
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __declspec(deprecated(msg))
+#else
+// MSVC 7.1 only supports the attribute without a message
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __declspec(deprecated)
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && defined(__has_extension)
+#if __has_extension(attribute_deprecated_with_message)
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __attribute__((deprecated(msg)))
+#endif
+#endif
+
+// gcc since 4.5 supports deprecated attribute with a message; older versions support the attribute without a message.
+// Oracle Studio 12.4 supports deprecated attribute with a message; this is the first release that supports the attribute.
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && (\
+ (defined(__GNUC__) && ((__GNUC__ + 0) * 100 + (__GNUC_MINOR__ + 0)) >= 405) ||\
+ (defined(__SUNPRO_CC) && (__SUNPRO_CC + 0) >= 0x5130))
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __attribute__((deprecated(msg)))
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && __cplusplus >= 201402
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) [[deprecated(msg)]]
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && defined(__GNUC__)
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __attribute__((deprecated))
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED) && defined(__has_attribute)
+#if __has_attribute(deprecated)
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg) __attribute__((deprecated))
+#endif
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_DEPRECATED)
+#define BOOST_ATOMIC_DETAIL_DEPRECATED(msg)
+#endif
+
+// In Boost.Atomic 1.67 we changed (op)_and_test methods to return true when the result is non-zero. This would be more consistent
+// with the other names used in Boost.Atomic and the C++ standard library. Since the methods were announced as experimental and
+// the previous behavior was released only in Boost 1.66, it was decided to change the result without changing the method names.
+// By defining BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST the user has a way to highlight all uses of the affected functions so
+// that it is easier to find and update the affected code (which is typically adding or removing negation of the result). This
+// highlighting functionality is a temporary measure to help users upgrade from Boost 1.66 to newer Boost versions. It will
+// be removed eventually.
+//
+// More info at:
+// https://github.com/boostorg/atomic/issues/11
+// http://boost.2283326.n4.nabble.com/atomic-op-and-test-naming-tc4701445.html
+#if defined(BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST)
+#define BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST BOOST_ATOMIC_DETAIL_DEPRECATED("Boost.Atomic 1.67 has changed (op)_and_test result to the opposite. The functions now return true when the result is non-zero. Please, verify your use of the operation and undefine BOOST_ATOMIC_HIGHLIGHT_OP_AND_TEST.")
+#else
+#define BOOST_ATOMIC_DETAIL_HIGHLIGHT_OP_AND_TEST
+#endif
+
#endif // BOOST_ATOMIC_DETAIL_CONFIG_HPP_INCLUDED_
diff --git a/boost/atomic/detail/extra_fp_operations.hpp b/boost/atomic/detail/extra_fp_operations.hpp
new file mode 100644
index 0000000000..854d8c9bee
--- /dev/null
+++ b/boost/atomic/detail/extra_fp_operations.hpp
@@ -0,0 +1,28 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_fp_operations.hpp
+ *
+ * This header defines extra floating point atomic operations, including the generic version.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/extra_fp_ops_generic.hpp>
+#include <boost/atomic/detail/extra_fp_ops_emulated.hpp>
+
+#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_GENERIC)
+#include BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_HEADER(boost/atomic/detail/extra_fp_ops_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_HPP_INCLUDED_
diff --git a/boost/atomic/detail/extra_fp_operations_fwd.hpp b/boost/atomic/detail/extra_fp_operations_fwd.hpp
new file mode 100644
index 0000000000..79bca9d2cd
--- /dev/null
+++ b/boost/atomic/detail/extra_fp_operations_fwd.hpp
@@ -0,0 +1,35 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_fp_operations_fwd.hpp
+ *
+ * This header contains forward declaration of the \c extra_fp_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base, typename Value, std::size_t Size, bool = Base::is_always_lock_free >
+struct extra_fp_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/boost/atomic/detail/extra_fp_ops_emulated.hpp b/boost/atomic/detail/extra_fp_ops_emulated.hpp
new file mode 100644
index 0000000000..e04b2f50fb
--- /dev/null
+++ b/boost/atomic/detail/extra_fp_ops_emulated.hpp
@@ -0,0 +1,107 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_fp_ops_emulated.hpp
+ *
+ * This header contains emulated (lock-based) implementation of the extra floating point atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/bitwise_fp_cast.hpp>
+#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
+#include <boost/atomic/detail/lockpool.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Lock-based (emulated) implementation of the extra floating point operations
+template< typename Base, typename Value, std::size_t Size >
+struct emulated_extra_fp_operations :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type; // raw storage representation of the value
+ typedef Value value_type; // floating point type exposed to the user
+
+ static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT // negates the value, returns the previous value
+ {
+ storage_type& s = const_cast< storage_type& >(storage); // the lock below gives exclusive access, so the volatile qualifier may be dropped
+ lockpool::scoped_lock lock(&storage); // memory_order argument is unused: the lock provides the synchronization
+ value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
+ value_type new_val = -old_val;
+ s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT // negates the value, returns the new value
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
+ value_type new_val = -old_val;
+ s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE value_type add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT // adds v, returns the new value
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
+ value_type new_val = old_val + v;
+ s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE value_type sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT // subtracts v, returns the new value
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
+ value_type new_val = old_val - v;
+ s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order); // same as fetch_negate, result discarded
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_add(storage, v, order); // delegates to the base FP fetch_add, result discarded
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_sub(storage, v, order); // delegates to the base FP fetch_sub, result discarded
+ }
+};
+
+template< typename Base, typename Value, std::size_t Size >
+struct extra_fp_operations< Base, Value, Size, false > : // not lock-free (is_always_lock_free == false): use the lock pool based emulation
+ public emulated_extra_fp_operations< Base, Value, Size >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_EMULATED_HPP_INCLUDED_
diff --git a/boost/atomic/detail/extra_fp_ops_generic.hpp b/boost/atomic/detail/extra_fp_ops_generic.hpp
new file mode 100644
index 0000000000..34902c472c
--- /dev/null
+++ b/boost/atomic/detail/extra_fp_ops_generic.hpp
@@ -0,0 +1,189 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_fp_ops_generic.hpp
+ *
+ * This header contains generic implementation of the extra floating point atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/bitwise_fp_cast.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_fp_operations_fwd.hpp>
+#include <boost/atomic/detail/type_traits/is_iec559.hpp>
+#include <boost/atomic/detail/type_traits/is_integral.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_GCC) && (BOOST_GCC+0) >= 60000
+#pragma GCC diagnostic push
+// ignoring attributes on template argument X - this warning is because we need to pass storage_type as a template argument; no problem in this case
+#pragma GCC diagnostic ignored "-Wignored-attributes"
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Negate implementation, expressed through a compare-and-swap loop (generic case)
+template<
+ typename Base,
+ typename Value,
+ std::size_t Size
+#if defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
+ , bool = atomics::detail::is_iec559< Value >::value && atomics::detail::is_integral< typename Base::storage_type >::value // selects the IEC 559 sign-bit specialization below when true
+#endif
+>
+struct generic_extra_fp_negate :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef Value value_type;
+
+ static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT // returns the previous value
+ {
+ storage_type old_storage, new_storage;
+ value_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_storage); // initial plain load; the CAS below validates it
+ do
+ {
+ old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
+ new_val = -old_val;
+ new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ }
+ while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed)); // on failure old_storage is refreshed with the current value
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT // returns the new value
+ {
+ storage_type old_storage, new_storage;
+ value_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_storage);
+ do
+ {
+ old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
+ new_val = -old_val;
+ new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ }
+ while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order); // result discarded
+ }
+};
+
+#if defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
+
+//! Negate implementation for IEEE 754 / IEC 559 floating point types. We leverage the fact that the sign bit is the most significant bit in the value.
+template< typename Base, typename Value, std::size_t Size >
+struct generic_extra_fp_negate< Base, Value, Size, true > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef Value value_type;
+
+ //! The mask with only one sign bit set to 1
+ static BOOST_CONSTEXPR_OR_CONST storage_type sign_mask = static_cast< storage_type >(1u) << (atomics::detail::value_sizeof< value_type >::value * 8u - 1u); // bit position computed from the value size, excluding any padding bytes
+
+ static BOOST_FORCEINLINE value_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_fp_cast< value_type >(base_type::fetch_xor(storage, sign_mask, order)); // flipping the sign bit negates the value; fetch_xor yields the previous bits
+ }
+
+ static BOOST_FORCEINLINE value_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return atomics::detail::bitwise_fp_cast< value_type >(base_type::bitwise_xor(storage, sign_mask, order)); // bitwise_xor yields the new bits, i.e. the negated value
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::opaque_xor(storage, sign_mask, order); // result discarded
+ }
+};
+
+#endif // defined(BOOST_ATOMIC_DETAIL_INT_FP_ENDIAN_MATCH)
+
+//! Generic implementation of the extra floating point operations (add/sub), expressed through compare-and-swap loops
+template< typename Base, typename Value, std::size_t Size >
+struct generic_extra_fp_operations :
+ public generic_extra_fp_negate< Base, Value, Size >
+{
+ typedef generic_extra_fp_negate< Base, Value, Size > base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef Value value_type;
+
+ static BOOST_FORCEINLINE value_type add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT // returns the new value
+ {
+ storage_type old_storage, new_storage;
+ value_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_storage); // initial plain load; the CAS below validates it
+ do
+ {
+ old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
+ new_val = old_val + v;
+ new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ }
+ while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed)); // on failure old_storage is refreshed with the current value
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE value_type sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT // returns the new value
+ {
+ storage_type old_storage, new_storage;
+ value_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_storage);
+ do
+ {
+ old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
+ new_val = old_val - v;
+ new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ }
+ while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_add(storage, v, order); // result discarded
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_sub(storage, v, order); // result discarded
+ }
+};
+
+// Default extra_fp_operations template definition will be used unless specialized for a specific platform
+template< typename Base, typename Value, std::size_t Size >
+struct extra_fp_operations< Base, Value, Size, true > : // lock-free case (is_always_lock_free == true)
+ public generic_extra_fp_operations< Base, Value, Size >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_GCC) && (BOOST_GCC+0) >= 60000
+#pragma GCC diagnostic pop
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_FP_OPS_GENERIC_HPP_INCLUDED_
diff --git a/boost/atomic/detail/extra_operations.hpp b/boost/atomic/detail/extra_operations.hpp
index 4335f48bf4..c04f55cd83 100644
--- a/boost/atomic/detail/extra_operations.hpp
+++ b/boost/atomic/detail/extra_operations.hpp
@@ -15,6 +15,7 @@
#define BOOST_ATOMIC_DETAIL_EXTRA_OPERATIONS_HPP_INCLUDED_
#include <boost/atomic/detail/extra_ops_generic.hpp>
+#include <boost/atomic/detail/extra_ops_emulated.hpp>
#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC)
#include BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(boost/atomic/detail/extra_ops_)
diff --git a/boost/atomic/detail/extra_operations_fwd.hpp b/boost/atomic/detail/extra_operations_fwd.hpp
index 8c258f6dc2..399a823351 100644
--- a/boost/atomic/detail/extra_operations_fwd.hpp
+++ b/boost/atomic/detail/extra_operations_fwd.hpp
@@ -25,7 +25,7 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename Base, std::size_t Size, bool Signed >
+template< typename Base, std::size_t Size, bool Signed, bool = Base::is_always_lock_free >
struct extra_operations;
} // namespace detail
diff --git a/boost/atomic/detail/extra_ops_emulated.hpp b/boost/atomic/detail/extra_ops_emulated.hpp
new file mode 100644
index 0000000000..c0e4832944
--- /dev/null
+++ b/boost/atomic/detail/extra_ops_emulated.hpp
@@ -0,0 +1,238 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/extra_ops_emulated.hpp
+ *
+ * This header contains emulated (lock-based) implementation of the extra atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/extra_operations_fwd.hpp>
+#include <boost/atomic/detail/lockpool.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(BOOST_MSVC)
+#pragma warning(push)
+// unary minus operator applied to unsigned type, result still unsigned
+#pragma warning(disable: 4146)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Lock-based (lockpool) emulation of the extra atomic operations, for storage that is not lock-free
+template< typename Base, std::size_t Size, bool Signed >
+struct emulated_extra_operations :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT // memory_order is unused: the lock serializes the access
+ {
+ storage_type& s = const_cast< storage_type& >(storage); // volatile dropped; access is guarded by the lock below
+ lockpool::scoped_lock lock(&storage); // lock is selected by the storage address
+ storage_type old_val = s;
+ s = static_cast< storage_type >(-old_val); // unary minus on unsigned is intended (MSVC C4146 suppressed above)
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT // returns the new (negated) value
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = static_cast< storage_type >(-s);
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT // returns the new value, unlike fetch_add
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = s;
+ new_val += v;
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = s;
+ new_val -= v;
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = s;
+ new_val &= v;
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = s;
+ new_val |= v;
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = s;
+ new_val ^= v;
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT // returns the previous value
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type old_val = s;
+ s = static_cast< storage_type >(~old_val);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT // returns the new (complemented) value
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ storage_type new_val = static_cast< storage_type >(~s);
+ s = new_val;
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT // opaque_* discard the result
+ {
+ Base::fetch_add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ Base::fetch_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT // *_and_test: true iff the resulting value is non-zero
+ {
+ return !!add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT // returns the previous state of the bit
+ {
+ storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number); // single-bit mask for bit_number
+ storage_type old_val = Base::fetch_or(storage, mask, order);
+ return !!(old_val & mask);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
+ storage_type old_val = Base::fetch_and(storage, ~mask, order);
+ return !!(old_val & mask);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type mask = static_cast< storage_type >(static_cast< storage_type >(1u) << bit_number);
+ storage_type old_val = Base::fetch_xor(storage, mask, order);
+ return !!(old_val & mask);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Signed >
+struct extra_operations< Base, Size, Signed, false > : // selected when Base::is_always_lock_free is false (see extra_operations_fwd.hpp default argument)
+ public emulated_extra_operations< Base, Size, Signed >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#if defined(BOOST_MSVC)
+#pragma warning(pop)
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_EXTRA_OPS_EMULATED_HPP_INCLUDED_
diff --git a/boost/atomic/detail/extra_ops_gcc_arm.hpp b/boost/atomic/detail/extra_ops_gcc_arm.hpp
index 9bd4829bd8..e84f1771da 100644
--- a/boost/atomic/detail/extra_ops_gcc_arm.hpp
+++ b/boost/atomic/detail/extra_ops_gcc_arm.hpp
@@ -3,7 +3,7 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2017 Andrey Semashev
+ * Copyright (c) 2017 - 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_ops_gcc_arm.hpp
@@ -33,15 +33,71 @@ namespace boost {
namespace atomics {
namespace detail {
+template< typename Base >
+struct gcc_arm_extra_operations_common : // opaque_* and *_and_test wrappers shared by the per-size ARM implementations in Base
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT // result discarded
+ {
+ base_type::fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT // *_and_test: true iff the resulting value is non-zero
+ {
+ return !!base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_complement(storage, order);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Signed >
+struct gcc_arm_extra_operations;
+
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
template< typename Base, bool Signed >
-struct extra_operations< Base, 1u, Signed > :
+struct gcc_arm_extra_operations< Base, 1u, Signed > :
public generic_extra_operations< Base, 1u, Signed >
{
typedef generic_extra_operations< Base, 1u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
@@ -52,7 +108,7 @@ struct extra_operations< Base, 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"rsb %[result], %[original], #0\n" // result = 0 - original
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -69,6 +125,162 @@ struct extra_operations< Base, 1u, Signed > :
return static_cast< storage_type >(original);
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_arm_operations_base::fence_before(order);
@@ -78,7 +290,7 @@ struct extra_operations< Base, 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"mvn %[result], %[original]\n" // result = NOT original
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -95,15 +307,37 @@ struct extra_operations< Base, 1u, Signed > :
return static_cast< storage_type >(original);
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
}
+};
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- fetch_complement(storage, order);
- }
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 1u, Signed > >
+{
};
#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXB_STREXB)
@@ -111,12 +345,12 @@ struct extra_operations< Base, 1u, Signed > :
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
template< typename Base, bool Signed >
-struct extra_operations< Base, 2u, Signed > :
+struct gcc_arm_extra_operations< Base, 2u, Signed > :
public generic_extra_operations< Base, 2u, Signed >
{
typedef generic_extra_operations< Base, 2u, Signed > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
@@ -127,7 +361,7 @@ struct extra_operations< Base, 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"rsb %[result], %[original], #0\n" // result = 0 - original
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -144,6 +378,162 @@ struct extra_operations< Base, 2u, Signed > :
return static_cast< storage_type >(original);
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_arm_operations_base::fence_before(order);
@@ -153,7 +543,7 @@ struct extra_operations< Base, 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"mvn %[result], %[original]\n" // result = NOT original
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -170,21 +560,43 @@ struct extra_operations< Base, 2u, Signed > :
return static_cast< storage_type >(original);
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ extended_storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return static_cast< storage_type >(result);
}
+};
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- fetch_complement(storage, order);
- }
+template< typename Base, bool Signed >
+struct extra_operations< Base, 2u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 2u, Signed > >
+{
};
#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXH_STREXH)
template< typename Base, bool Signed >
-struct extra_operations< Base, 4u, Signed > :
+struct gcc_arm_extra_operations< Base, 4u, Signed > :
public generic_extra_operations< Base, 4u, Signed >
{
typedef generic_extra_operations< Base, 4u, Signed > base_type;
@@ -216,6 +628,162 @@ struct extra_operations< Base, 4u, Signed > :
return original;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "rsb %[result], %[original], #0\n" // result = 0 - original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "add %[result], %[original], %[value]\n" // result = original + value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "sub %[result], %[original], %[value]\n" // result = original - value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "and %[result], %[original], %[value]\n" // result = original & value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "orr %[result], %[original], %[value]\n" // result = original | value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "eor %[result], %[original], %[value]\n" // result = original ^ value
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ : [value] "Ir" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_arm_operations_base::fence_before(order);
@@ -242,21 +810,43 @@ struct extra_operations< Base, 4u, Signed > :
return original;
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_arm_operations_base::fence_before(order);
+ uint32_t tmp;
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
+ "1:\n"
+ "ldrex %[original], %[storage]\n" // original = *(&storage)
+ "mvn %[result], %[original]\n" // result = NOT original
+ "strex %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
+ "teq %[tmp], #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
+ : [original] "=&r" (original), // %0
+ [result] "=&r" (result), // %1
+ [tmp] "=&l" (tmp), // %2
+ [storage] "+Q" (storage) // %3
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
}
+};
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- fetch_complement(storage, order);
- }
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 4u, Signed > >
+{
};
#if defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
template< typename Base, bool Signed >
-struct extra_operations< Base, 8u, Signed > :
+struct gcc_arm_extra_operations< Base, 8u, Signed > :
public generic_extra_operations< Base, 8u, Signed >
{
typedef generic_extra_operations< Base, 8u, Signed > base_type;
@@ -290,7 +880,7 @@ struct extra_operations< Base, 8u, Signed > :
return original;
}
- static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_arm_operations_base::fence_before(order);
storage_type original, result;
@@ -302,6 +892,8 @@ struct extra_operations< Base, 8u, Signed > :
"ldrexd %1, %H1, [%3]\n" // original = *(&storage)
"mvn %2, %1\n" // result = NOT original
"mvn %H2, %H1\n"
+ "adds %2, %2, #1\n" // result = result + 1
+ "adc %H2, %H2, #0\n"
"strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
"teq %0, #0\n" // flags = tmp==0
"bne 1b\n" // if (!flags.equal) goto retry
@@ -313,18 +905,201 @@ struct extra_operations< Base, 8u, Signed > :
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
gcc_arm_operations_base::fence_after(order);
- return original;
+ return result;
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "adds %2, %1, %4\n" // result = original + value
+ "adc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
}
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fetch_complement(storage, order);
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "subs %2, %1, %4\n" // result = original - value
+ "sbc %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "and %2, %1, %4\n" // result = original & value
+ "and %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
}
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "orr %2, %1, %4\n" // result = original | value
+ "orr %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "eor %2, %1, %4\n" // result = original ^ value
+ "eor %H2, %H1, %H4\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage), // %3
+ "r" (v) // %4
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_arm_operations_base::fence_before(order);
+ storage_type original, result;
+ uint32_t tmp;
+ __asm__ __volatile__
+ (
+ BOOST_ATOMIC_DETAIL_ARM_ASM_START(%0)
+ "1:\n"
+ "ldrexd %1, %H1, [%3]\n" // original = *(&storage)
+ "mvn %2, %1\n" // result = NOT original
+ "mvn %H2, %H1\n"
+ "strexd %0, %2, %H2, [%3]\n" // *(&storage) = result, tmp = store failed
+ "teq %0, #0\n" // flags = tmp==0
+ "bne 1b\n" // if (!flags.equal) goto retry
+ BOOST_ATOMIC_DETAIL_ARM_ASM_END(%0)
+ : BOOST_ATOMIC_DETAIL_ARM_ASM_TMPREG_CONSTRAINT(tmp), // %0
+ "=&r" (original), // %1
+ "=&r" (result) // %2
+ : "r" (&storage) // %3
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ gcc_arm_operations_base::fence_after(order);
+ return result;
+ }
+};
+
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public gcc_arm_extra_operations_common< gcc_arm_extra_operations< Base, 8u, Signed > >
+{
};
#endif // defined(BOOST_ATOMIC_DETAIL_ARM_HAS_LDREXD_STREXD)
diff --git a/boost/atomic/detail/extra_ops_gcc_ppc.hpp b/boost/atomic/detail/extra_ops_gcc_ppc.hpp
index cc32e4960b..dc4bbdbf74 100644
--- a/boost/atomic/detail/extra_ops_gcc_ppc.hpp
+++ b/boost/atomic/detail/extra_ops_gcc_ppc.hpp
@@ -3,7 +3,7 @@
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
- * Copyright (c) 2017 Andrey Semashev
+ * Copyright (c) 2017 - 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/extra_ops_gcc_ppc.hpp
@@ -31,10 +31,66 @@ namespace boost {
namespace atomics {
namespace detail {
+template< typename Base >
+struct gcc_ppc_extra_operations_common :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!base_type::bitwise_complement(storage, order);
+ }
+};
+
+template< typename Base, std::size_t Size, bool Signed >
+struct gcc_ppc_extra_operations;
+
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
template< typename Base, bool Signed >
-struct extra_operations< Base, 1u, Signed > :
+struct gcc_ppc_extra_operations< Base, 1u, Signed > :
public generic_extra_operations< Base, 1u, Signed >
{
typedef generic_extra_operations< Base, 1u, Signed > base_type;
@@ -43,7 +99,7 @@ struct extra_operations< Base, 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -51,7 +107,7 @@ struct extra_operations< Base, 1u, Signed > :
"neg %1,%0\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -59,10 +115,124 @@ struct extra_operations< Base, 1u, Signed > :
return original;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -70,7 +240,7 @@ struct extra_operations< Base, 1u, Signed > :
"nor %1,%0,%0\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -78,15 +248,30 @@ struct extra_operations< Base, 1u, Signed > :
return original;
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lbarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stbcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
}
+};
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- fetch_complement(storage, order);
- }
+template< typename Base, bool Signed >
+struct extra_operations< Base, 1u, Signed, true > :
+ public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 1u, Signed > >
+{
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LBARX_STBCX)
@@ -94,7 +279,7 @@ struct extra_operations< Base, 1u, Signed > :
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< typename Base, bool Signed >
-struct extra_operations< Base, 2u, Signed > :
+struct gcc_ppc_extra_operations< Base, 2u, Signed > :
public generic_extra_operations< Base, 2u, Signed >
{
typedef generic_extra_operations< Base, 2u, Signed > base_type;
@@ -103,7 +288,7 @@ struct extra_operations< Base, 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -111,7 +296,7 @@ struct extra_operations< Base, 2u, Signed > :
"neg %1,%0\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -119,40 +304,163 @@ struct extra_operations< Base, 2u, Signed > :
return original;
}
- static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
"lharx %0,%y2\n\t"
- "nor %1,%0,%0\n\t"
+ "neg %1,%0\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
gcc_ppc_operations_base::fence_after(order);
- return original;
+ return result;
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
}
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_complement(storage, order);
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lharx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "sthcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
}
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LHARX_STHCX)
template< typename Base, bool Signed >
-struct extra_operations< Base, 4u, Signed > :
+struct gcc_ppc_extra_operations< Base, 4u, Signed > :
public generic_extra_operations< Base, 4u, Signed >
{
typedef generic_extra_operations< Base, 4u, Signed > base_type;
@@ -161,7 +469,7 @@ struct extra_operations< Base, 4u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -169,7 +477,7 @@ struct extra_operations< Base, 4u, Signed > :
"neg %1,%0\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -177,10 +485,124 @@ struct extra_operations< Base, 4u, Signed > :
return original;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -188,7 +610,7 @@ struct extra_operations< Base, 4u, Signed > :
"nor %1,%0,%0\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -196,21 +618,36 @@ struct extra_operations< Base, 4u, Signed > :
return original;
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "lwarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stwcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
}
+};
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- fetch_complement(storage, order);
- }
+template< typename Base, bool Signed >
+struct extra_operations< Base, 4u, Signed, true > :
+ public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 4u, Signed > >
+{
};
#if defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
template< typename Base, bool Signed >
-struct extra_operations< Base, 8u, Signed > :
+struct gcc_ppc_extra_operations< Base, 8u, Signed > :
public generic_extra_operations< Base, 8u, Signed >
{
typedef generic_extra_operations< Base, 8u, Signed > base_type;
@@ -219,7 +656,7 @@ struct extra_operations< Base, 8u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -227,7 +664,7 @@ struct extra_operations< Base, 8u, Signed > :
"neg %1,%0\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -235,10 +672,124 @@ struct extra_operations< Base, 8u, Signed > :
return original;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "neg %1,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "add %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "sub %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "and %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "or %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, result;
+ gcc_ppc_operations_base::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "xor %1,%0,%3\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ : "b" (v)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
gcc_ppc_operations_base::fence_before(order);
- storage_type original, tmp;
+ storage_type original, result;
__asm__ __volatile__
(
"1:\n\t"
@@ -246,7 +797,7 @@ struct extra_operations< Base, 8u, Signed > :
"nor %1,%0,%0\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -254,15 +805,30 @@ struct extra_operations< Base, 8u, Signed > :
return original;
}
- static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- fetch_negate(storage, order);
+ gcc_ppc_operations_base::fence_before(order);
+ storage_type original, result;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldarx %0,%y2\n\t"
+ "nor %1,%0,%0\n\t"
+ "stdcx. %1,%y2\n\t"
+ "bne- 1b\n\t"
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
+ :
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ gcc_ppc_operations_base::fence_after(order);
+ return result;
}
+};
- static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
- {
- fetch_complement(storage, order);
- }
+template< typename Base, bool Signed >
+struct extra_operations< Base, 8u, Signed, true > :
+ public gcc_ppc_extra_operations_common< gcc_ppc_extra_operations< Base, 8u, Signed > >
+{
};
#endif // defined(BOOST_ATOMIC_DETAIL_PPC_HAS_LDARX_STDCX)
diff --git a/boost/atomic/detail/extra_ops_gcc_x86.hpp b/boost/atomic/detail/extra_ops_gcc_x86.hpp
index 59b39064d2..ee2cd02a88 100644
--- a/boost/atomic/detail/extra_ops_gcc_x86.hpp
+++ b/boost/atomic/detail/extra_ops_gcc_x86.hpp
@@ -36,6 +36,16 @@ struct gcc_x86_extra_operations_common :
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(Base::fetch_add(storage, v, order) + v);
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return static_cast< storage_type >(Base::fetch_sub(storage, v, order) - v);
+ }
+
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order) BOOST_NOEXCEPT
{
bool res;
@@ -110,43 +120,109 @@ struct gcc_x86_extra_operations_common :
};
template< typename Base, bool Signed >
-struct extra_operations< Base, 1u, Signed > :
+struct extra_operations< Base, 1u, Signed, true > :
public gcc_x86_extra_operations_common< Base >
{
typedef gcc_x86_extra_operations_common< Base > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
- temp_storage_type new_val;\
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
__asm__ __volatile__\
(\
".align 16\n\t"\
- "1: movzbl %[res], %2\n\t"\
+ "1: movzbl %[orig], %2\n\t"\
op " %b2\n\t"\
"lock; cmpxchgb %b2, %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
: \
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("negb", res);
- return res;
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negb", original, result);
+ return original;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("notb", res);
- return res;
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notb", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negb", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notb", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%al, %b2\n\t"\
+ "lock; cmpxchgb %b2, %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, original, result);
+ return static_cast< storage_type >(result);
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
@@ -259,7 +335,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; incb %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -269,7 +345,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; addb %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -280,7 +356,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; incb %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -291,7 +367,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; addb %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -310,7 +386,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; decb %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -320,7 +396,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; subb %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -331,7 +407,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; decb %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -342,7 +418,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; subb %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -359,7 +435,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; andb %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -367,7 +443,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; andb %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -383,7 +459,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; orb %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -391,7 +467,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; orb %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -407,7 +483,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; xorb %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -415,7 +491,7 @@ struct extra_operations< Base, 1u, Signed > :
__asm__ __volatile__
(
"lock; xorb %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -426,43 +502,109 @@ struct extra_operations< Base, 1u, Signed > :
};
template< typename Base, bool Signed >
-struct extra_operations< Base, 2u, Signed > :
+struct extra_operations< Base, 2u, Signed, true > :
public gcc_x86_extra_operations_common< Base >
{
typedef gcc_x86_extra_operations_common< Base > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
- temp_storage_type new_val;\
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
__asm__ __volatile__\
(\
".align 16\n\t"\
- "1: movzwl %[res], %2\n\t"\
+ "1: movzwl %[orig], %2\n\t"\
op " %w2\n\t"\
"lock; cmpxchgw %w2, %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), "=&q" (new_val)\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
: \
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("negw", res);
- return res;
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negw", original, result);
+ return original;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("notw", res);
- return res;
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notw", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negw", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notw", original, result);
+ return static_cast< storage_type >(result);
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %2\n\t"\
+ op " %%ax, %w2\n\t"\
+ "lock; cmpxchgw %w2, %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), "=&q" (result)\
+ : [arg] "ir" ((temp_storage_type)argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ temp_storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, original, result);
+ return static_cast< storage_type >(result);
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
@@ -575,7 +717,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; incw %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -585,7 +727,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; addw %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -596,7 +738,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; incw %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -607,7 +749,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; addw %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -626,7 +768,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; decw %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -636,7 +778,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; subw %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -647,7 +789,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; decw %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -658,7 +800,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; subw %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -675,7 +817,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; andw %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -683,7 +825,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; andw %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -699,7 +841,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; orw %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -707,7 +849,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; orw %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -723,7 +865,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; xorw %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "iq" (v)
: "memory"
);
@@ -731,7 +873,7 @@ struct extra_operations< Base, 2u, Signed > :
__asm__ __volatile__
(
"lock; xorw %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "iq" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -742,42 +884,108 @@ struct extra_operations< Base, 2u, Signed > :
};
template< typename Base, bool Signed >
-struct extra_operations< Base, 4u, Signed > :
+struct extra_operations< Base, 4u, Signed, true > :
public gcc_x86_extra_operations_common< Base >
{
typedef gcc_x86_extra_operations_common< Base > base_type;
typedef typename base_type::storage_type storage_type;
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
- storage_type new_val;\
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
__asm__ __volatile__\
(\
".align 16\n\t"\
- "1: mov %[res], %[new_val]\n\t"\
- op " %[new_val]\n\t"\
- "lock; cmpxchgl %[new_val], %[storage]\n\t"\
+ "1: mov %[orig], %[res]\n\t"\
+ op " %[res]\n\t"\
+ "lock; cmpxchgl %[res], %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
: \
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("negl", res);
- return res;
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negl", original, result);
+ return original;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("notl", res);
- return res;
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notl", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negl", original, result);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notl", original, result);
+ return result;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %[res]\n\t"\
+ op " %%eax, %[res]\n\t"\
+ "lock; cmpxchgl %[res], %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
+ : [arg] "ir" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, original, result);
+ return static_cast< storage_type >(result);
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
@@ -890,7 +1098,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; incl %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -900,7 +1108,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; addl %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "ir" (v)
: "memory"
);
@@ -911,7 +1119,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; incl %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -922,7 +1130,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; addl %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "ir" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -941,7 +1149,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; decl %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -951,7 +1159,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; subl %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "ir" (v)
: "memory"
);
@@ -962,7 +1170,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; decl %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -973,7 +1181,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; subl %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "ir" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -990,7 +1198,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; andl %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "ir" (v)
: "memory"
);
@@ -998,7 +1206,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; andl %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "ir" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1014,7 +1222,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; orl %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "ir" (v)
: "memory"
);
@@ -1022,7 +1230,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; orl %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "ir" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1038,7 +1246,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; xorl %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "ir" (v)
: "memory"
);
@@ -1046,7 +1254,7 @@ struct extra_operations< Base, 4u, Signed > :
__asm__ __volatile__
(
"lock; xorl %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "ir" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1059,42 +1267,108 @@ struct extra_operations< Base, 4u, Signed > :
#if defined(__x86_64__)
template< typename Base, bool Signed >
-struct extra_operations< Base, 8u, Signed > :
+struct extra_operations< Base, 8u, Signed, true > :
public gcc_x86_extra_operations_common< Base >
{
typedef gcc_x86_extra_operations_common< Base > base_type;
typedef typename base_type::storage_type storage_type;
-#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, result)\
- storage_type new_val;\
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, original, result)\
__asm__ __volatile__\
(\
".align 16\n\t"\
- "1: mov %[res], %[new_val]\n\t"\
- op " %[new_val]\n\t"\
- "lock; cmpxchgq %[new_val], %[storage]\n\t"\
+ "1: mov %[orig], %[res]\n\t"\
+ op " %[res]\n\t"\
+ "lock; cmpxchgq %[res], %[storage]\n\t"\
"jne 1b"\
- : [res] "+a" (result), [storage] "+m" (storage), [new_val] "=&r" (new_val)\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
: \
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
)
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("negq", res);
- return res;
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negq", original, result);
+ return original;
}
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
{
- storage_type res = storage;
- BOOST_ATOMIC_DETAIL_CAS_LOOP("notq", res);
- return res;
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notq", original, result);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("negq", original, result);
+ return result;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("notq", original, result);
+ return result;
+ }
+
+#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+
+#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, original, result)\
+ __asm__ __volatile__\
+ (\
+ ".align 16\n\t"\
+ "1: mov %[arg], %[res]\n\t"\
+ op " %%rax, %[res]\n\t"\
+ "lock; cmpxchgq %[res], %[storage]\n\t"\
+ "jne 1b"\
+ : [orig] "+a" (original), [storage] "+m" (storage), [res] "=&r" (result)\
+ : [arg] "r" (argument)\
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"\
+ )
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, original, result);
+ return static_cast< storage_type >(result);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type original = storage;
+ storage_type result;
+ BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, original, result);
+ return static_cast< storage_type >(result);
}
#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_complement(storage, order);
+ }
+
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
if (BOOST_ATOMIC_DETAIL_IS_CONSTANT(v) && v == 1)
@@ -1207,7 +1481,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; incq %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -1217,7 +1491,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; addq %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "er" (v)
: "memory"
);
@@ -1228,7 +1502,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; incq %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1239,7 +1513,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; addq %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "er" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1258,7 +1532,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; decq %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
:
: "memory"
);
@@ -1268,7 +1542,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; subq %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "er" (v)
: "memory"
);
@@ -1279,7 +1553,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; decq %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
:
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1290,7 +1564,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; subq %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "er" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1307,7 +1581,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; andq %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "er" (v)
: "memory"
);
@@ -1315,7 +1589,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; andq %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "er" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1331,7 +1605,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; orq %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "er" (v)
: "memory"
);
@@ -1339,7 +1613,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; orq %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "er" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
@@ -1355,7 +1629,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; xorq %[argument], %[storage]\n\t"
- : [storage] "+m" (storage), [result] "=@ccz" (res)
+ : [storage] "+m" (storage), [result] "=@ccnz" (res)
: [argument] "er" (v)
: "memory"
);
@@ -1363,7 +1637,7 @@ struct extra_operations< Base, 8u, Signed > :
__asm__ __volatile__
(
"lock; xorq %[argument], %[storage]\n\t"
- "setz %[result]\n\t"
+ "setnz %[result]\n\t"
: [storage] "+m" (storage), [result] "=q" (res)
: [argument] "er" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
diff --git a/boost/atomic/detail/extra_ops_generic.hpp b/boost/atomic/detail/extra_ops_generic.hpp
index 00930e9a81..43842628a2 100644
--- a/boost/atomic/detail/extra_ops_generic.hpp
+++ b/boost/atomic/detail/extra_ops_generic.hpp
@@ -18,6 +18,7 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/integral_extend.hpp>
#include <boost/atomic/detail/extra_operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>
@@ -36,36 +37,78 @@ namespace atomics {
namespace detail {
//! Generic implementation of extra operations
-template< typename Base, std::size_t Size, bool Signed >
+template< typename Base, std::size_t Size, bool Signed, bool = Base::full_cas_based >
struct generic_extra_operations :
public Base
{
typedef Base base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;
- typedef typename make_storage_type< Size, false >::type unsigned_emulated_storage_type;
+ typedef typename make_storage_type< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
- while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(-static_cast< emulated_storage_type >(old_val)), order, memory_order_relaxed)) {}
+ while (!base_type::compare_exchange_weak(storage, old_val, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val)), order, memory_order_relaxed)) {}
return old_val;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return base_type::fetch_add(storage, v, order) + v;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return base_type::fetch_sub(storage, v, order) - v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return base_type::fetch_and(storage, v, order) & v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return base_type::fetch_or(storage, v, order) | v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return base_type::fetch_xor(storage, v, order) ^ v;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- return Base::fetch_xor(storage, static_cast< storage_type >(~static_cast< emulated_storage_type >(0)), order);
+ return base_type::fetch_xor(storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u)));
+ return base_type::fetch_xor(storage, mask, order) ^ mask;
}
static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_add(storage, v, order);
+ base_type::fetch_add(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_sub(storage, v, order);
+ base_type::fetch_sub(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
@@ -75,17 +118,17 @@ struct generic_extra_operations :
static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_and(storage, v, order);
+ base_type::fetch_and(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_or(storage, v, order);
+ base_type::fetch_or(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- Base::fetch_xor(storage, v, order);
+ base_type::fetch_xor(storage, v, order);
}
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
@@ -95,58 +138,255 @@ struct generic_extra_operations :
static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::fetch_add(storage, v, order);
- emulated_storage_type new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
- return !new_val;
+ return !!static_cast< emulated_storage_type >(add(storage, v, order));
}
static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type old_val = Base::fetch_sub(storage, v, order);
- emulated_storage_type new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);
- return !new_val;
+ return !!static_cast< emulated_storage_type >(sub(storage, v, order));
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
}
static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- return !(Base::fetch_and(storage, v, order) & v);
+ return !!bitwise_and(storage, v, order);
}
static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- return !(Base::fetch_or(storage, v, order) | v);
+ return !!bitwise_or(storage, v, order);
}
static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- return !(Base::fetch_xor(storage, v, order) ^ v);
+ return !!bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!static_cast< emulated_storage_type >(bitwise_complement(storage, order));
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
+ storage_type old_val = base_type::fetch_or(storage, mask, order);
+ return !!(old_val & mask);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
+ storage_type old_val = base_type::fetch_and(storage, ~mask, order);
+ return !!(old_val & mask);
+ }
+
+ static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
+ {
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
+ storage_type old_val = base_type::fetch_xor(storage, mask, order);
+ return !!(old_val & mask);
+ }
+};
+
+//! Specialization for cases when the platform only natively supports CAS
+template< typename Base, std::size_t Size, bool Signed >
+struct generic_extra_operations< Base, Size, Signed, true > :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef typename make_storage_type< Size >::type emulated_storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ while (!base_type::compare_exchange_weak(storage, old_val, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val)), order, memory_order_relaxed)) {}
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(-old_val));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val & v));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val | v));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_val);
+ do
+ {
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val ^ v));
+ }
+ while (!base_type::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return base_type::fetch_xor(storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return bitwise_xor(storage, atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(~static_cast< emulated_storage_type >(0u))), order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_add(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_sub(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fetch_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ fetch_complement(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool add_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!static_cast< emulated_storage_type >(add(storage, v, order));
+ }
+
+ static BOOST_FORCEINLINE bool sub_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!static_cast< emulated_storage_type >(sub(storage, v, order));
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!negate(storage, order);
+ }
+
+ static BOOST_FORCEINLINE bool and_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_and(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool or_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_or(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool xor_and_test(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!bitwise_xor(storage, v, order);
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!static_cast< emulated_storage_type >(bitwise_complement(storage, order));
}
static BOOST_FORCEINLINE bool bit_test_and_set(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
- storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
- storage_type old_val = Base::fetch_or(storage, mask, order);
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
+ storage_type old_val = base_type::fetch_or(storage, mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_reset(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
- storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
- storage_type old_val = Base::fetch_and(storage, ~mask, order);
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
+ storage_type old_val = base_type::fetch_and(storage, ~mask, order);
return !!(old_val & mask);
}
static BOOST_FORCEINLINE bool bit_test_and_complement(storage_type volatile& storage, unsigned int bit_number, memory_order order) BOOST_NOEXCEPT
{
- storage_type mask = storage_type(((unsigned_emulated_storage_type)1u) << bit_number);
- storage_type old_val = Base::fetch_xor(storage, mask, order);
+ const storage_type mask = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(static_cast< emulated_storage_type >(1u) << bit_number));
+ storage_type old_val = base_type::fetch_xor(storage, mask, order);
return !!(old_val & mask);
}
};
// Default extra_operations template definition will be used unless specialized for a specific platform
template< typename Base, std::size_t Size, bool Signed >
-struct extra_operations :
+struct extra_operations< Base, Size, Signed, true > :
public generic_extra_operations< Base, Size, Signed >
{
};
diff --git a/boost/atomic/detail/extra_ops_msvc_arm.hpp b/boost/atomic/detail/extra_ops_msvc_arm.hpp
index 5989d6677f..b8eb5bcb31 100644
--- a/boost/atomic/detail/extra_ops_msvc_arm.hpp
+++ b/boost/atomic/detail/extra_ops_msvc_arm.hpp
@@ -34,7 +34,7 @@ namespace detail {
#if defined(BOOST_ATOMIC_INTERLOCKED_BTS) && defined(BOOST_ATOMIC_INTERLOCKED_BTR)
template< typename Base, std::size_t Size, bool Signed >
-struct extra_operations< Base, 4u, Signed > :
+struct extra_operations< Base, 4u, Signed, true > :
public generic_extra_operations< Base, 4u, Signed >
{
typedef generic_extra_operations< Base, 4u, Signed > base_type;
diff --git a/boost/atomic/detail/extra_ops_msvc_x86.hpp b/boost/atomic/detail/extra_ops_msvc_x86.hpp
index 6d95dbed74..17451a83d6 100644
--- a/boost/atomic/detail/extra_ops_msvc_x86.hpp
+++ b/boost/atomic/detail/extra_ops_msvc_x86.hpp
@@ -109,7 +109,7 @@ struct msvc_x86_extra_operations_common :
};
template< typename Base, bool Signed >
-struct extra_operations< Base, 1u, Signed > :
+struct extra_operations< Base, 1u, Signed, true > :
public msvc_x86_extra_operations_common< Base, 1u, Signed >
{
typedef msvc_x86_extra_operations_common< Base, 1u, Signed > base_type;
@@ -136,6 +136,47 @@ struct extra_operations< Base, 1u, Signed > :
return old_val;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov new_val, dl
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ test dl, dl
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -153,6 +194,69 @@ struct extra_operations< Base, 1u, Signed > :
base_type::fence_after(order);
}
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ and dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, dl
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ or dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, dl
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, byte ptr [edi]
+ align 16
+ again:
+ mov dl, al
+ xor dl, cl
+ lock cmpxchg byte ptr [edi], dl
+ jne again
+ mov v, dl
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -173,6 +277,47 @@ struct extra_operations< Base, 1u, Signed > :
return old_val;
}
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ mov new_val, dl
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, byte ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dl
+ lock cmpxchg byte ptr [ecx], dl
+ jne again
+ test dl, dl
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -281,7 +426,7 @@ struct extra_operations< Base, 1u, Signed > :
mov edx, storage
movzx eax, v
lock add byte ptr [edx], al
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -296,7 +441,7 @@ struct extra_operations< Base, 1u, Signed > :
mov edx, storage
movzx eax, v
lock sub byte ptr [edx], al
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -311,7 +456,7 @@ struct extra_operations< Base, 1u, Signed > :
mov edx, storage
movzx eax, v
lock and byte ptr [edx], al
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -326,7 +471,7 @@ struct extra_operations< Base, 1u, Signed > :
mov edx, storage
movzx eax, v
lock or byte ptr [edx], al
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -341,7 +486,7 @@ struct extra_operations< Base, 1u, Signed > :
mov edx, storage
movzx eax, v
lock xor byte ptr [edx], al
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -350,7 +495,7 @@ struct extra_operations< Base, 1u, Signed > :
};
template< typename Base, bool Signed >
-struct extra_operations< Base, 2u, Signed > :
+struct extra_operations< Base, 2u, Signed, true > :
public msvc_x86_extra_operations_common< Base, 2u, Signed >
{
typedef msvc_x86_extra_operations_common< Base, 2u, Signed > base_type;
@@ -377,6 +522,47 @@ struct extra_operations< Base, 2u, Signed > :
return old_val;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov new_val, dx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ test dx, dx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -394,6 +580,69 @@ struct extra_operations< Base, 2u, Signed > :
base_type::fence_after(order);
}
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ and dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, dx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ or dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, dx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ movzx ecx, v
+ xor edx, edx
+ movzx eax, word ptr [edi]
+ align 16
+ again:
+ mov dx, ax
+ xor dx, cx
+ lock cmpxchg word ptr [edi], dx
+ jne again
+ mov v, dx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -414,6 +663,47 @@ struct extra_operations< Base, 2u, Signed > :
return old_val;
}
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ mov new_val, dx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ movzx eax, word ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not dx
+ lock cmpxchg word ptr [ecx], dx
+ jne again
+ test dx, dx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -522,7 +812,7 @@ struct extra_operations< Base, 2u, Signed > :
mov edx, storage
movzx eax, v
lock add word ptr [edx], ax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -537,7 +827,7 @@ struct extra_operations< Base, 2u, Signed > :
mov edx, storage
movzx eax, v
lock sub word ptr [edx], ax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -552,7 +842,7 @@ struct extra_operations< Base, 2u, Signed > :
mov edx, storage
movzx eax, v
lock and word ptr [edx], ax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -567,7 +857,7 @@ struct extra_operations< Base, 2u, Signed > :
mov edx, storage
movzx eax, v
lock or word ptr [edx], ax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -582,7 +872,7 @@ struct extra_operations< Base, 2u, Signed > :
mov edx, storage
movzx eax, v
lock xor word ptr [edx], ax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -591,7 +881,7 @@ struct extra_operations< Base, 2u, Signed > :
};
template< typename Base, bool Signed >
-struct extra_operations< Base, 4u, Signed > :
+struct extra_operations< Base, 4u, Signed, true > :
public msvc_x86_extra_operations_common< Base, 4u, Signed >
{
typedef msvc_x86_extra_operations_common< Base, 4u, Signed > base_type;
@@ -618,6 +908,47 @@ struct extra_operations< Base, 4u, Signed > :
return old_val;
}
+ static BOOST_FORCEINLINE storage_type negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov new_val, edx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool negate_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ neg edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ test edx, edx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE void opaque_negate(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -635,6 +966,69 @@ struct extra_operations< Base, 4u, Signed > :
base_type::fence_after(order);
}
+ static BOOST_FORCEINLINE storage_type bitwise_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ mov ecx, v
+ xor edx, edx
+ mov eax, dword ptr [edi]
+ align 16
+ again:
+ mov edx, eax
+ and edx, ecx
+ lock cmpxchg dword ptr [edi], edx
+ jne again
+ mov v, edx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ mov ecx, v
+ xor edx, edx
+ mov eax, dword ptr [edi]
+ align 16
+ again:
+ mov edx, eax
+ or edx, ecx
+ lock cmpxchg dword ptr [edi], edx
+ jne again
+ mov v, edx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type bitwise_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ __asm
+ {
+ mov edi, storage
+ mov ecx, v
+ xor edx, edx
+ mov eax, dword ptr [edi]
+ align 16
+ again:
+ mov edx, eax
+ xor edx, ecx
+ lock cmpxchg dword ptr [edi], edx
+ jne again
+ mov v, edx
+ };
+ base_type::fence_after(order);
+ return v;
+ }
+
static BOOST_FORCEINLINE storage_type fetch_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -655,6 +1049,47 @@ struct extra_operations< Base, 4u, Signed > :
return old_val;
}
+ static BOOST_FORCEINLINE storage_type bitwise_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ storage_type new_val;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ mov new_val, edx
+ };
+ base_type::fence_after(order);
+ return new_val;
+ }
+
+ static BOOST_FORCEINLINE bool complement_and_test(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ base_type::fence_before(order);
+ bool result;
+ __asm
+ {
+ mov ecx, storage
+ mov eax, dword ptr [ecx]
+ align 16
+ again:
+ mov edx, eax
+ not edx
+ lock cmpxchg dword ptr [ecx], edx
+ jne again
+ test edx, edx
+ setnz result
+ };
+ base_type::fence_after(order);
+ return result;
+ }
+
static BOOST_FORCEINLINE void opaque_complement(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
@@ -763,7 +1198,7 @@ struct extra_operations< Base, 4u, Signed > :
mov edx, storage
mov eax, v
lock add dword ptr [edx], eax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -778,7 +1213,7 @@ struct extra_operations< Base, 4u, Signed > :
mov edx, storage
mov eax, v
lock sub dword ptr [edx], eax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -793,7 +1228,7 @@ struct extra_operations< Base, 4u, Signed > :
mov edx, storage
mov eax, v
lock and dword ptr [edx], eax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -808,7 +1243,7 @@ struct extra_operations< Base, 4u, Signed > :
mov edx, storage
mov eax, v
lock or dword ptr [edx], eax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -823,7 +1258,7 @@ struct extra_operations< Base, 4u, Signed > :
mov edx, storage
mov eax, v
lock xor dword ptr [edx], eax
- setz result
+ setnz result
};
base_type::fence_after(order);
return result;
@@ -836,7 +1271,7 @@ struct extra_operations< Base, 4u, Signed > :
#if defined(BOOST_ATOMIC_INTERLOCKED_BTS64) && defined(BOOST_ATOMIC_INTERLOCKED_BTR64)
template< typename Base, bool Signed >
-struct extra_operations< Base, 8u, Signed > :
+struct extra_operations< Base, 8u, Signed, true > :
public generic_extra_operations< Base, 8u, Signed >
{
typedef generic_extra_operations< Base, 8u, Signed > base_type;
diff --git a/boost/atomic/detail/float_sizes.hpp b/boost/atomic/detail/float_sizes.hpp
new file mode 100644
index 0000000000..4c3a346f15
--- /dev/null
+++ b/boost/atomic/detail/float_sizes.hpp
@@ -0,0 +1,142 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/float_sizes.hpp
+ *
+ * This header defines macros for testing builtin floating point type sizes
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
+
+#include <float.h>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+// Detect value sizes of the different floating point types. The value sizes may be less than the corresponding type sizes
+// if the type contains padding bits. This is typical e.g. with 80-bit extended float types, which are often represented as 128-bit types.
+// See: https://en.wikipedia.org/wiki/IEEE_754
+// For Intel x87 extended double see: https://en.wikipedia.org/wiki/Extended_precision#x86_Architecture_Extended_Precision_Format
+// For IBM extended double (a.k.a. double-double) see: https://en.wikipedia.org/wiki/Long_double#Implementations, https://gcc.gnu.org/wiki/Ieee128PowerPC
+#if (FLT_RADIX+0) == 2
+
+#if ((FLT_MANT_DIG+0) == 11) && ((FLT_MAX_EXP+0) == 16) // IEEE 754 binary16
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 2
+#elif ((FLT_MANT_DIG+0) == 24) && ((FLT_MAX_EXP+0) == 128) // IEEE 754 binary32
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
+#elif ((FLT_MANT_DIG+0) == 53) && ((FLT_MAX_EXP+0) == 1024) // IEEE 754 binary64
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
+#elif ((FLT_MANT_DIG+0) == 64) && ((FLT_MAX_EXP+0) == 16384) // x87 extended double
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 10
+#elif ((FLT_MANT_DIG+0) == 106) && ((FLT_MAX_EXP+0) == 1024) // IBM extended double
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
+#elif ((FLT_MANT_DIG+0) == 113) && ((FLT_MAX_EXP+0) == 16384) // IEEE 754 binary128
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
+#elif ((FLT_MANT_DIG+0) == 237) && ((FLT_MAX_EXP+0) == 262144) // IEEE 754 binary256
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 32
+#endif
+
+#if ((DBL_MANT_DIG+0) == 11) && ((DBL_MAX_EXP+0) == 16) // IEEE 754 binary16
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 2
+#elif ((DBL_MANT_DIG+0) == 24) && ((DBL_MAX_EXP+0) == 128) // IEEE 754 binary32
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
+#elif ((DBL_MANT_DIG+0) == 53) && ((DBL_MAX_EXP+0) == 1024) // IEEE 754 binary64
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
+#elif ((DBL_MANT_DIG+0) == 64) && ((DBL_MAX_EXP+0) == 16384) // x87 extended double
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 10
+#elif ((DBL_MANT_DIG+0) == 106) && ((DBL_MAX_EXP+0) == 1024) // IBM extended double
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
+#elif ((DBL_MANT_DIG+0) == 113) && ((DBL_MAX_EXP+0) == 16384) // IEEE 754 binary128
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
+#elif ((DBL_MANT_DIG+0) == 237) && ((DBL_MAX_EXP+0) == 262144) // IEEE 754 binary256
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 32
+#endif
+
+#if ((LDBL_MANT_DIG+0) == 11) && ((LDBL_MAX_EXP+0) == 16) // IEEE 754 binary16
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 2
+#elif ((LDBL_MANT_DIG+0) == 24) && ((LDBL_MAX_EXP+0) == 128) // IEEE 754 binary32
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
+#elif ((LDBL_MANT_DIG+0) == 53) && ((LDBL_MAX_EXP+0) == 1024) // IEEE 754 binary64
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
+#elif ((LDBL_MANT_DIG+0) == 64) && ((LDBL_MAX_EXP+0) == 16384) // x87 extended double
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 10
+#elif ((LDBL_MANT_DIG+0) == 106) && ((LDBL_MAX_EXP+0) == 1024) // IBM extended double
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
+#elif ((LDBL_MANT_DIG+0) == 113) && ((LDBL_MAX_EXP+0) == 16384) // IEEE 754 binary128
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
+#elif ((LDBL_MANT_DIG+0) == 237) && ((LDBL_MAX_EXP+0) == 262144) // IEEE 754 binary256
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 32
+#endif
+
+#elif (FLT_RADIX+0) == 10
+
+#if ((FLT_MANT_DIG+0) == 7) && ((FLT_MAX_EXP+0) == 97) // IEEE 754 decimal32
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 4
+#elif ((FLT_MANT_DIG+0) == 16) && ((FLT_MAX_EXP+0) == 385) // IEEE 754 decimal64
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 8
+#elif ((FLT_MANT_DIG+0) == 34) && ((FLT_MAX_EXP+0) == 6145) // IEEE 754 decimal128
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE 16
+#endif
+
+#if ((DBL_MANT_DIG+0) == 7) && ((DBL_MAX_EXP+0) == 97) // IEEE 754 decimal32
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 4
+#elif ((DBL_MANT_DIG+0) == 16) && ((DBL_MAX_EXP+0) == 385) // IEEE 754 decimal64
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 8
+#elif ((DBL_MANT_DIG+0) == 34) && ((DBL_MAX_EXP+0) == 6145) // IEEE 754 decimal128
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE 16
+#endif
+
+#if ((LDBL_MANT_DIG+0) == 7) && ((LDBL_MAX_EXP+0) == 97) // IEEE 754 decimal32
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 4
+#elif ((LDBL_MANT_DIG+0) == 16) && ((LDBL_MAX_EXP+0) == 385) // IEEE 754 decimal64
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 8
+#elif ((LDBL_MANT_DIG+0) == 34) && ((LDBL_MAX_EXP+0) == 6145) // IEEE 754 decimal128
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE 16
+#endif
+
+#endif
+
+// GCC and compatible compilers define internal macros with builtin type traits
+#if defined(__SIZEOF_FLOAT__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT __SIZEOF_FLOAT__
+#endif
+#if defined(__SIZEOF_DOUBLE__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE __SIZEOF_DOUBLE__
+#endif
+#if defined(__SIZEOF_LONG_DOUBLE__)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE __SIZEOF_LONG_DOUBLE__
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
+
+#define BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(x)\
+ ((x) == 1u ? 1u : ((x) == 2u ? 2u : ((x) <= 4u ? 4u : ((x) <= 8u ? 8u : ((x) <= 16u ? 16u : ((x) <= 32u ? 32u : (x)))))))
+
+// Make our best guess. These sizes may not be accurate, but they are good enough to estimate the size of the storage required to hold these types.
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE)
+#endif
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE)
+#endif
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE) && defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
+#define BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE BOOST_ATOMIC_DETAIL_ALIGN_SIZE_TO_POWER_OF_2(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE)
+#endif
+
+#endif // !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
+
+#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_FLOAT) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_DOUBLE) ||\
+ !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE_VALUE) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG_DOUBLE)
+#error Boost.Atomic: Failed to determine builtin floating point type sizes, the target platform is not supported. Please, report to the developers (patches are welcome).
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_FLOAT_SIZES_HPP_INCLUDED_
diff --git a/boost/atomic/detail/fp_operations.hpp b/boost/atomic/detail/fp_operations.hpp
new file mode 100644
index 0000000000..69cb0d19a2
--- /dev/null
+++ b/boost/atomic/detail/fp_operations.hpp
@@ -0,0 +1,28 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fp_operations.hpp
+ *
+ * This header defines floating point atomic operations, including the generic version.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FP_OPERATIONS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FP_OPERATIONS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/fp_ops_generic.hpp>
+#include <boost/atomic/detail/fp_ops_emulated.hpp>
+
+#if !defined(BOOST_ATOMIC_DETAIL_FP_BACKEND_GENERIC)
+#include BOOST_ATOMIC_DETAIL_FP_BACKEND_HEADER(boost/atomic/detail/fp_ops_)
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_FP_OPERATIONS_HPP_INCLUDED_
diff --git a/boost/atomic/detail/fp_operations_fwd.hpp b/boost/atomic/detail/fp_operations_fwd.hpp
new file mode 100644
index 0000000000..8696de31cf
--- /dev/null
+++ b/boost/atomic/detail/fp_operations_fwd.hpp
@@ -0,0 +1,35 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fp_operations_fwd.hpp
+ *
+ * This header contains forward declaration of the \c fp_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Base, typename Value, std::size_t Size, bool = Base::is_always_lock_free >
+struct fp_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_FP_OPERATIONS_FWD_HPP_INCLUDED_
diff --git a/boost/atomic/detail/fp_ops_emulated.hpp b/boost/atomic/detail/fp_ops_emulated.hpp
new file mode 100644
index 0000000000..a87f1814b3
--- /dev/null
+++ b/boost/atomic/detail/fp_ops_emulated.hpp
@@ -0,0 +1,72 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fp_ops_emulated.hpp
+ *
+ * This header contains emulated (lock-based) implementation of the floating point atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/bitwise_fp_cast.hpp>
+#include <boost/atomic/detail/fp_operations_fwd.hpp>
+#include <boost/atomic/detail/lockpool.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Emulated (lock-based) implementation of floating point operations
+template< typename Base, typename Value, std::size_t Size >
+struct emulated_fp_operations :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef Value value_type;
+
+ static BOOST_FORCEINLINE value_type fetch_add(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
+ value_type new_val = old_val + v;
+ s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE value_type fetch_sub(storage_type volatile& storage, value_type v, memory_order) BOOST_NOEXCEPT
+ {
+ storage_type& s = const_cast< storage_type& >(storage);
+ lockpool::scoped_lock lock(&storage);
+ value_type old_val = atomics::detail::bitwise_fp_cast< value_type >(s);
+ value_type new_val = old_val - v;
+ s = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ return old_val;
+ }
+};
+
+template< typename Base, typename Value, std::size_t Size >
+struct fp_operations< Base, Value, Size, false > :
+ public emulated_fp_operations< Base, Value, Size >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_FP_OPS_EMULATED_HPP_INCLUDED_
diff --git a/boost/atomic/detail/fp_ops_generic.hpp b/boost/atomic/detail/fp_ops_generic.hpp
new file mode 100644
index 0000000000..b83e85a359
--- /dev/null
+++ b/boost/atomic/detail/fp_ops_generic.hpp
@@ -0,0 +1,83 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fp_ops_generic.hpp
+ *
+ * This header contains generic implementation of the floating point atomic operations.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/bitwise_fp_cast.hpp>
+#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/fp_operations_fwd.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Generic implementation of floating point operations
+template< typename Base, typename Value, std::size_t Size >
+struct generic_fp_operations :
+ public Base
+{
+ typedef Base base_type;
+ typedef typename base_type::storage_type storage_type;
+ typedef Value value_type;
+
+ static BOOST_FORCEINLINE value_type fetch_add(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_storage, new_storage;
+ value_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_storage);
+ do
+ {
+ old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
+ new_val = old_val + v;
+ new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ }
+ while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
+ return old_val;
+ }
+
+ static BOOST_FORCEINLINE value_type fetch_sub(storage_type volatile& storage, value_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type old_storage, new_storage;
+ value_type old_val, new_val;
+ atomics::detail::non_atomic_load(storage, old_storage);
+ do
+ {
+ old_val = atomics::detail::bitwise_fp_cast< value_type >(old_storage);
+ new_val = old_val - v;
+ new_storage = atomics::detail::bitwise_fp_cast< storage_type >(new_val);
+ }
+ while (!base_type::compare_exchange_weak(storage, old_storage, new_storage, order, memory_order_relaxed));
+ return old_val;
+ }
+};
+
+// Default fp_operations template definition will be used unless specialized for a specific platform
+template< typename Base, typename Value, std::size_t Size >
+struct fp_operations< Base, Value, Size, true > :
+ public generic_fp_operations< Base, Value, Size >
+{
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_FP_OPS_GENERIC_HPP_INCLUDED_
diff --git a/boost/atomic/detail/int_sizes.hpp b/boost/atomic/detail/int_sizes.hpp
index eada4fff07..2a9757c147 100644
--- a/boost/atomic/detail/int_sizes.hpp
+++ b/boost/atomic/detail/int_sizes.hpp
@@ -39,7 +39,7 @@
#if defined(__SIZEOF_POINTER__)
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER __SIZEOF_POINTER__
#elif defined(_MSC_VER)
-#if defined(_M_AMD64) || defined(_M_IA64)
+#if defined(_M_AMD64) || defined(_M_ARM64) || defined(_M_IA64)
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 8
#else
#define BOOST_ATOMIC_DETAIL_SIZEOF_POINTER 4
@@ -117,7 +117,7 @@
#include <wchar.h>
#include <boost/cstdint.hpp>
- #if defined(_MSC_VER) && ( _MSC_VER <= 1310 || defined(UNDER_CE) && _MSC_VER <= 1500 )
+#if defined(_MSC_VER) && (_MSC_VER <= 1310 || defined(UNDER_CE) && _MSC_VER <= 1500)
// MSVC 7.1 and MSVC 8 (arm) define WCHAR_MAX to a value not suitable for constant expressions
#define BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T 2
#elif (WCHAR_MAX + 0) == 0xff || (WCHAR_MAX + 0) == 0x7f
@@ -134,7 +134,7 @@
#if !defined(BOOST_ATOMIC_DETAIL_SIZEOF_SHORT) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_INT) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_LONG) || !defined(BOOST_ATOMIC_DETAIL_SIZEOF_LLONG) ||\
!defined(BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T)
-#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please, report to the developers.
+#error Boost.Atomic: Failed to determine builtin integer sizes, the target platform is not supported. Please, report to the developers (patches are welcome).
#endif
#endif // BOOST_ATOMIC_DETAIL_INT_SIZES_HPP_INCLUDED_
diff --git a/boost/atomic/detail/integral_extend.hpp b/boost/atomic/detail/integral_extend.hpp
new file mode 100644
index 0000000000..dea48ac6fe
--- /dev/null
+++ b/boost/atomic/detail/integral_extend.hpp
@@ -0,0 +1,105 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/integral_extend.hpp
+ *
+ * This header defines sign/zero extension utilities for Boost.Atomic. The tools assume two's complement signed integer representation.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_INTEGRAL_EXTEND_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_INTEGRAL_EXTEND_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/bitwise_cast.hpp>
+#include <boost/atomic/detail/type_traits/integral_constant.hpp>
+#include <boost/atomic/detail/type_traits/is_signed.hpp>
+#include <boost/atomic/detail/type_traits/make_signed.hpp>
+#include <boost/atomic/detail/type_traits/make_unsigned.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output zero_extend_impl(Input input, atomics::detail::true_type) BOOST_NOEXCEPT
+{
+ // Note: If we are casting with truncation or to the same-sized output, don't cause signed integer overflow by this chain of conversions
+ return atomics::detail::bitwise_cast< Output >(static_cast< typename atomics::detail::make_unsigned< Output >::type >(
+ static_cast< typename atomics::detail::make_unsigned< Input >::type >(input)));
+}
+
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output zero_extend_impl(Input input, atomics::detail::false_type) BOOST_NOEXCEPT
+{
+ return static_cast< Output >(static_cast< typename atomics::detail::make_unsigned< Input >::type >(input));
+}
+
+//! Zero-extends or truncates (wraps) input operand to fit in the output type
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output zero_extend(Input input) BOOST_NOEXCEPT
+{
+ return atomics::detail::zero_extend_impl< Output >(input, atomics::detail::integral_constant< bool, atomics::detail::is_signed< Output >::value >());
+}
+
+//! Truncates (wraps) input operand to fit in the output type
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output integral_truncate(Input input) BOOST_NOEXCEPT
+{
+ // zero_extend does the truncation
+ return atomics::detail::zero_extend< Output >(input);
+}
+
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output sign_extend_impl(Input input, atomics::detail::true_type) BOOST_NOEXCEPT
+{
+ return atomics::detail::integral_truncate< Output >(input);
+}
+
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output sign_extend_impl(Input input, atomics::detail::false_type) BOOST_NOEXCEPT
+{
+ return static_cast< Output >(atomics::detail::bitwise_cast< typename atomics::detail::make_signed< Input >::type >(input));
+}
+
+//! Sign-extends or truncates (wraps) input operand to fit in the output type
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output sign_extend(Input input) BOOST_NOEXCEPT
+{
+ return atomics::detail::sign_extend_impl< Output >(input, atomics::detail::integral_constant< bool, sizeof(Output) <= sizeof(Input) >());
+}
+
+//! Sign-extends or truncates (wraps) input operand to fit in the output type
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output integral_extend(Input input, atomics::detail::true_type) BOOST_NOEXCEPT
+{
+ return atomics::detail::sign_extend< Output >(input);
+}
+
+//! Zero-extends or truncates (wraps) input operand to fit in the output type
+template< typename Output, typename Input >
+BOOST_FORCEINLINE Output integral_extend(Input input, atomics::detail::false_type) BOOST_NOEXCEPT
+{
+ return atomics::detail::zero_extend< Output >(input);
+}
+
+//! Sign- or zero-extends or truncates (wraps) input operand to fit in the output type
+template< bool Signed, typename Output, typename Input >
+BOOST_FORCEINLINE Output integral_extend(Input input) BOOST_NOEXCEPT
+{
+ return atomics::detail::integral_extend< Output >(input, atomics::detail::integral_constant< bool, Signed >());
+}
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_INTEGRAL_EXTEND_HPP_INCLUDED_
diff --git a/boost/atomic/detail/ops_cas_based.hpp b/boost/atomic/detail/ops_cas_based.hpp
index 504cedb70f..e2e18aa384 100644
--- a/boost/atomic/detail/ops_cas_based.hpp
+++ b/boost/atomic/detail/ops_cas_based.hpp
@@ -47,6 +47,8 @@ struct cas_based_operations :
{
typedef typename Base::storage_type storage_type;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
+
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
diff --git a/boost/atomic/detail/ops_emulated.hpp b/boost/atomic/detail/ops_emulated.hpp
index 437b62f311..f30fbdab9f 100644
--- a/boost/atomic/detail/ops_emulated.hpp
+++ b/boost/atomic/detail/ops_emulated.hpp
@@ -30,10 +30,15 @@ namespace boost {
namespace atomics {
namespace detail {
-template< typename T >
+template< std::size_t Size, bool Signed >
struct emulated_operations
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = false;
@@ -146,12 +151,8 @@ struct emulated_operations
template< std::size_t Size, bool Signed >
struct operations :
- public emulated_operations< typename make_storage_type< Size, Signed >::type >
+ public emulated_operations< Size, Signed >
{
- typedef typename make_storage_type< Size, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
} // namespace detail
diff --git a/boost/atomic/detail/ops_extending_cas_based.hpp b/boost/atomic/detail/ops_extending_cas_based.hpp
index 3f21031f12..5f197cea48 100644
--- a/boost/atomic/detail/ops_extending_cas_based.hpp
+++ b/boost/atomic/detail/ops_extending_cas_based.hpp
@@ -18,6 +18,7 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/integral_extend.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -32,18 +33,18 @@ struct extending_cas_based_operations :
public Base
{
typedef typename Base::storage_type storage_type;
- typedef typename make_storage_type< Size, Signed >::type emulated_storage_type;
+ typedef typename make_storage_type< Size >::type emulated_storage_type;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
- emulated_storage_type new_val;
+ storage_type new_val;
do
{
- new_val = static_cast< emulated_storage_type >(old_val) + static_cast< emulated_storage_type >(v);
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val + v));
}
- while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));
+ while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return old_val;
}
@@ -51,12 +52,12 @@ struct extending_cas_based_operations :
{
storage_type old_val;
atomics::detail::non_atomic_load(storage, old_val);
- emulated_storage_type new_val;
+ storage_type new_val;
do
{
- new_val = static_cast< emulated_storage_type >(old_val) - static_cast< emulated_storage_type >(v);
+ new_val = atomics::detail::integral_extend< Signed, storage_type >(static_cast< emulated_storage_type >(old_val - v));
}
- while (!Base::compare_exchange_weak(storage, old_val, static_cast< storage_type >(new_val), order, memory_order_relaxed));
+ while (!Base::compare_exchange_weak(storage, old_val, new_val, order, memory_order_relaxed));
return old_val;
}
};
diff --git a/boost/atomic/detail/ops_gcc_alpha.hpp b/boost/atomic/detail/ops_gcc_alpha.hpp
index 71846a8647..85b1342982 100644
--- a/boost/atomic/detail/ops_gcc_alpha.hpp
+++ b/boost/atomic/detail/ops_gcc_alpha.hpp
@@ -64,17 +64,18 @@ namespace detail {
struct gcc_alpha_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("mb" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
__asm__ __volatile__ ("mb" ::: "memory");
}
@@ -90,8 +91,8 @@ template< bool Signed >
struct operations< 4u, Signed > :
public gcc_alpha_operations_base
{
- typedef typename make_storage_type< 4u, Signed >::type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -600,8 +601,8 @@ template< bool Signed >
struct operations< 8u, Signed > :
public gcc_alpha_operations_base
{
- typedef typename make_storage_type< 8u, Signed >::type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
diff --git a/boost/atomic/detail/ops_gcc_arm.hpp b/boost/atomic/detail/ops_gcc_arm.hpp
index 0cea16bc18..b32159536f 100644
--- a/boost/atomic/detail/ops_gcc_arm.hpp
+++ b/boost/atomic/detail/ops_gcc_arm.hpp
@@ -21,6 +21,7 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/integral_extend.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/detail/ops_gcc_arm_common.hpp>
#include <boost/atomic/capabilities.hpp>
@@ -59,8 +60,8 @@ template< bool Signed >
struct operations< 4u, Signed > :
public gcc_arm_operations_base
{
- typedef typename make_storage_type< 4u, Signed >::type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -317,9 +318,9 @@ template< bool Signed >
struct operations< 1u, Signed > :
public gcc_arm_operations_base
{
- typedef typename make_storage_type< 1u, Signed >::type storage_type;
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+ typedef typename make_storage_type< 1u >::type storage_type;
+ typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -347,13 +348,13 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // load the original value
+ "ldrexb %[original], %[storage]\n" // load the original value and zero-extend to 32 bits
"strexb %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
"teq %[tmp], #0\n" // check if store succeeded
"bne 1b\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
- : [value] "r" ((extended_storage_type)v)
+ : [value] "r" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -371,7 +372,7 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"mov %[success], #0\n" // success = 0
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"cmp %[original], %[expected]\n" // flags = original==expected
"itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
"strexbeq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
@@ -381,8 +382,8 @@ struct operations< 1u, Signed > :
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [expected] "Ir" ((extended_storage_type)expected), // %4
- [desired] "r" ((extended_storage_type)desired) // %5
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -405,7 +406,7 @@ struct operations< 1u, Signed > :
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"mov %[success], #0\n" // success = 0
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"cmp %[original], %[expected]\n" // flags = original==expected
"bne 2f\n" // if (!flags.equal) goto end
"strexb %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
@@ -417,8 +418,8 @@ struct operations< 1u, Signed > :
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [expected] "Ir" ((extended_storage_type)expected), // %4
- [desired] "r" ((extended_storage_type)desired) // %5
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -438,7 +439,7 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"add %[result], %[original], %[value]\n" // result = original + value
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -448,7 +449,7 @@ struct operations< 1u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -464,7 +465,7 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"sub %[result], %[original], %[value]\n" // result = original - value
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -474,7 +475,7 @@ struct operations< 1u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -490,7 +491,7 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"and %[result], %[original], %[value]\n" // result = original & value
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -500,7 +501,7 @@ struct operations< 1u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -516,7 +517,7 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"orr %[result], %[original], %[value]\n" // result = original | value
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -526,7 +527,7 @@ struct operations< 1u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -542,7 +543,7 @@ struct operations< 1u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexb %[original], %[storage]\n" // original = *(&storage)
+ "ldrexb %[original], %[storage]\n" // original = zero_extend(*(&storage))
"eor %[result], %[original], %[value]\n" // result = original ^ value
"strexb %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -552,7 +553,7 @@ struct operations< 1u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -704,9 +705,9 @@ template< bool Signed >
struct operations< 2u, Signed > :
public gcc_arm_operations_base
{
- typedef typename make_storage_type< 2u, Signed >::type storage_type;
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u, Signed >::type extended_storage_type;
+ typedef typename make_storage_type< 2u >::type storage_type;
+ typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type extended_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -734,13 +735,13 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // load the original value
+ "ldrexh %[original], %[storage]\n" // load the original value and zero-extend to 32 bits
"strexh %[tmp], %[value], %[storage]\n" // store the replacement, tmp = store failed
"teq %[tmp], #0\n" // check if store succeeded
"bne 1b\n"
BOOST_ATOMIC_DETAIL_ARM_ASM_END(%[tmp])
: [tmp] "=&l" (tmp), [original] "=&r" (original), [storage] "+Q" (storage)
- : [value] "r" ((extended_storage_type)v)
+ : [value] "r" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -758,7 +759,7 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"mov %[success], #0\n" // success = 0
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"cmp %[original], %[expected]\n" // flags = original==expected
"itt eq\n" // [hint that the following 2 instructions are conditional on flags.equal]
"strexheq %[success], %[desired], %[storage]\n" // if (flags.equal) *(&storage) = desired, success = store failed
@@ -768,8 +769,8 @@ struct operations< 2u, Signed > :
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [expected] "Ir" ((extended_storage_type)expected), // %4
- [desired] "r" ((extended_storage_type)desired) // %5
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -792,7 +793,7 @@ struct operations< 2u, Signed > :
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"mov %[success], #0\n" // success = 0
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"cmp %[original], %[expected]\n" // flags = original==expected
"bne 2f\n" // if (!flags.equal) goto end
"strexh %[success], %[desired], %[storage]\n" // *(&storage) = desired, success = store failed
@@ -804,8 +805,8 @@ struct operations< 2u, Signed > :
[success] "=&r" (success), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [expected] "Ir" ((extended_storage_type)expected), // %4
- [desired] "r" ((extended_storage_type)desired) // %5
+ : [expected] "Ir" (atomics::detail::zero_extend< extended_storage_type >(expected)), // %4
+ [desired] "r" (desired) // %5
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
if (success)
@@ -825,7 +826,7 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"add %[result], %[original], %[value]\n" // result = original + value
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -835,7 +836,7 @@ struct operations< 2u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -851,7 +852,7 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"sub %[result], %[original], %[value]\n" // result = original - value
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -861,7 +862,7 @@ struct operations< 2u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -877,7 +878,7 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"and %[result], %[original], %[value]\n" // result = original & value
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -887,7 +888,7 @@ struct operations< 2u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -903,7 +904,7 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"orr %[result], %[original], %[value]\n" // result = original | value
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -913,7 +914,7 @@ struct operations< 2u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -929,7 +930,7 @@ struct operations< 2u, Signed > :
(
BOOST_ATOMIC_DETAIL_ARM_ASM_START(%[tmp])
"1:\n"
- "ldrexh %[original], %[storage]\n" // original = *(&storage)
+ "ldrexh %[original], %[storage]\n" // original = zero_extend(*(&storage))
"eor %[result], %[original], %[value]\n" // result = original ^ value
"strexh %[tmp], %[result], %[storage]\n" // *(&storage) = result, tmp = store failed
"teq %[tmp], #0\n" // flags = tmp==0
@@ -939,7 +940,7 @@ struct operations< 2u, Signed > :
[result] "=&r" (result), // %1
[tmp] "=&l" (tmp), // %2
[storage] "+Q" (storage) // %3
- : [value] "Ir" ((extended_storage_type)v) // %4
+ : [value] "Ir" (v) // %4
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
fence_after(order);
@@ -1102,8 +1103,8 @@ template< bool Signed >
struct operations< 8u, Signed > :
public gcc_arm_operations_base
{
- typedef typename make_storage_type< 8u, Signed >::type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
diff --git a/boost/atomic/detail/ops_gcc_arm_common.hpp b/boost/atomic/detail/ops_gcc_arm_common.hpp
index 9ac08ee5b6..73c04ffe15 100644
--- a/boost/atomic/detail/ops_gcc_arm_common.hpp
+++ b/boost/atomic/detail/ops_gcc_arm_common.hpp
@@ -74,17 +74,18 @@ namespace detail {
struct gcc_arm_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
hardware_full_fence();
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
hardware_full_fence();
}
diff --git a/boost/atomic/detail/ops_gcc_atomic.hpp b/boost/atomic/detail/ops_gcc_atomic.hpp
index b32f8933b0..ce40e3b2b9 100644
--- a/boost/atomic/detail/ops_gcc_atomic.hpp
+++ b/boost/atomic/detail/ops_gcc_atomic.hpp
@@ -54,10 +54,10 @@ namespace detail {
* The function converts \c boost::memory_order values to the compiler-specific constants.
*
* NOTE: The intention is that the function is optimized away by the compiler, and the
- * compiler-specific constants are passed to the intrinsics. I know constexpr doesn't
+ * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
* work in this case because the standard atomics interface require memory ordering
* constants to be passed as function arguments, at which point they stop being constexpr.
- * However it is crucial that the compiler sees constants and not runtime values,
+ * However, it is crucial that the compiler sees constants and not runtime values,
* because otherwise it just ignores the ordering value and always uses seq_cst.
* This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
* gcc 4.8.2. Intel Compiler issues a warning in this case:
@@ -71,8 +71,8 @@ namespace detail {
* all functions are called with constant orderings and call intrinstcts properly.
*
* Unfortunately, this still doesn't work in debug mode as the compiler doesn't
- * inline functions even when marked with BOOST_FORCEINLINE. In this case all atomic
- * operaions will be executed with seq_cst semantics.
+ * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
+ * all atomic operations will be executed with seq_cst semantics.
*/
BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
{
@@ -81,12 +81,17 @@ BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order o
(order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
}
-template< typename T >
+template< std::size_t Size, bool Signed >
struct gcc_atomic_operations
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
- // Note: In the current implementation, gcc_atomic_operations are used onlu when the particularly sized __atomic
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+
+ // Note: In the current implementation, gcc_atomic_operations are used only when the particularly sized __atomic
// intrinsics are always lock-free (i.e. the corresponding LOCK_FREE macro is 2). Therefore it is safe to
// always set is_always_lock_free to true here.
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
@@ -183,12 +188,8 @@ struct operations< 16u, Signed > :
template< bool Signed >
struct operations< 16u, Signed > :
- public gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >
+ public gcc_atomic_operations< 16u, Signed >
{
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -217,24 +218,16 @@ struct operations< 8u, Signed > :
template< bool Signed >
struct operations< 8u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 8u, Signed >
{
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
template< bool Signed >
struct operations< 8u, Signed > :
- public gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >
+ public gcc_atomic_operations< 8u, Signed >
{
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -253,24 +246,16 @@ struct operations< 8u, Signed > :
template< bool Signed >
struct operations< 4u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 4u, Signed >
{
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
template< bool Signed >
struct operations< 4u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 4u, Signed >
{
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
@@ -279,12 +264,8 @@ struct operations< 4u, Signed > :
template< bool Signed >
struct operations< 4u, Signed > :
- public gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >
+ public gcc_atomic_operations< 4u, Signed >
{
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -303,36 +284,24 @@ struct operations< 4u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 2u, Signed >
{
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
template< bool Signed >
struct operations< 2u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 2u, Signed >
{
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
template< bool Signed >
struct operations< 2u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 2u, Signed >
{
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -341,12 +310,8 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
- public gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >
+ public gcc_atomic_operations< 2u, Signed >
{
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -365,48 +330,32 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 2u, Signed >, 1u, Signed >
{
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)
template< bool Signed >
struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 1u, Signed >
{
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)
template< bool Signed >
struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 1u, Signed >
{
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#else
template< bool Signed >
struct operations< 1u, Signed > :
- public extending_cas_based_operations< gcc_atomic_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 1u, Signed >
{
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -415,12 +364,8 @@ struct operations< 1u, Signed > :
template< bool Signed >
struct operations< 1u, Signed > :
- public gcc_atomic_operations< typename make_storage_type< 1u, Signed >::type >
+ public gcc_atomic_operations< 1u, Signed >
{
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
diff --git a/boost/atomic/detail/ops_gcc_ppc.hpp b/boost/atomic/detail/ops_gcc_ppc.hpp
index 29e7ddf249..a826736d17 100644
--- a/boost/atomic/detail/ops_gcc_ppc.hpp
+++ b/boost/atomic/detail/ops_gcc_ppc.hpp
@@ -83,8 +83,8 @@ template< bool Signed >
struct operations< 4u, Signed > :
public gcc_ppc_operations_base
{
- typedef typename make_storage_type< 4u, Signed >::type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -105,7 +105,7 @@ struct operations< 4u, Signed > :
storage_type v;
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
{
__asm__ __volatile__
(
@@ -203,7 +203,7 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -212,7 +212,7 @@ struct operations< 4u, Signed > :
"add %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -222,7 +222,7 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -231,7 +231,7 @@ struct operations< 4u, Signed > :
"sub %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -241,7 +241,7 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -250,7 +250,7 @@ struct operations< 4u, Signed > :
"and %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -260,7 +260,7 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -269,7 +269,7 @@ struct operations< 4u, Signed > :
"or %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -279,7 +279,7 @@ struct operations< 4u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -288,7 +288,7 @@ struct operations< 4u, Signed > :
"xor %1,%0,%3\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -313,9 +313,8 @@ template< bool Signed >
struct operations< 1u, Signed > :
public gcc_ppc_operations_base
{
- typedef typename make_storage_type< 1u, Signed >::type storage_type;
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
- typedef typename make_storage_type< 1u, false >::type unsigned_storage_type;
+ typedef typename make_storage_type< 1u >::type storage_type;
+ typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -336,7 +335,7 @@ struct operations< 1u, Signed > :
storage_type v;
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
{
__asm__ __volatile__
(
@@ -396,7 +395,7 @@ struct operations< 1u, Signed > :
"li %1, 1\n\t"
"1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
- : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "b" (expected), "b" (desired)
: "cr0"
);
if (success)
@@ -422,7 +421,7 @@ struct operations< 1u, Signed > :
"li %1, 1\n\t"
"1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
- : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "b" (expected), "b" (desired)
: "cr0"
);
if (success)
@@ -434,7 +433,7 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -443,7 +442,7 @@ struct operations< 1u, Signed > :
"add %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -453,7 +452,7 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -462,7 +461,7 @@ struct operations< 1u, Signed > :
"sub %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -472,7 +471,7 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -481,7 +480,7 @@ struct operations< 1u, Signed > :
"and %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -491,7 +490,7 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -500,7 +499,7 @@ struct operations< 1u, Signed > :
"or %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -510,7 +509,7 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -519,7 +518,7 @@ struct operations< 1u, Signed > :
"xor %1,%0,%3\n\t"
"stbcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -549,7 +548,7 @@ struct operations< 1u, false > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -559,7 +558,7 @@ struct operations< 1u, false > :
"rlwinm %1, %1, 0, 0xff\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -569,7 +568,7 @@ struct operations< 1u, false > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -579,7 +578,7 @@ struct operations< 1u, false > :
"rlwinm %1, %1, 0, 0xff\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -597,7 +596,7 @@ struct operations< 1u, true > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -607,7 +606,7 @@ struct operations< 1u, true > :
"extsb %1, %1\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -617,7 +616,7 @@ struct operations< 1u, true > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -627,7 +626,7 @@ struct operations< 1u, true > :
"extsb %1, %1\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -644,9 +643,8 @@ template< bool Signed >
struct operations< 2u, Signed > :
public gcc_ppc_operations_base
{
- typedef typename make_storage_type< 2u, Signed >::type storage_type;
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
- typedef typename make_storage_type< 2u, false >::type unsigned_storage_type;
+ typedef typename make_storage_type< 2u >::type storage_type;
+ typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -667,7 +665,7 @@ struct operations< 2u, Signed > :
storage_type v;
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
{
__asm__ __volatile__
(
@@ -727,7 +725,7 @@ struct operations< 2u, Signed > :
"li %1, 1\n\t"
"1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
- : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "b" (expected), "b" (desired)
: "cr0"
);
if (success)
@@ -753,7 +751,7 @@ struct operations< 2u, Signed > :
"li %1, 1\n\t"
"1:\n\t"
: "=&b" (expected), "=&b" (success), "+Z" (storage)
- : "b" ((unsigned_storage_type)expected), "b" (desired)
+ : "b" (expected), "b" (desired)
: "cr0"
);
if (success)
@@ -765,7 +763,7 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -774,7 +772,7 @@ struct operations< 2u, Signed > :
"add %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -784,7 +782,7 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -793,7 +791,7 @@ struct operations< 2u, Signed > :
"sub %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -803,7 +801,7 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -812,7 +810,7 @@ struct operations< 2u, Signed > :
"and %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -822,7 +820,7 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -831,7 +829,7 @@ struct operations< 2u, Signed > :
"or %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -841,7 +839,7 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -850,7 +848,7 @@ struct operations< 2u, Signed > :
"xor %1,%0,%3\n\t"
"sthcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -880,7 +878,7 @@ struct operations< 2u, false > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -890,7 +888,7 @@ struct operations< 2u, false > :
"rlwinm %1, %1, 0, 0xffff\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -900,7 +898,7 @@ struct operations< 2u, false > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -910,7 +908,7 @@ struct operations< 2u, false > :
"rlwinm %1, %1, 0, 0xffff\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -928,7 +926,7 @@ struct operations< 2u, true > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -938,7 +936,7 @@ struct operations< 2u, true > :
"extsh %1, %1\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -948,7 +946,7 @@ struct operations< 2u, true > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -958,7 +956,7 @@ struct operations< 2u, true > :
"extsh %1, %1\n\t"
"stwcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -975,8 +973,8 @@ template< bool Signed >
struct operations< 8u, Signed > :
public gcc_ppc_operations_base
{
- typedef typename make_storage_type< 8u, Signed >::type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -997,7 +995,7 @@ struct operations< 8u, Signed > :
storage_type v;
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
{
__asm__ __volatile__
(
@@ -1095,7 +1093,7 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -1104,7 +1102,7 @@ struct operations< 8u, Signed > :
"add %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -1114,7 +1112,7 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -1123,7 +1121,7 @@ struct operations< 8u, Signed > :
"sub %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -1133,7 +1131,7 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -1142,7 +1140,7 @@ struct operations< 8u, Signed > :
"and %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -1152,7 +1150,7 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -1161,7 +1159,7 @@ struct operations< 8u, Signed > :
"or %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -1171,7 +1169,7 @@ struct operations< 8u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
- storage_type original, tmp;
+ storage_type original, result;
fence_before(order);
__asm__ __volatile__
(
@@ -1180,7 +1178,7 @@ struct operations< 8u, Signed > :
"xor %1,%0,%3\n\t"
"stdcx. %1,%y2\n\t"
"bne- 1b\n\t"
- : "=&b" (original), "=&b" (tmp), "+Z" (storage)
+ : "=&b" (original), "=&b" (result), "+Z" (storage)
: "b" (v)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
);
@@ -1204,20 +1202,16 @@ struct operations< 8u, Signed > :
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
- switch (order)
+ if (order != memory_order_relaxed)
{
- case memory_order_consume:
- case memory_order_acquire:
- case memory_order_release:
- case memory_order_acq_rel:
#if defined(__powerpc64__) || defined(__PPC64__)
- __asm__ __volatile__ ("lwsync" ::: "memory");
- break;
-#endif
- case memory_order_seq_cst:
+ if (order != memory_order_seq_cst)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+ else
+ __asm__ __volatile__ ("sync" ::: "memory");
+#else
__asm__ __volatile__ ("sync" ::: "memory");
- break;
- default:;
+#endif
}
}
diff --git a/boost/atomic/detail/ops_gcc_ppc_common.hpp b/boost/atomic/detail/ops_gcc_ppc_common.hpp
index 3aa4e5f15f..e5c9303bf7 100644
--- a/boost/atomic/detail/ops_gcc_ppc_common.hpp
+++ b/boost/atomic/detail/ops_gcc_ppc_common.hpp
@@ -40,6 +40,7 @@ namespace detail {
struct gcc_ppc_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
@@ -47,17 +48,17 @@ struct gcc_ppc_operations_base
#if defined(__powerpc64__) || defined(__PPC64__)
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("sync" ::: "memory");
- else if ((order & memory_order_release) != 0)
+ else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("lwsync" ::: "memory");
#else
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("sync" ::: "memory");
#endif
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
__asm__ __volatile__ ("isync" ::: "memory");
}
};
diff --git a/boost/atomic/detail/ops_gcc_sparc.hpp b/boost/atomic/detail/ops_gcc_sparc.hpp
index 9191c13255..19b9b1fa87 100644
--- a/boost/atomic/detail/ops_gcc_sparc.hpp
+++ b/boost/atomic/detail/ops_gcc_sparc.hpp
@@ -35,13 +35,14 @@ namespace detail {
struct gcc_sparc_cas_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
- else if ((order & memory_order_release) != 0)
+ else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
}
@@ -49,7 +50,7 @@ struct gcc_sparc_cas_base
{
if (order == memory_order_seq_cst)
__asm__ __volatile__ ("membar #Sync" ::: "memory");
- else if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
__asm__ __volatile__ ("membar #StoreStore | #LoadStore" ::: "memory");
}
@@ -64,8 +65,8 @@ template< bool Signed >
struct gcc_sparc_cas32 :
public gcc_sparc_cas_base
{
- typedef typename make_storage_type< 4u, Signed >::type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -148,8 +149,8 @@ template< bool Signed >
struct gcc_sparc_cas64 :
public gcc_sparc_cas_base
{
- typedef typename make_storage_type< 8u, Signed >::type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
diff --git a/boost/atomic/detail/ops_gcc_sync.hpp b/boost/atomic/detail/ops_gcc_sync.hpp
index 2a075bcf9f..1597de852a 100644
--- a/boost/atomic/detail/ops_gcc_sync.hpp
+++ b/boost/atomic/detail/ops_gcc_sync.hpp
@@ -34,11 +34,12 @@ namespace detail {
struct gcc_sync_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__sync_synchronize();
}
@@ -50,16 +51,20 @@ struct gcc_sync_operations_base
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
- if ((order & (memory_order_acquire | memory_order_consume)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_consume))) != 0u)
__sync_synchronize();
}
};
-template< typename T >
+template< std::size_t Size, bool Signed >
struct gcc_sync_operations :
public gcc_sync_operations_base
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -90,7 +95,7 @@ struct gcc_sync_operations :
// GCC docs mention that not all architectures may support full exchange semantics for this intrinsic. However, GCC's implementation of
// std::atomic<> uses this intrinsic unconditionally. We do so as well. In case if some architectures actually don't support this, we can always
// add a check here and fall back to a CAS loop.
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__sync_synchronize();
return __sync_lock_test_and_set(&storage, v);
}
@@ -135,7 +140,7 @@ struct gcc_sync_operations :
static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__sync_synchronize();
return !!__sync_lock_test_and_set(&storage, 1);
}
@@ -152,35 +157,17 @@ struct gcc_sync_operations :
template< bool Signed >
struct operations< 1u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
- public gcc_sync_operations< typename make_storage_type< 1u, Signed >::type >
+ public gcc_sync_operations< 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 2u, Signed >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 1u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 1u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 1u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 1u, Signed >
#endif
{
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1)
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
-#else
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
-#endif
-
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -188,30 +175,15 @@ struct operations< 1u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
- public gcc_sync_operations< typename make_storage_type< 2u, Signed >::type >
+ public gcc_sync_operations< 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >, 2u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 4u, Signed >, 2u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 2u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 2u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 2u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 2u, Signed >
#endif
{
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2)
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
-#else
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
-#endif
-
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -219,25 +191,13 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct operations< 4u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- public gcc_sync_operations< typename make_storage_type< 4u, Signed >::type >
+ public gcc_sync_operations< 4u, Signed >
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >, 4u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 8u, Signed >, 4u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 4u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 4u, Signed >
#endif
{
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
-#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
-#else
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
-#endif
-
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
@@ -245,32 +205,19 @@ struct operations< 4u, Signed > :
template< bool Signed >
struct operations< 8u, Signed > :
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- public gcc_sync_operations< typename make_storage_type< 8u, Signed >::type >
+ public gcc_sync_operations< 8u, Signed >
#else
- public extending_cas_based_operations< gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >, 8u, Signed >
+ public extending_cas_based_operations< gcc_sync_operations< 16u, Signed >, 8u, Signed >
#endif
{
-#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
-#else
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
-#endif
-
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
template< bool Signed >
struct operations< 16u, Signed > :
- public gcc_sync_operations< typename make_storage_type< 16u, Signed >::type >
+ public gcc_sync_operations< 16u, Signed >
{
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};
#endif
diff --git a/boost/atomic/detail/ops_gcc_x86.hpp b/boost/atomic/detail/ops_gcc_x86.hpp
index baf4d5757d..007d4eeeeb 100644
--- a/boost/atomic/detail/ops_gcc_x86.hpp
+++ b/boost/atomic/detail/ops_gcc_x86.hpp
@@ -37,17 +37,18 @@ namespace detail {
struct gcc_x86_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
__asm__ __volatile__ ("" ::: "memory");
}
static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_acquire) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
__asm__ __volatile__ ("" ::: "memory");
}
};
@@ -56,7 +57,7 @@ template< std::size_t Size, bool Signed, typename Derived >
struct gcc_x86_operations :
public gcc_x86_operations_base
{
- typedef typename make_storage_type< Size, Signed >::type storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -107,8 +108,8 @@ struct operations< 1u, Signed > :
{
typedef gcc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+ typedef typename make_storage_type< 1u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -208,8 +209,8 @@ struct operations< 2u, Signed > :
{
typedef gcc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
- typedef typename make_storage_type< 4u, Signed >::type temp_storage_type;
+ typedef typename make_storage_type< 2u >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type temp_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -309,7 +310,7 @@ struct operations< 4u, Signed > :
{
typedef gcc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -421,7 +422,7 @@ struct operations< 8u, Signed > :
{
typedef gcc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
@@ -543,7 +544,7 @@ BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
::: "memory"
);
}
- else if ((order & (memory_order_acquire | memory_order_release)) != 0)
+ else if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_acquire) | static_cast< unsigned int >(memory_order_release))) != 0u)
{
__asm__ __volatile__ ("" ::: "memory");
}
diff --git a/boost/atomic/detail/ops_gcc_x86_dcas.hpp b/boost/atomic/detail/ops_gcc_x86_dcas.hpp
index 28cbc225e3..4dacc66fe2 100644
--- a/boost/atomic/detail/ops_gcc_x86_dcas.hpp
+++ b/boost/atomic/detail/ops_gcc_x86_dcas.hpp
@@ -5,7 +5,7 @@
*
* Copyright (c) 2009 Helge Bahmann
* Copyright (c) 2012 Tim Blechmann
- * Copyright (c) 2014 Andrey Semashev
+ * Copyright (c) 2014 - 2018 Andrey Semashev
*/
/*!
* \file atomic/detail/ops_gcc_x86_dcas.hpp
@@ -20,6 +20,7 @@
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_type.hpp>
+#include <boost/atomic/detail/string_ops.hpp>
#include <boost/atomic/capabilities.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
@@ -30,119 +31,91 @@ namespace boost {
namespace atomics {
namespace detail {
+// Note: In the 32-bit PIC code guarded with BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX below we have to avoid using memory
+// operand constraints because the compiler may choose to use ebx as the base register for that operand. At least, clang
+// is known to do that. For this reason we have to pre-compute a pointer to storage and pass it in edi. For the same reason
+// we cannot save ebx to the stack with a mov instruction, so we use esi as a scratch register and restore it afterwards.
+// Alternatively, we could push/pop the register to the stack, but exchanging the registers is faster.
+// The need to pass a pointer in edi is a bit wasteful because normally the memory operand would use a base pointer
+// with an offset (e.g. `this` + offset). But unfortunately, there seems to be no way around it.
+
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
template< bool Signed >
struct gcc_dcas_x86
{
- typedef typename make_storage_type< 8u, Signed >::type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ typedef uint32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS aliasing_uint32_t;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
- if ((((uint32_t)&storage) & 0x00000007) == 0)
+ if (BOOST_LIKELY((((uint32_t)&storage) & 0x00000007) == 0u))
{
-#if defined(__SSE2__)
+#if defined(__SSE__)
+ typedef float xmm_t __attribute__((__vector_size__(16)));
+ xmm_t xmm_scratch;
__asm__ __volatile__
(
#if defined(__AVX__)
- "vmovq %1, %%xmm4\n\t"
- "vmovq %%xmm4, %0\n\t"
+ "vmovq %[value], %[xmm_scratch]\n\t"
+ "vmovq %[xmm_scratch], %[storage]\n\t"
+#elif defined(__SSE2__)
+ "movq %[value], %[xmm_scratch]\n\t"
+ "movq %[xmm_scratch], %[storage]\n\t"
#else
- "movq %1, %%xmm4\n\t"
- "movq %%xmm4, %0\n\t"
+ "xorps %[xmm_scratch], %[xmm_scratch]\n\t"
+ "movlps %[value], %[xmm_scratch]\n\t"
+ "movlps %[xmm_scratch], %[storage]\n\t"
#endif
- : "=m" (storage)
- : "m" (v)
- : "memory", "xmm4"
+ : [storage] "=m" (storage), [xmm_scratch] "=x" (xmm_scratch)
+ : [value] "m" (v)
+ : "memory"
);
#else
__asm__ __volatile__
(
- "fildll %1\n\t"
- "fistpll %0\n\t"
- : "=m" (storage)
- : "m" (v)
+ "fildll %[value]\n\t"
+ "fistpll %[storage]\n\t"
+ : [storage] "=m" (storage)
+ : [value] "m" (v)
: "memory"
);
#endif
}
else
{
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
-#if defined(__PIC__)
- uint32_t v_lo = (uint32_t)v;
- uint32_t scratch;
- __asm__ __volatile__
- (
- "movl %%ebx, %[scratch]\n\t"
- "movl %[value_lo], %%ebx\n\t"
- "movl %[dest], %%eax\n\t"
- "movl 4+%[dest], %%edx\n\t"
- ".align 16\n\t"
- "1: lock; cmpxchg8b %[dest]\n\t"
- "jne 1b\n\t"
- "movl %[scratch], %%ebx\n\t"
- : [scratch] "=m" (scratch), [dest] "=o" (storage), [value_lo] "+a" (v_lo)
- : "c" ((uint32_t)(v >> 32))
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "edx", "memory"
- );
-#else // defined(__PIC__)
- __asm__ __volatile__
- (
- "movl %[dest], %%eax\n\t"
- "movl 4+%[dest], %%edx\n\t"
- ".align 16\n\t"
- "1: lock; cmpxchg8b %[dest]\n\t"
- "jne 1b\n\t"
- : [dest] "=o" (storage)
- : [value_lo] "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "eax", "edx", "memory"
- );
-#endif // defined(__PIC__)
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
-#if defined(__PIC__)
- uint32_t v_lo = (uint32_t)v;
- uint32_t scratch;
+#if defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
__asm__ __volatile__
(
- "movl %%ebx, %[scratch]\n\t"
- "movl %[value_lo], %%ebx\n\t"
- "movl 0(%[dest]), %%eax\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ "movl %%eax, %%ebx\n\t"
+ "movl (%[dest]), %%eax\n\t"
"movl 4(%[dest]), %%edx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
"jne 1b\n\t"
- "movl %[scratch], %%ebx\n\t"
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : [scratch] "=m,m" (scratch), [value_lo] "+a,a" (v_lo)
- : "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
-#else
- : [scratch] "=m" (scratch), [value_lo] "+a" (v_lo)
- : "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
-#endif
+ "xchgl %%ebx, %%esi\n\t"
+ :
+ : "a" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "edx", "memory"
);
-#else // defined(__PIC__)
+#else // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
__asm__ __volatile__
(
- "movl 0(%[dest]), %%eax\n\t"
- "movl 4(%[dest]), %%edx\n\t"
+ "movl %[dest_lo], %%eax\n\t"
+ "movl %[dest_hi], %%edx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "1: lock; cmpxchg8b %[dest_lo]\n\t"
"jne 1b\n\t"
- :
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : [value_lo] "b,b" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
-#else
- : [value_lo] "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
-#endif
+ : [dest_lo] "=m" (storage), [dest_hi] "=m" (reinterpret_cast< volatile aliasing_uint32_t* >(&storage)[1])
+ : [value_lo] "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "eax", "edx", "memory"
);
-#endif // defined(__PIC__)
-#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
}
}
@@ -150,29 +123,35 @@ struct gcc_dcas_x86
{
storage_type value;
- if ((((uint32_t)&storage) & 0x00000007) == 0)
+ if (BOOST_LIKELY((((uint32_t)&storage) & 0x00000007) == 0u))
{
-#if defined(__SSE2__)
+#if defined(__SSE__)
+ typedef float xmm_t __attribute__((__vector_size__(16)));
+ xmm_t xmm_scratch;
__asm__ __volatile__
(
#if defined(__AVX__)
- "vmovq %1, %%xmm4\n\t"
- "vmovq %%xmm4, %0\n\t"
+ "vmovq %[storage], %[xmm_scratch]\n\t"
+ "vmovq %[xmm_scratch], %[value]\n\t"
+#elif defined(__SSE2__)
+ "movq %[storage], %[xmm_scratch]\n\t"
+ "movq %[xmm_scratch], %[value]\n\t"
#else
- "movq %1, %%xmm4\n\t"
- "movq %%xmm4, %0\n\t"
+ "xorps %[xmm_scratch], %[xmm_scratch]\n\t"
+ "movlps %[storage], %[xmm_scratch]\n\t"
+ "movlps %[xmm_scratch], %[value]\n\t"
#endif
- : "=m" (value)
- : "m" (storage)
- : "memory", "xmm4"
+ : [value] "=m" (value), [xmm_scratch] "=x" (xmm_scratch)
+ : [storage] "m" (storage)
+ : "memory"
);
#else
__asm__ __volatile__
(
- "fildll %1\n\t"
- "fistpll %0\n\t"
- : "=m" (value)
- : "m" (storage)
+ "fildll %[storage]\n\t"
+ "fistpll %[value]\n\t"
+ : [value] "=m" (value)
+ : [storage] "m" (storage)
: "memory"
);
#endif
@@ -182,7 +161,21 @@ struct gcc_dcas_x86
#if defined(__clang__)
// Clang cannot allocate eax:edx register pairs but it has sync intrinsics
value = __sync_val_compare_and_swap(&storage, (storage_type)0, (storage_type)0);
-#else
+#elif defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ uint32_t value_bits[2];
+ // We don't care for comparison result here; the previous value will be stored into value anyway.
+ // Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.
+ __asm__ __volatile__
+ (
+ "movl %%ebx, %%eax\n\t"
+ "movl %%ecx, %%edx\n\t"
+ "lock; cmpxchg8b %[storage]\n\t"
+ : "=&a" (value_bits[0]), "=&d" (value_bits[1])
+ : [storage] "m" (storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ BOOST_ATOMIC_DETAIL_MEMCPY(&value, value_bits, sizeof(value));
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
// We don't care for comparison result here; the previous value will be stored into value anyway.
// Also we don't care for ebx and ecx values, they just have to be equal to eax and edx before cmpxchg8b.
__asm__ __volatile__
@@ -194,7 +187,7 @@ struct gcc_dcas_x86
: [storage] "m" (storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
-#endif
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
}
return value;
@@ -210,56 +203,39 @@ struct gcc_dcas_x86
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
-#elif defined(__PIC__)
-
- // Make sure ebx is saved and restored properly in case
- // of position independent code. To make this work
- // setup register constraints such that ebx can not be
- // used by accident e.g. as base address for the variable
- // to be modified. Accessing "scratch" should always be okay,
- // as it can only be placed on the stack (and therefore
- // accessed through ebp or esp only).
- //
- // In theory, could push/pop ebx onto/off the stack, but movs
- // to a prepared stack slot turn out to be faster.
+#elif defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
- uint32_t scratch;
bool success;
+
#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
- "movl %%ebx, %[scratch]\n\t"
- "movl %[desired_lo], %%ebx\n\t"
+ "xchgl %%ebx, %%esi\n\t"
"lock; cmpxchg8b (%[dest])\n\t"
- "movl %[scratch], %%ebx\n\t"
- : "+A" (expected), [scratch] "=m" (scratch), [success] "=@ccz" (success)
- : [desired_lo] "Sm" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
+ "xchgl %%ebx, %%esi\n\t"
+ : "+A" (expected), [success] "=@ccz" (success)
+ : "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
- "movl %%ebx, %[scratch]\n\t"
- "movl %[desired_lo], %%ebx\n\t"
+ "xchgl %%ebx, %%esi\n\t"
"lock; cmpxchg8b (%[dest])\n\t"
- "movl %[scratch], %%ebx\n\t"
+ "xchgl %%ebx, %%esi\n\t"
"sete %[success]\n\t"
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : "+A,A,A,A,A,A" (expected), [scratch] "=m,m,m,m,m,m" (scratch), [success] "=q,m,q,m,q,m" (success)
- : [desired_lo] "S,S,D,D,m,m" ((uint32_t)desired), "c,c,c,c,c,c" ((uint32_t)(desired >> 32)), [dest] "D,D,S,S,D,D" (&storage)
-#else
- : "+A" (expected), [scratch] "=m" (scratch), [success] "=q" (success)
- : [desired_lo] "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
-#endif
+ : "+A" (expected), [success] "=qm" (success)
+ : "S" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32)), [dest] "D" (&storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
return success;
-#else // defined(__PIC__)
+#else // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
bool success;
+
#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
@@ -273,20 +249,15 @@ struct gcc_dcas_x86
(
"lock; cmpxchg8b %[dest]\n\t"
"sete %[success]\n\t"
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
- : "b,b" ((uint32_t)desired), "c,c" ((uint32_t)(desired >> 32))
-#else
- : "+A" (expected), [dest] "+m" (storage), [success] "=q" (success)
+ : "+A" (expected), [dest] "+m" (storage), [success] "=qm" (success)
: "b" ((uint32_t)desired), "c" ((uint32_t)(desired >> 32))
-#endif
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
return success;
-#endif // defined(__PIC__)
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
@@ -297,93 +268,105 @@ struct gcc_dcas_x86
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
-#if defined(__clang__)
- // Clang cannot allocate eax:edx register pairs but it has sync intrinsics
- storage_type old_val = storage;
- while (true)
- {
- storage_type val = __sync_val_compare_and_swap(&storage, old_val, v);
- if (val == old_val)
- return val;
- old_val = val;
- }
-#elif !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
-#if defined(__PIC__)
- uint32_t scratch;
+#if defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+#if defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ uint32_t old_bits[2];
__asm__ __volatile__
(
- "movl %%ebx, %[scratch]\n\t"
- "movl %%eax, %%ebx\n\t"
- "movl %%edx, %%ecx\n\t"
- "movl %[dest], %%eax\n\t"
- "movl 4+%[dest], %%edx\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ "movl (%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg8b %[dest]\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
"jne 1b\n\t"
- "movl %[scratch], %%ebx\n\t"
- : "+A" (v), [scratch] "=m" (scratch), [dest] "+o" (storage)
- :
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "ecx", "memory"
+ "xchgl %%ebx, %%esi\n\t"
+ : "=a" (old_bits[0]), "=d" (old_bits[1])
+ : "S" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
- return v;
-#else // defined(__PIC__)
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ storage_type old_value;
__asm__ __volatile__
(
- "movl %[dest], %%eax\n\t"
- "movl 4+%[dest], %%edx\n\t"
+ "xchgl %%ebx, %%esi\n\t"
+ "movl (%[dest]), %%eax\n\t"
+ "movl 4(%[dest]), %%edx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg8b %[dest]\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
"jne 1b\n\t"
- : "=A" (v), [dest] "+o" (storage)
- : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ "xchgl %%ebx, %%esi\n\t"
+ : "=A" (old_value)
+ : "S" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
- return v;
-#endif // defined(__PIC__)
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
-#if defined(__PIC__)
- uint32_t scratch;
+ return old_value;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+#else // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
+#if defined(__MINGW32__) && ((__GNUC__+0) * 100 + (__GNUC_MINOR__+0)) < 407
+
+ // MinGW gcc up to 4.6 has problems with allocating registers in the asm blocks below
+ uint32_t old_bits[2];
__asm__ __volatile__
(
- "movl %%ebx, %[scratch]\n\t"
- "movl %%eax, %%ebx\n\t"
- "movl %%edx, %%ecx\n\t"
- "movl 0(%[dest]), %%eax\n\t"
+ "movl (%[dest]), %%eax\n\t"
"movl 4(%[dest]), %%edx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "1: lock; cmpxchg8b (%[dest])\n\t"
"jne 1b\n\t"
- "movl %[scratch], %%ebx\n\t"
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : "+A,A" (v), [scratch] "=m,m" (scratch)
- : [dest] "D,S" (&storage)
-#else
- : "+A" (v), [scratch] "=m" (scratch)
- : [dest] "D" (&storage)
-#endif
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "ecx", "memory"
+ : "=&a" (old_bits[0]), "=&d" (old_bits[1])
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "DS" (&storage)
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
- return v;
-#else // defined(__PIC__)
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+
+#elif defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ uint32_t old_bits[2];
__asm__ __volatile__
(
- "movl 0(%[dest]), %%eax\n\t"
- "movl 4(%[dest]), %%edx\n\t"
+ "movl %[dest_lo], %%eax\n\t"
+ "movl %[dest_hi], %%edx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg8b 0(%[dest])\n\t"
+ "1: lock; cmpxchg8b %[dest_lo]\n\t"
"jne 1b\n\t"
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : "=A,A" (v)
- : "b,b" ((uint32_t)v), "c,c" ((uint32_t)(v >> 32)), [dest] "D,S" (&storage)
-#else
- : "=A" (v)
- : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)), [dest] "D" (&storage)
-#endif
+ : "=&a" (old_bits[0]), "=&d" (old_bits[1]), [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint32_t* >(&storage)[1])
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
- return v;
-#endif // defined(__PIC__)
-#endif
+
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
+ return old_value;
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ storage_type old_value;
+ __asm__ __volatile__
+ (
+ "movl %[dest_lo], %%eax\n\t"
+ "movl %[dest_hi], %%edx\n\t"
+ ".align 16\n\t"
+ "1: lock; cmpxchg8b %[dest_lo]\n\t"
+ "jne 1b\n\t"
+ : "=&A" (old_value), [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint32_t* >(&storage)[1])
+ : "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32))
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
+ );
+ return old_value;
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_ASM_PRESERVE_EBX)
}
};
@@ -394,82 +377,59 @@ struct gcc_dcas_x86
template< bool Signed >
struct gcc_dcas_x86_64
{
- typedef typename make_storage_type< 16u, Signed >::type storage_type;
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 16u >::type storage_type;
+ typedef typename make_storage_type< 16u >::aligned aligned_storage_type;
+ typedef uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS aliasing_uint64_t;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
- uint64_t const* p_value = (uint64_t const*)&v;
- const uint64_t v_lo = p_value[0], v_hi = p_value[1];
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
__asm__ __volatile__
(
- "movq %[dest], %%rax\n\t"
- "movq 8+%[dest], %%rdx\n\t"
+ "movq %[dest_lo], %%rax\n\t"
+ "movq %[dest_hi], %%rdx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg16b %[dest]\n\t"
+ "1: lock; cmpxchg16b %[dest_lo]\n\t"
"jne 1b\n\t"
- : [dest] "=o" (storage)
- : "b" (v_lo), "c" (v_hi)
+ : [dest_lo] "=m" (storage), [dest_hi] "=m" (reinterpret_cast< volatile aliasing_uint64_t* >(&storage)[1])
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&v)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&v)[1])
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "rax", "rdx", "memory"
);
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
- __asm__ __volatile__
- (
- "movq 0(%[dest]), %%rax\n\t"
- "movq 8(%[dest]), %%rdx\n\t"
- ".align 16\n\t"
- "1: lock; cmpxchg16b 0(%[dest])\n\t"
- "jne 1b\n\t"
- :
- : "b" (v_lo), "c" (v_hi), [dest] "r" (&storage)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "rax", "rdx", "memory"
- );
-#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
}
static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order) BOOST_NOEXCEPT
{
#if defined(__clang__)
+
// Clang cannot allocate rax:rdx register pairs but it has sync intrinsics
storage_type value = storage_type();
return __sync_val_compare_and_swap(&storage, value, value);
-#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
- // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
- storage_type value;
+
+#elif defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
+ // Some compilers can't allocate rax:rdx register pair either and also don't support 128-bit __sync_val_compare_and_swap
+ uint64_t value_bits[2];
// We don't care for comparison result here; the previous value will be stored into value anyway.
// Also we don't care for rbx and rcx values, they just have to be equal to rax and rdx before cmpxchg16b.
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
__asm__ __volatile__
(
"movq %%rbx, %%rax\n\t"
"movq %%rcx, %%rdx\n\t"
"lock; cmpxchg16b %[storage]\n\t"
- "movq %%rax, %[value]\n\t"
- "movq %%rdx, 8+%[value]\n\t"
- : [value] "=o" (value)
+ : "=&a" (value_bits[0]), "=&d" (value_bits[1])
: [storage] "m" (storage)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
- );
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
- __asm__ __volatile__
- (
- "movq %%rbx, %%rax\n\t"
- "movq %%rcx, %%rdx\n\t"
- "lock; cmpxchg16b %[storage]\n\t"
- "movq %%rax, 0(%[value])\n\t"
- "movq %%rdx, 8(%[value])\n\t"
- :
- : [storage] "m" (storage), [value] "r" (&value)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
-#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ storage_type value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&value, value_bits, sizeof(value));
return value;
-#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+
storage_type value;
// We don't care for comparison result here; the previous value will be stored into value anyway.
@@ -485,7 +445,8 @@ struct gcc_dcas_x86_64
);
return value;
-#endif
+
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
}
static BOOST_FORCEINLINE bool compare_exchange_strong(
@@ -498,53 +459,31 @@ struct gcc_dcas_x86_64
expected = __sync_val_compare_and_swap(&storage, old_expected, desired);
return expected == old_expected;
-#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+#elif defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
- // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
- uint64_t const* p_desired = (uint64_t const*)&desired;
- const uint64_t desired_lo = p_desired[0], desired_hi = p_desired[1];
+ // Some compilers can't allocate rax:rdx register pair either but also don't support 128-bit __sync_val_compare_and_swap
bool success;
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
- __asm__ __volatile__
- (
- "movq %[expected], %%rax\n\t"
- "movq 8+%[expected], %%rdx\n\t"
- "lock; cmpxchg16b %[dest]\n\t"
- "sete %[success]\n\t"
- "movq %%rax, %[expected]\n\t"
- "movq %%rdx, 8+%[expected]\n\t"
- : [dest] "+m" (storage), [expected] "+o" (expected), [success] "=q" (success)
- : "b" (desired_lo), "c" (desired_hi)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
- );
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
__asm__ __volatile__
(
- "movq 0(%[expected]), %%rax\n\t"
- "movq 8(%[expected]), %%rdx\n\t"
"lock; cmpxchg16b %[dest]\n\t"
"sete %[success]\n\t"
- "movq %%rax, 0(%[expected])\n\t"
- "movq %%rdx, 8(%[expected])\n\t"
- : [dest] "+m" (storage), [success] "=q" (success)
- : "b" (desired_lo), "c" (desired_hi), [expected] "r" (&expected)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ : [dest] "+m" (storage), "+a" (reinterpret_cast< aliasing_uint64_t* >(&expected)[0]), "+d" (reinterpret_cast< aliasing_uint64_t* >(&expected)[1]), [success] "=q" (success)
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
-#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
return success;
-#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
- uint64_t const* p_desired = (uint64_t const*)&desired;
- const uint64_t desired_lo = p_desired[0], desired_hi = p_desired[1];
bool success;
+
#if defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
__asm__ __volatile__
(
"lock; cmpxchg16b %[dest]\n\t"
- : "+A" (expected), [dest] "+m" (storage), [success] "=@ccz" (success)
- : "b" (desired_lo), "c" (desired_hi)
+ : "+A" (expected), [dest] "+m" (storage), "=@ccz" (success)
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[1])
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
#else // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
@@ -552,20 +491,15 @@ struct gcc_dcas_x86_64
(
"lock; cmpxchg16b %[dest]\n\t"
"sete %[success]\n\t"
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_CONSTRAINT_ALTERNATIVES)
- : "+A,A" (expected), [dest] "+m,m" (storage), [success] "=q,m" (success)
- : "b,b" (desired_lo), "c,c" (desired_hi)
-#else
- : "+A" (expected), [dest] "+m" (storage), [success] "=q" (success)
- : "b" (desired_lo), "c" (desired_hi)
-#endif
+ : "+A" (expected), [dest] "+m" (storage), [success] "=qm" (success)
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&desired)[1])
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
#endif // defined(BOOST_ATOMIC_DETAIL_ASM_HAS_FLAG_OUTPUTS)
return success;
-#endif // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
}
static BOOST_FORCEINLINE bool compare_exchange_weak(
@@ -576,83 +510,39 @@ struct gcc_dcas_x86_64
static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
-#if defined(__clang__)
- // Clang cannot allocate eax:edx register pairs but it has sync intrinsics
- storage_type old_val = storage;
- while (true)
- {
- storage_type val = __sync_val_compare_and_swap(&storage, old_val, v);
- if (val == old_val)
- return val;
- old_val = val;
- }
-#elif defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
- // GCC 4.4 can't allocate rax:rdx register pair either but it also doesn't support 128-bit __sync_val_compare_and_swap
- storage_type old_value;
- uint64_t const* p_value = (uint64_t const*)&v;
- const uint64_t v_lo = p_value[0], v_hi = p_value[1];
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+#if defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ uint64_t old_bits[2];
__asm__ __volatile__
(
- "movq %[dest], %%rax\n\t"
- "movq 8+%[dest], %%rdx\n\t"
+ "movq %[dest_lo], %%rax\n\t"
+ "movq %[dest_hi], %%rdx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg16b %[dest]\n\t"
+ "1: lock; cmpxchg16b %[dest_lo]\n\t"
"jne 1b\n\t"
- "movq %%rax, %[old_value]\n\t"
- "movq %%rdx, 8+%[old_value]\n\t"
- : [dest] "+o" (storage), [old_value] "=o" (old_value)
- : "b" (v_lo), "c" (v_hi)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
- );
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
- __asm__ __volatile__
- (
- "movq 0(%[dest]), %%rax\n\t"
- "movq 8(%[dest]), %%rdx\n\t"
- ".align 16\n\t"
- "1: lock; cmpxchg16b 0(%[dest])\n\t"
- "jne 1b\n\t"
- "movq %%rax, 0(%[old_value])\n\t"
- "movq %%rdx, 8(%[old_value])\n\t"
- :
- : "b" (v_lo), "c" (v_hi), [dest] "r" (&storage), [old_value] "r" (&old_value)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory", "rax", "rdx"
+ : [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint64_t* >(&storage)[1]), "=&a" (old_bits[0]), "=&d" (old_bits[1])
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&v)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&v)[1])
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
-#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+ storage_type old_value;
+ BOOST_ATOMIC_DETAIL_MEMCPY(&old_value, old_bits, sizeof(old_value));
return old_value;
-#else // defined(BOOST_ATOMIC_DETAIL_NO_ASM_RAX_RDX_PAIRS)
- uint64_t const* p_value = (uint64_t const*)&v;
- const uint64_t v_lo = p_value[0], v_hi = p_value[1];
-#if !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
- __asm__ __volatile__
- (
- "movq %[dest], %%rax\n\t"
- "movq 8+%[dest], %%rdx\n\t"
- ".align 16\n\t"
- "1: lock; cmpxchg16b %[dest]\n\t"
- "jne 1b\n\t"
- : "=&A" (v), [dest] "+o" (storage)
- : "b" (v_lo), "c" (v_hi)
- : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
- );
-#else // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
+#else // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
+ storage_type old_value;
__asm__ __volatile__
(
- "movq 0(%[dest]), %%rax\n\t"
- "movq 8(%[dest]), %%rdx\n\t"
+ "movq %[dest_lo], %%rax\n\t"
+ "movq %[dest_hi], %%rdx\n\t"
".align 16\n\t"
- "1: lock; cmpxchg16b 0(%[dest])\n\t"
+ "1: lock; cmpxchg16b %[dest_lo]\n\t"
"jne 1b\n\t"
- : "=&A" (v)
- : "b" (v_lo), "c" (v_hi), [dest] "r" (&storage)
+ : "=&A" (old_value), [dest_lo] "+m" (storage), [dest_hi] "+m" (reinterpret_cast< volatile aliasing_uint64_t* >(&storage)[1])
+ : "b" (reinterpret_cast< const aliasing_uint64_t* >(&v)[0]), "c" (reinterpret_cast< const aliasing_uint64_t* >(&v)[1])
: BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
);
-#endif // !defined(BOOST_ATOMIC_DETAIL_NO_ASM_IMPLIED_ZERO_DISPLACEMENTS)
- return v;
-#endif
+ return old_value;
+#endif // defined(BOOST_ATOMIC_DETAIL_X86_NO_ASM_AX_DX_PAIRS)
}
};
diff --git a/boost/atomic/detail/ops_linux_arm.hpp b/boost/atomic/detail/ops_linux_arm.hpp
index 840c125a7b..16af1732cf 100644
--- a/boost/atomic/detail/ops_linux_arm.hpp
+++ b/boost/atomic/detail/ops_linux_arm.hpp
@@ -58,11 +58,12 @@ namespace detail {
struct linux_arm_cas_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void fence_before_store(memory_order order) BOOST_NOEXCEPT
{
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
hardware_full_fence();
}
@@ -74,7 +75,7 @@ struct linux_arm_cas_base
static BOOST_FORCEINLINE void fence_after_load(memory_order order) BOOST_NOEXCEPT
{
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
hardware_full_fence();
}
@@ -89,8 +90,8 @@ template< bool Signed >
struct linux_arm_cas :
public linux_arm_cas_base
{
- typedef typename make_storage_type< 4u, Signed >::type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 4u >::type storage_type;
+ typedef typename make_storage_type< 4u >::aligned aligned_storage_type;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
diff --git a/boost/atomic/detail/ops_msvc_arm.hpp b/boost/atomic/detail/ops_msvc_arm.hpp
index a0cfe81afc..608c6fddf8 100644
--- a/boost/atomic/detail/ops_msvc_arm.hpp
+++ b/boost/atomic/detail/ops_msvc_arm.hpp
@@ -54,6 +54,7 @@ namespace detail {
struct msvc_arm_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
@@ -65,7 +66,7 @@ struct msvc_arm_operations_base
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
- if ((order & memory_order_release) != 0)
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
@@ -85,7 +86,7 @@ struct msvc_arm_operations_base
{
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
- if ((order & (memory_order_consume | memory_order_acquire)) != 0)
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
hardware_full_fence();
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
@@ -94,15 +95,20 @@ struct msvc_arm_operations_base
static BOOST_FORCEINLINE BOOST_CONSTEXPR memory_order cas_common_order(memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
{
// Combine order flags together and promote memory_order_consume to memory_order_acquire
- return static_cast< memory_order >(((failure_order | success_order) & ~memory_order_consume) | (((failure_order | success_order) & memory_order_consume) << 1u));
+ return static_cast< memory_order >(((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & ~static_cast< unsigned int >(memory_order_consume))
+ | (((static_cast< unsigned int >(failure_order) | static_cast< unsigned int >(success_order)) & static_cast< unsigned int >(memory_order_consume)) << 1u));
}
};
-template< typename T, typename Derived >
+template< std::size_t Size, bool Signed, typename Derived >
struct msvc_arm_operations :
public msvc_arm_operations_base
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -129,14 +135,10 @@ struct msvc_arm_operations :
template< bool Signed >
struct operations< 1u, Signed > :
- public msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+ public msvc_arm_operations< 1u, Signed, operations< 1u, Signed > >
{
- typedef msvc_arm_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef msvc_arm_operations< 1u, Signed, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -298,14 +300,10 @@ struct operations< 1u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
- public msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+ public msvc_arm_operations< 2u, Signed, operations< 2u, Signed > >
{
- typedef msvc_arm_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef msvc_arm_operations< 2u, Signed, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -467,14 +465,10 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct operations< 4u, Signed > :
- public msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+ public msvc_arm_operations< 4u, Signed, operations< 4u, Signed > >
{
- typedef msvc_arm_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef msvc_arm_operations< 4u, Signed, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -636,14 +630,10 @@ struct operations< 4u, Signed > :
template< bool Signed >
struct operations< 8u, Signed > :
- public msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
+ public msvc_arm_operations< 8u, Signed, operations< 8u, Signed > >
{
- typedef msvc_arm_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
+ typedef msvc_arm_operations< 8u, Signed, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/ops_msvc_x86.hpp b/boost/atomic/detail/ops_msvc_x86.hpp
index 85bed28ad1..70b0ea994b 100644
--- a/boost/atomic/detail/ops_msvc_x86.hpp
+++ b/boost/atomic/detail/ops_msvc_x86.hpp
@@ -73,6 +73,7 @@ namespace detail {
struct msvc_x86_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
@@ -107,11 +108,15 @@ struct msvc_x86_operations_base
}
};
-template< typename T, typename Derived >
+template< std::size_t Size, bool Signed, typename Derived >
struct msvc_x86_operations :
public msvc_x86_operations_base
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -159,14 +164,10 @@ struct msvc_x86_operations :
template< bool Signed >
struct operations< 4u, Signed > :
- public msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+ public msvc_x86_operations< 4u, Signed, operations< 4u, Signed > >
{
- typedef msvc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef msvc_x86_operations< 4u, Signed, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -234,14 +235,10 @@ struct operations< 4u, Signed > :
template< bool Signed >
struct operations< 1u, Signed > :
- public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+ public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
{
- typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -282,14 +279,10 @@ struct operations< 1u, Signed > :
template< bool Signed >
struct operations< 1u, Signed > :
- public msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
+ public msvc_x86_operations< 1u, Signed, operations< 1u, Signed > >
{
- typedef msvc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
+ typedef msvc_x86_operations< 1u, Signed, operations< 1u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 1u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -342,22 +335,19 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
- int backup;
__asm
{
- mov backup, ebx
- xor edx, edx
mov edi, storage
- movzx ebx, v
+ movzx ecx, v
+ xor edx, edx
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
- and dl, bl
+ and dl, cl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
- mov ebx, backup
};
base_type::fence_after(order);
return v;
@@ -366,22 +356,19 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
- int backup;
__asm
{
- mov backup, ebx
- xor edx, edx
mov edi, storage
- movzx ebx, v
+ movzx ecx, v
+ xor edx, edx
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
- or dl, bl
+ or dl, cl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
- mov ebx, backup
};
base_type::fence_after(order);
return v;
@@ -390,22 +377,19 @@ struct operations< 1u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
- int backup;
__asm
{
- mov backup, ebx
- xor edx, edx
mov edi, storage
- movzx ebx, v
+ movzx ecx, v
+ xor edx, edx
movzx eax, byte ptr [edi]
align 16
again:
mov dl, al
- xor dl, bl
+ xor dl, cl
lock cmpxchg byte ptr [edi], dl
jne again
mov v, al
- mov ebx, backup
};
base_type::fence_after(order);
return v;
@@ -426,14 +410,10 @@ struct operations< 1u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
- public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+ public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
{
- typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -474,14 +454,10 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct operations< 2u, Signed > :
- public msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
+ public msvc_x86_operations< 2u, Signed, operations< 2u, Signed > >
{
- typedef msvc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
+ typedef msvc_x86_operations< 2u, Signed, operations< 2u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 2u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -534,22 +510,19 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
- int backup;
__asm
{
- mov backup, ebx
- xor edx, edx
mov edi, storage
- movzx ebx, v
+ movzx ecx, v
+ xor edx, edx
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
- and dx, bx
+ and dx, cx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
- mov ebx, backup
};
base_type::fence_after(order);
return v;
@@ -558,22 +531,19 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
- int backup;
__asm
{
- mov backup, ebx
- xor edx, edx
mov edi, storage
- movzx ebx, v
+ movzx ecx, v
+ xor edx, edx
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
- or dx, bx
+ or dx, cx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
- mov ebx, backup
};
base_type::fence_after(order);
return v;
@@ -582,22 +552,19 @@ struct operations< 2u, Signed > :
static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
base_type::fence_before(order);
- int backup;
__asm
{
- mov backup, ebx
- xor edx, edx
mov edi, storage
- movzx ebx, v
+ movzx ecx, v
+ xor edx, edx
movzx eax, word ptr [edi]
align 16
again:
mov dx, ax
- xor dx, bx
+ xor dx, cx
lock cmpxchg word ptr [edi], dx
jne again
mov v, ax
- mov ebx, backup
};
base_type::fence_after(order);
return v;
@@ -620,9 +587,10 @@ struct operations< 2u, Signed > :
template< bool Signed >
struct msvc_dcas_x86
{
- typedef typename make_storage_type< 8u, Signed >::type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 8u >::type storage_type;
+ typedef typename make_storage_type< 8u >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
@@ -670,7 +638,7 @@ struct msvc_dcas_x86
}
else
{
- int backup;
+ uint32_t backup;
__asm
{
mov backup, ebx
@@ -758,7 +726,7 @@ struct msvc_dcas_x86
expected = old_val;
#else
bool result;
- int backup;
+ uint32_t backup;
__asm
{
mov backup, ebx
@@ -791,7 +759,7 @@ struct msvc_dcas_x86
BOOST_ATOMIC_DETAIL_COMPILER_BARRIER();
storage_type volatile* p = &storage;
- int backup;
+ uint32_t backup;
__asm
{
mov backup, ebx
@@ -825,14 +793,10 @@ struct operations< 8u, Signed > :
template< bool Signed >
struct operations< 8u, Signed > :
- public msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
+ public msvc_x86_operations< 8u, Signed, operations< 8u, Signed > >
{
- typedef msvc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
+ typedef msvc_x86_operations< 8u, Signed, operations< 8u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
{
@@ -876,9 +840,10 @@ struct operations< 8u, Signed > :
template< bool Signed >
struct msvc_dcas_x86_64
{
- typedef typename make_storage_type< 16u, Signed >::type storage_type;
- typedef typename make_storage_type< 16u, Signed >::aligned aligned_storage_type;
+ typedef typename make_storage_type< 16u >::type storage_type;
+ typedef typename make_storage_type< 16u >::aligned aligned_storage_type;
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = true;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 16u;
diff --git a/boost/atomic/detail/ops_windows.hpp b/boost/atomic/detail/ops_windows.hpp
index 29bd3809d2..d4ce6d95e7 100644
--- a/boost/atomic/detail/ops_windows.hpp
+++ b/boost/atomic/detail/ops_windows.hpp
@@ -44,6 +44,7 @@ namespace detail {
struct windows_operations_base
{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
static BOOST_FORCEINLINE void hardware_full_fence() BOOST_NOEXCEPT
@@ -63,11 +64,15 @@ struct windows_operations_base
}
};
-template< typename T, typename Derived >
+template< std::size_t Size, bool Signed, typename Derived >
struct windows_operations :
public windows_operations_base
{
- typedef T storage_type;
+ typedef typename make_storage_type< Size >::type storage_type;
+ typedef typename make_storage_type< Size >::aligned aligned_storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
@@ -104,14 +109,10 @@ struct windows_operations :
template< bool Signed >
struct operations< 4u, Signed > :
- public windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
+ public windows_operations< 4u, Signed, operations< 4u, Signed > >
{
- typedef windows_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
+ typedef windows_operations< 4u, Signed, operations< 4u, Signed > > base_type;
typedef typename base_type::storage_type storage_type;
- typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;
-
- static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
- static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
{
diff --git a/boost/atomic/detail/platform.hpp b/boost/atomic/detail/platform.hpp
index 117dff27f4..df4cc305ac 100644
--- a/boost/atomic/detail/platform.hpp
+++ b/boost/atomic/detail/platform.hpp
@@ -140,12 +140,24 @@
#define BOOST_ATOMIC_EMULATED
#endif
+#if !defined(BOOST_ATOMIC_DETAIL_FP_BACKEND)
+#define BOOST_ATOMIC_DETAIL_FP_BACKEND generic
+#define BOOST_ATOMIC_DETAIL_FP_BACKEND_GENERIC
+#endif
+
#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_BACKEND)
#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND generic
#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_GENERIC
#endif
+#if !defined(BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND)
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND generic
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_GENERIC
+#endif
+
#define BOOST_ATOMIC_DETAIL_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_BACKEND).hpp>
+#define BOOST_ATOMIC_DETAIL_FP_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_FP_BACKEND).hpp>
#define BOOST_ATOMIC_DETAIL_EXTRA_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_EXTRA_BACKEND).hpp>
+#define BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND_HEADER(prefix) <BOOST_JOIN(prefix, BOOST_ATOMIC_DETAIL_EXTRA_FP_BACKEND).hpp>
#endif // BOOST_ATOMIC_DETAIL_PLATFORM_HPP_INCLUDED_
diff --git a/boost/atomic/detail/storage_type.hpp b/boost/atomic/detail/storage_type.hpp
index d4d07f2273..5d824d3a27 100644
--- a/boost/atomic/detail/storage_type.hpp
+++ b/boost/atomic/detail/storage_type.hpp
@@ -19,9 +19,7 @@
#include <cstddef>
#include <boost/cstdint.hpp>
#include <boost/atomic/detail/config.hpp>
-#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
-#include <cstring>
-#endif
+#include <boost/atomic/detail/string_ops.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
@@ -64,7 +62,7 @@ BOOST_FORCEINLINE void non_atomic_load(buffer_storage< Size > const volatile& fr
BOOST_ATOMIC_DETAIL_MEMCPY(to.data, const_cast< unsigned char const* >(from.data), Size);
}
-template< std::size_t Size, bool Signed >
+template< std::size_t Size >
struct make_storage_type
{
typedef buffer_storage< Size > type;
@@ -79,7 +77,7 @@ struct make_storage_type
};
template< >
-struct make_storage_type< 1u, false >
+struct make_storage_type< 1u >
{
typedef boost::uint8_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
@@ -93,21 +91,7 @@ struct make_storage_type< 1u, false >
};
template< >
-struct make_storage_type< 1u, true >
-{
- typedef boost::int8_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 2u, false >
+struct make_storage_type< 2u >
{
typedef boost::uint16_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
@@ -121,21 +105,7 @@ struct make_storage_type< 2u, false >
};
template< >
-struct make_storage_type< 2u, true >
-{
- typedef boost::int16_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(2) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 4u, false >
+struct make_storage_type< 4u >
{
typedef boost::uint32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
@@ -149,21 +119,7 @@ struct make_storage_type< 4u, false >
};
template< >
-struct make_storage_type< 4u, true >
-{
- typedef boost::int32_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(4) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
-template< >
-struct make_storage_type< 8u, false >
+struct make_storage_type< 8u >
{
typedef boost::uint64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
@@ -176,24 +132,10 @@ struct make_storage_type< 8u, false >
};
};
-template< >
-struct make_storage_type< 8u, true >
-{
- typedef boost::int64_t BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(8) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
#if defined(BOOST_HAS_INT128)
template< >
-struct make_storage_type< 16u, false >
+struct make_storage_type< 16u >
{
typedef boost::uint128_type BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
@@ -206,20 +148,6 @@ struct make_storage_type< 16u, false >
};
};
-template< >
-struct make_storage_type< 16u, true >
-{
- typedef boost::int128_type BOOST_ATOMIC_DETAIL_MAY_ALIAS type;
-
- struct BOOST_ATOMIC_DETAIL_MAY_ALIAS aligned
- {
- BOOST_ALIGNMENT(16) type value;
-
- BOOST_DEFAULTED_FUNCTION(aligned(), {})
- BOOST_FORCEINLINE BOOST_CONSTEXPR explicit aligned(type v) BOOST_NOEXCEPT : value(v) {}
- };
-};
-
#elif !defined(BOOST_NO_ALIGNMENT)
struct BOOST_ATOMIC_DETAIL_MAY_ALIAS storage128_t
@@ -230,13 +158,13 @@ struct BOOST_ATOMIC_DETAIL_MAY_ALIAS storage128_t
BOOST_FORCEINLINE bool operator! () const BOOST_NOEXCEPT
{
- return data[0] == 0 && data[1] == 0;
+ return (data[0] | data[1]) == 0u;
}
};
BOOST_FORCEINLINE bool operator== (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
{
- return left.data[0] == right.data[0] && left.data[1] == right.data[1];
+ return ((left.data[0] ^ right.data[0]) | (left.data[1] ^ right.data[1])) == 0u;
}
BOOST_FORCEINLINE bool operator!= (storage128_t const& left, storage128_t const& right) BOOST_NOEXCEPT
{
@@ -249,8 +177,8 @@ BOOST_FORCEINLINE void non_atomic_load(storage128_t const volatile& from, storag
to.data[1] = from.data[1];
}
-template< bool Signed >
-struct make_storage_type< 16u, Signed >
+template< >
+struct make_storage_type< 16u >
{
typedef storage128_t type;
@@ -268,11 +196,8 @@ struct make_storage_type< 16u, Signed >
template< typename T >
struct storage_size_of
{
- enum _
- {
- size = sizeof(T),
- value = (size == 3 ? 4 : (size >= 5 && size <= 7 ? 8 : (size >= 9 && size <= 15 ? 16 : size)))
- };
+ static BOOST_CONSTEXPR_OR_CONST std::size_t size = sizeof(T);
+ static BOOST_CONSTEXPR_OR_CONST std::size_t value = (size == 3u ? 4u : (size >= 5u && size <= 7u ? 8u : (size >= 9u && size <= 15u ? 16u : size)));
};
} // namespace detail
diff --git a/boost/atomic/detail/string_ops.hpp b/boost/atomic/detail/string_ops.hpp
new file mode 100644
index 0000000000..ce145b98f2
--- /dev/null
+++ b/boost/atomic/detail/string_ops.hpp
@@ -0,0 +1,61 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/string_ops.hpp
+ *
+ * This header defines string operations for Boost.Atomic
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_STRING_OPS_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_STRING_OPS_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_memcpy)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY
+#endif
+#if __has_builtin(__builtin_memcmp)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP
+#endif
+#if __has_builtin(__builtin_memset)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMSET
+#endif
+#elif defined(BOOST_GCC)
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP
+#define BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMSET
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY)
+#define BOOST_ATOMIC_DETAIL_MEMCPY __builtin_memcpy
+#else
+#define BOOST_ATOMIC_DETAIL_MEMCPY std::memcpy
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP)
+#define BOOST_ATOMIC_DETAIL_MEMCMP __builtin_memcmp
+#else
+#define BOOST_ATOMIC_DETAIL_MEMCMP std::memcmp
+#endif
+
+#if defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMSET)
+#define BOOST_ATOMIC_DETAIL_MEMSET __builtin_memset
+#else
+#define BOOST_ATOMIC_DETAIL_MEMSET std::memset
+#endif
+
+#if !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCPY) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMCMP) || !defined(BOOST_ATOMIC_DETAIL_HAS_BUILTIN_MEMSET)
+#include <cstring>
+#endif
+
+#endif // BOOST_ATOMIC_DETAIL_STRING_OPS_HPP_INCLUDED_
diff --git a/boost/atomic/detail/type_traits/conditional.hpp b/boost/atomic/detail/type_traits/conditional.hpp
index 71397ab154..6b9e896729 100644
--- a/boost/atomic/detail/type_traits/conditional.hpp
+++ b/boost/atomic/detail/type_traits/conditional.hpp
@@ -15,7 +15,7 @@
#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_CONDITIONAL_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
#include <type_traits>
#else
#include <boost/type_traits/conditional.hpp>
@@ -29,7 +29,7 @@ namespace boost {
namespace atomics {
namespace detail {
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
using std::conditional;
#else
using boost::conditional;
diff --git a/boost/atomic/detail/type_traits/integral_constant.hpp b/boost/atomic/detail/type_traits/integral_constant.hpp
new file mode 100644
index 0000000000..eac86491e0
--- /dev/null
+++ b/boost/atomic/detail/type_traits/integral_constant.hpp
@@ -0,0 +1,46 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/integral_constant.hpp
+ *
+ * This header defines \c integral_constant wrapper
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_INTEGRAL_CONSTANT_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_INTEGRAL_CONSTANT_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+#include <type_traits>
+#else
+#include <boost/type_traits/integral_constant.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+using std::integral_constant;
+using std::true_type;
+using std::false_type;
+#else
+using boost::integral_constant;
+using boost::true_type;
+using boost::false_type;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_INTEGRAL_CONSTANT_HPP_INCLUDED_
diff --git a/boost/atomic/detail/type_traits/is_floating_point.hpp b/boost/atomic/detail/type_traits/is_floating_point.hpp
new file mode 100644
index 0000000000..c425112b8b
--- /dev/null
+++ b/boost/atomic/detail/type_traits/is_floating_point.hpp
@@ -0,0 +1,42 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/is_floating_point.hpp
+ *
+ * This header defines \c is_floating_point type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_FLOATING_POINT_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_FLOATING_POINT_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+#include <type_traits>
+#else
+#include <boost/type_traits/is_floating_point.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
+using std::is_floating_point;
+#else
+using boost::is_floating_point;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_FLOATING_POINT_HPP_INCLUDED_
diff --git a/boost/atomic/detail/type_traits/is_function.hpp b/boost/atomic/detail/type_traits/is_function.hpp
index 7b82840f5e..e7205356e4 100644
--- a/boost/atomic/detail/type_traits/is_function.hpp
+++ b/boost/atomic/detail/type_traits/is_function.hpp
@@ -15,7 +15,7 @@
#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_FUNCTION_HPP_INCLUDED_
#include <boost/atomic/detail/config.hpp>
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
#include <type_traits>
#else
#include <boost/type_traits/is_function.hpp>
@@ -29,7 +29,7 @@ namespace boost {
namespace atomics {
namespace detail {
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS)
using std::is_function;
#else
using boost::is_function;
diff --git a/boost/atomic/detail/type_traits/is_iec559.hpp b/boost/atomic/detail/type_traits/is_iec559.hpp
new file mode 100644
index 0000000000..299c4f0f4f
--- /dev/null
+++ b/boost/atomic/detail/type_traits/is_iec559.hpp
@@ -0,0 +1,47 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/is_iec559.hpp
+ *
+ * This header defines \c is_iec559 type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_IEC559_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_IEC559_HPP_INCLUDED_
+
+#include <limits>
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+template< typename T >
+struct is_iec559
+{
+ static BOOST_CONSTEXPR_OR_CONST bool value = !!std::numeric_limits< T >::is_iec559;
+};
+
+#if defined(BOOST_HAS_FLOAT128)
+// libstdc++ does not specialize numeric_limits for __float128
+template< >
+struct is_iec559< boost::float128_type >
+{
+ static BOOST_CONSTEXPR_OR_CONST bool value = true;
+};
+#endif // defined(BOOST_HAS_FLOAT128)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_IEC559_HPP_INCLUDED_
diff --git a/boost/atomic/detail/type_traits/is_integral.hpp b/boost/atomic/detail/type_traits/is_integral.hpp
index 5deb12034c..ef3e2e347e 100644
--- a/boost/atomic/detail/type_traits/is_integral.hpp
+++ b/boost/atomic/detail/type_traits/is_integral.hpp
@@ -16,7 +16,7 @@
#include <boost/atomic/detail/config.hpp>
// Some versions of libstdc++ don't consider __int128 an integral type. Use Boost.TypeTraits because of that.
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
#include <type_traits>
#else
#include <boost/type_traits/is_integral.hpp>
@@ -30,7 +30,7 @@ namespace boost {
namespace atomics {
namespace detail {
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
using std::is_integral;
#else
using boost::is_integral;
diff --git a/boost/atomic/detail/type_traits/is_signed.hpp b/boost/atomic/detail/type_traits/is_signed.hpp
index bf95163828..2dc1df7267 100644
--- a/boost/atomic/detail/type_traits/is_signed.hpp
+++ b/boost/atomic/detail/type_traits/is_signed.hpp
@@ -16,7 +16,7 @@
#include <boost/atomic/detail/config.hpp>
// Some versions of libstdc++ don't consider __int128 an integral type. Use Boost.TypeTraits because of that.
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
#include <type_traits>
#else
#include <boost/type_traits/is_signed.hpp>
@@ -30,7 +30,7 @@ namespace boost {
namespace atomics {
namespace detail {
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
using std::is_signed;
#else
using boost::is_signed;
diff --git a/boost/atomic/detail/type_traits/is_trivially_default_constructible.hpp b/boost/atomic/detail/type_traits/is_trivially_default_constructible.hpp
new file mode 100644
index 0000000000..5f88b88e42
--- /dev/null
+++ b/boost/atomic/detail/type_traits/is_trivially_default_constructible.hpp
@@ -0,0 +1,46 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2018 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/is_trivially_default_constructible.hpp
+ *
+ * This header defines \c is_trivially_default_constructible type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_TRIVIALLY_DEFAULT_CONSTRUCTIBLE_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_TRIVIALLY_DEFAULT_CONSTRUCTIBLE_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+#if !defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
+#include <type_traits>
+#else
+#include <boost/type_traits/has_trivial_constructor.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_NO_CXX11_HDR_TYPE_TRAITS)
+using std::is_trivially_default_constructible;
+#elif !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)
+template< typename T >
+using is_trivially_default_constructible = boost::has_trivial_constructor< T >;
+#else
+template< typename T >
+struct is_trivially_default_constructible : public boost::has_trivial_constructor< T > {};
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_IS_TRIVIALLY_DEFAULT_CONSTRUCTIBLE_HPP_INCLUDED_
diff --git a/boost/atomic/detail/type_traits/make_signed.hpp b/boost/atomic/detail/type_traits/make_signed.hpp
index 831efdc391..82f61b33c0 100644
--- a/boost/atomic/detail/type_traits/make_signed.hpp
+++ b/boost/atomic/detail/type_traits/make_signed.hpp
@@ -16,7 +16,7 @@
#include <boost/atomic/detail/config.hpp>
// Some versions of libstdc++ don't consider __int128 an integral type. Use Boost.TypeTraits because of that.
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
#include <type_traits>
#else
#include <boost/type_traits/make_signed.hpp>
@@ -30,7 +30,7 @@ namespace boost {
namespace atomics {
namespace detail {
-#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
using std::make_signed;
#else
using boost::make_signed;
diff --git a/boost/atomic/detail/type_traits/make_unsigned.hpp b/boost/atomic/detail/type_traits/make_unsigned.hpp
new file mode 100644
index 0000000000..573a161694
--- /dev/null
+++ b/boost/atomic/detail/type_traits/make_unsigned.hpp
@@ -0,0 +1,43 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2017 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/type_traits/make_unsigned.hpp
+ *
+ * This header defines \c make_unsigned type trait
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_TRAITS_MAKE_UNSIGNED_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_TYPE_TRAITS_MAKE_UNSIGNED_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+// Some versions of libstdc++ don't consider __int128 an integral type. Use Boost.TypeTraits because of that.
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+#include <type_traits>
+#else
+#include <boost/type_traits/make_unsigned.hpp>
+#endif
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if !defined(BOOST_ATOMIC_DETAIL_NO_CXX11_BASIC_HDR_TYPE_TRAITS) && !defined(BOOST_HAS_INT128)
+using std::make_unsigned;
+#else
+using boost::make_unsigned;
+#endif
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif // BOOST_ATOMIC_DETAIL_TYPE_TRAITS_MAKE_UNSIGNED_HPP_INCLUDED_