author     Chanho Park <chanho61.park@samsung.com>  2014-12-11 18:55:56 +0900
committer  Chanho Park <chanho61.park@samsung.com>  2014-12-11 18:55:56 +0900
commit     08c1e93fa36a49f49325a07fe91ff92c964c2b6c (patch)
tree       7a7053ceb8874b28ec4b868d4c49b500008a102e /boost/lockfree
parent     bb4dd8289b351fae6b55e303f189127a394a1edd (diff)
download   boost-08c1e93fa36a49f49325a07fe91ff92c964c2b6c.tar.gz
           boost-08c1e93fa36a49f49325a07fe91ff92c964c2b6c.tar.bz2
           boost-08c1e93fa36a49f49325a07fe91ff92c964c2b6c.zip
Imported Upstream version 1.57.0 (upstream/1.57.0)
Diffstat (limited to 'boost/lockfree')
-rw-r--r--  boost/lockfree/detail/atomic.hpp                      62
-rw-r--r--  boost/lockfree/detail/branch_hints.hpp                38
-rw-r--r--  boost/lockfree/detail/copy_payload.hpp                74
-rw-r--r--  boost/lockfree/detail/freelist.hpp                   648
-rw-r--r--  boost/lockfree/detail/parameter.hpp                   73
-rw-r--r--  boost/lockfree/detail/prefix.hpp                      56
-rw-r--r--  boost/lockfree/detail/tagged_ptr.hpp                  21
-rw-r--r--  boost/lockfree/detail/tagged_ptr_dcas.hpp            134
-rw-r--r--  boost/lockfree/detail/tagged_ptr_ptrcompression.hpp  175
-rw-r--r--  boost/lockfree/policies.hpp                           59
-rw-r--r--  boost/lockfree/queue.hpp                             550
-rw-r--r--  boost/lockfree/spsc_queue.hpp                        962
-rw-r--r--  boost/lockfree/stack.hpp                             583
13 files changed, 3435 insertions, 0 deletions
diff --git a/boost/lockfree/detail/atomic.hpp b/boost/lockfree/detail/atomic.hpp
new file mode 100644
index 0000000000..f36ad4b6d5
--- /dev/null
+++ b/boost/lockfree/detail/atomic.hpp
@@ -0,0 +1,62 @@
+// Copyright (C) 2011-2013 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_DETAIL_ATOMIC_HPP
+#define BOOST_LOCKFREE_DETAIL_ATOMIC_HPP
+
+#include <boost/config.hpp>
+
+#ifndef BOOST_LOCKFREE_FORCE_STD_ATOMIC
+
+#define BOOST_LOCKFREE_NO_HDR_ATOMIC
+
+// MSVC supports atomic<> from version 2012 onwards.
+#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)
+#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
+#endif
+
+// GCC supports atomic<> from version 4.8 onwards.
+#if (BOOST_GCC >= 40800) && (__cplusplus >= 201103L)
+#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
+#endif
+
+#endif // BOOST_LOCKFREE_FORCE_STD_ATOMIC
+
+
+#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
+#include <boost/atomic.hpp>
+#else
+#include <atomic>
+#endif
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
+using boost::atomic;
+using boost::memory_order_acquire;
+using boost::memory_order_consume;
+using boost::memory_order_relaxed;
+using boost::memory_order_release;
+#else
+using std::atomic;
+using std::memory_order_acquire;
+using std::memory_order_consume;
+using std::memory_order_relaxed;
+using std::memory_order_release;
+#endif
+
+}
+using detail::atomic;
+using detail::memory_order_acquire;
+using detail::memory_order_consume;
+using detail::memory_order_relaxed;
+using detail::memory_order_release;
+
+}}
+
+#endif /* BOOST_LOCKFREE_DETAIL_ATOMIC_HPP */
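[Note: illustrative sketch, not part of the diff.] The header above selects boost::atomic unless the compiler is known to ship a usable <atomic> (MSVC 2012+, GCC 4.8+ in C++11 mode), or unless BOOST_LOCKFREE_FORCE_STD_ATOMIC is defined before inclusion. A minimal sketch of forcing the standard-library branch on a C++11 compiler follows; it uses the detail namespace only to show what the selection resolves to.

// Illustrative sketch (not part of the diff): force the <atomic> branch of the
// header above by defining the macro before any lockfree include.
#define BOOST_LOCKFREE_FORCE_STD_ATOMIC
#include <boost/lockfree/detail/atomic.hpp>

// boost::lockfree::detail::atomic is now an alias for std::atomic.
boost::lockfree::detail::atomic<int> counter(0);

int main()
{
    counter.fetch_add(1, boost::lockfree::memory_order_relaxed);
    return counter.load(boost::lockfree::memory_order_acquire);
}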
diff --git a/boost/lockfree/detail/branch_hints.hpp b/boost/lockfree/detail/branch_hints.hpp
new file mode 100644
index 0000000000..3a193f8b24
--- /dev/null
+++ b/boost/lockfree/detail/branch_hints.hpp
@@ -0,0 +1,38 @@
+// branch hints
+// Copyright (C) 2007, 2008 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_BRANCH_HINTS_HPP_INCLUDED
+#define BOOST_LOCKFREE_BRANCH_HINTS_HPP_INCLUDED
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+/** \brief hint for the branch prediction */
+inline bool likely(bool expr)
+{
+#ifdef __GNUC__
+ return __builtin_expect(expr, true);
+#else
+ return expr;
+#endif
+}
+
+/** \brief hint for the branch prediction */
+inline bool unlikely(bool expr)
+{
+#ifdef __GNUC__
+ return __builtin_expect(expr, false);
+#else
+ return expr;
+#endif
+}
+
+} /* namespace detail */
+} /* namespace lockfree */
+} /* namespace boost */
+
+#endif /* BOOST_LOCKFREE_BRANCH_HINTS_HPP_INCLUDED */
diff --git a/boost/lockfree/detail/copy_payload.hpp b/boost/lockfree/detail/copy_payload.hpp
new file mode 100644
index 0000000000..a1aff54f05
--- /dev/null
+++ b/boost/lockfree/detail/copy_payload.hpp
@@ -0,0 +1,74 @@
+// boost lockfree: copy_payload helper
+//
+// Copyright (C) 2011 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
+#define BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
+
+#include <boost/mpl/if.hpp>
+#include <boost/type_traits/is_convertible.hpp>
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+struct copy_convertible
+{
+ template <typename T, typename U>
+ static void copy(T & t, U & u)
+ {
+ u = t;
+ }
+};
+
+struct copy_constructible_and_copyable
+{
+ template <typename T, typename U>
+ static void copy(T & t, U & u)
+ {
+ u = U(t);
+ }
+};
+
+template <typename T, typename U>
+void copy_payload(T & t, U & u)
+{
+ typedef typename boost::mpl::if_<typename boost::is_convertible<T, U>::type,
+ copy_convertible,
+ copy_constructible_and_copyable
+ >::type copy_type;
+ copy_type::copy(t, u);
+}
+
+template <typename T>
+struct consume_via_copy
+{
+ consume_via_copy(T & out):
+ out_(out)
+ {}
+
+ template <typename U>
+ void operator()(U & element)
+ {
+ copy_payload(element, out_);
+ }
+
+ T & out_;
+};
+
+struct consume_noop
+{
+ template <typename U>
+ void operator()(const U &)
+ {
+ }
+};
+
+
+}}}
+
+#endif /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */
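[Note: illustrative sketch, not part of the diff.] copy_payload above dispatches at compile time: if T is convertible to U it assigns directly, otherwise it constructs a temporary U from T and assigns that. A small hedged sketch of both branches; the Wrapper type is hypothetical, and this detail header is not public API.

// Illustrative sketch (not part of the diff) of the compile-time dispatch above.
#include <boost/lockfree/detail/copy_payload.hpp>
#include <cassert>

struct Wrapper {
    explicit Wrapper(int v = 0) : value(v) {}   // explicit, so int is NOT convertible to Wrapper
    int value;
};

int main()
{
    int source = 42;

    double as_double = 0.0;
    boost::lockfree::detail::copy_payload(source, as_double);   // convertible branch: u = t
    assert(as_double == 42.0);

    Wrapper as_wrapper;
    boost::lockfree::detail::copy_payload(source, as_wrapper);  // fallback branch: u = U(t)
    assert(as_wrapper.value == 42);
    return 0;
}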
diff --git a/boost/lockfree/detail/freelist.hpp b/boost/lockfree/detail/freelist.hpp
new file mode 100644
index 0000000000..739df08ee5
--- /dev/null
+++ b/boost/lockfree/detail/freelist.hpp
@@ -0,0 +1,648 @@
+// lock-free freelist
+//
+// Copyright (C) 2008-2013 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
+#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
+
+#include <limits>
+#include <memory>
+
+#include <boost/array.hpp>
+#include <boost/config.hpp>
+#include <boost/cstdint.hpp>
+#include <boost/noncopyable.hpp>
+#include <boost/static_assert.hpp>
+
+#include <boost/lockfree/detail/atomic.hpp>
+#include <boost/lockfree/detail/parameter.hpp>
+#include <boost/lockfree/detail/tagged_ptr.hpp>
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4100) // unreferenced formal parameter
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+template <typename T,
+ typename Alloc = std::allocator<T>
+ >
+class freelist_stack:
+ Alloc
+{
+ struct freelist_node
+ {
+ tagged_ptr<freelist_node> next;
+ };
+
+ typedef tagged_ptr<freelist_node> tagged_node_ptr;
+
+public:
+ typedef tagged_ptr<T> tagged_node_handle;
+
+ template <typename Allocator>
+ freelist_stack (Allocator const & alloc, std::size_t n = 0):
+ Alloc(alloc),
+ pool_(tagged_node_ptr(NULL))
+ {
+ for (std::size_t i = 0; i != n; ++i) {
+ T * node = Alloc::allocate(1);
+#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
+ destruct<false>(node);
+#else
+ deallocate<false>(node);
+#endif
+ }
+ }
+
+ template <bool ThreadSafe>
+ void reserve (std::size_t count)
+ {
+ for (std::size_t i = 0; i != count; ++i) {
+ T * node = Alloc::allocate(1);
+ deallocate<ThreadSafe>(node);
+ }
+ }
+
+ template <bool ThreadSafe, bool Bounded>
+ T * construct (void)
+ {
+ T * node = allocate<ThreadSafe, Bounded>();
+ if (node)
+ new(node) T();
+ return node;
+ }
+
+ template <bool ThreadSafe, bool Bounded, typename ArgumentType>
+ T * construct (ArgumentType const & arg)
+ {
+ T * node = allocate<ThreadSafe, Bounded>();
+ if (node)
+ new(node) T(arg);
+ return node;
+ }
+
+ template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
+ T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
+ {
+ T * node = allocate<ThreadSafe, Bounded>();
+ if (node)
+ new(node) T(arg1, arg2);
+ return node;
+ }
+
+ template <bool ThreadSafe>
+ void destruct (tagged_node_handle tagged_ptr)
+ {
+ T * n = tagged_ptr.get_ptr();
+ n->~T();
+ deallocate<ThreadSafe>(n);
+ }
+
+ template <bool ThreadSafe>
+ void destruct (T * n)
+ {
+ n->~T();
+ deallocate<ThreadSafe>(n);
+ }
+
+ ~freelist_stack(void)
+ {
+ tagged_node_ptr current = pool_.load();
+
+ while (current) {
+ freelist_node * current_ptr = current.get_ptr();
+ if (current_ptr)
+ current = current_ptr->next;
+ Alloc::deallocate((T*)current_ptr, 1);
+ }
+ }
+
+ bool is_lock_free(void) const
+ {
+ return pool_.is_lock_free();
+ }
+
+ T * get_handle(T * pointer) const
+ {
+ return pointer;
+ }
+
+ T * get_handle(tagged_node_handle const & handle) const
+ {
+ return get_pointer(handle);
+ }
+
+ T * get_pointer(tagged_node_handle const & tptr) const
+ {
+ return tptr.get_ptr();
+ }
+
+ T * get_pointer(T * pointer) const
+ {
+ return pointer;
+ }
+
+ T * null_handle(void) const
+ {
+ return NULL;
+ }
+
+protected: // allow use from subclasses
+ template <bool ThreadSafe, bool Bounded>
+ T * allocate (void)
+ {
+ if (ThreadSafe)
+ return allocate_impl<Bounded>();
+ else
+ return allocate_impl_unsafe<Bounded>();
+ }
+
+private:
+ template <bool Bounded>
+ T * allocate_impl (void)
+ {
+ tagged_node_ptr old_pool = pool_.load(memory_order_consume);
+
+ for(;;) {
+ if (!old_pool.get_ptr()) {
+ if (!Bounded)
+ return Alloc::allocate(1);
+ else
+ return 0;
+ }
+
+ freelist_node * new_pool_ptr = old_pool->next.get_ptr();
+ tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());
+
+ if (pool_.compare_exchange_weak(old_pool, new_pool)) {
+ void * ptr = old_pool.get_ptr();
+ return reinterpret_cast<T*>(ptr);
+ }
+ }
+ }
+
+ template <bool Bounded>
+ T * allocate_impl_unsafe (void)
+ {
+ tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
+
+ if (!old_pool.get_ptr()) {
+ if (!Bounded)
+ return Alloc::allocate(1);
+ else
+ return 0;
+ }
+
+ freelist_node * new_pool_ptr = old_pool->next.get_ptr();
+ tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());
+
+ pool_.store(new_pool, memory_order_relaxed);
+ void * ptr = old_pool.get_ptr();
+ return reinterpret_cast<T*>(ptr);
+ }
+
+protected:
+ template <bool ThreadSafe>
+ void deallocate (T * n)
+ {
+ if (ThreadSafe)
+ deallocate_impl(n);
+ else
+ deallocate_impl_unsafe(n);
+ }
+
+private:
+ void deallocate_impl (T * n)
+ {
+ void * node = n;
+ tagged_node_ptr old_pool = pool_.load(memory_order_consume);
+ freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);
+
+ for(;;) {
+ tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
+ new_pool->next.set_ptr(old_pool.get_ptr());
+
+ if (pool_.compare_exchange_weak(old_pool, new_pool))
+ return;
+ }
+ }
+
+ void deallocate_impl_unsafe (T * n)
+ {
+ void * node = n;
+ tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
+ freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);
+
+ tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
+ new_pool->next.set_ptr(old_pool.get_ptr());
+
+ pool_.store(new_pool, memory_order_relaxed);
+ }
+
+ atomic<tagged_node_ptr> pool_;
+};
+
+class tagged_index
+{
+public:
+ typedef boost::uint16_t tag_t;
+ typedef boost::uint16_t index_t;
+
+ /** uninitialized constructor */
+ tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)
+ {}
+
+ /** copy constructor */
+#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
+ tagged_index(tagged_index const & rhs):
+ index(rhs.index), tag(rhs.tag)
+ {}
+#else
+ tagged_index(tagged_index const & rhs) = default;
+#endif
+
+ explicit tagged_index(index_t i, tag_t t = 0):
+ index(i), tag(t)
+ {}
+
+ /** index access */
+ /* @{ */
+ index_t get_index() const
+ {
+ return index;
+ }
+
+ void set_index(index_t i)
+ {
+ index = i;
+ }
+ /* @} */
+
+ /** tag access */
+ /* @{ */
+ tag_t get_tag() const
+ {
+ return tag;
+ }
+
+ tag_t get_next_tag() const
+ {
+ tag_t next = (get_tag() + 1) & (std::numeric_limits<tag_t>::max)();
+ return next;
+ }
+
+ void set_tag(tag_t t)
+ {
+ tag = t;
+ }
+ /* @} */
+
+ bool operator==(tagged_index const & rhs) const
+ {
+ return (index == rhs.index) && (tag == rhs.tag);
+ }
+
+ bool operator!=(tagged_index const & rhs) const
+ {
+ return !operator==(rhs);
+ }
+
+protected:
+ index_t index;
+ tag_t tag;
+};
+
+template <typename T,
+ std::size_t size>
+struct compiletime_sized_freelist_storage
+{
+ // array-based freelists only support a 16bit address space.
+ BOOST_STATIC_ASSERT(size < 65536);
+
+ boost::array<char, size * sizeof(T)> data;
+
+ // unused ... only for API purposes
+ template <typename Allocator>
+ compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */)
+ {}
+
+ T * nodes(void) const
+ {
+ return reinterpret_cast<T*>(const_cast<char*>(data.data()));
+ }
+
+ std::size_t node_count(void) const
+ {
+ return size;
+ }
+};
+
+template <typename T,
+ typename Alloc = std::allocator<T> >
+struct runtime_sized_freelist_storage:
+ Alloc
+{
+ T * nodes_;
+ std::size_t node_count_;
+
+ template <typename Allocator>
+ runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):
+ Alloc(alloc), node_count_(count)
+ {
+ if (count > 65535)
+ boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects"));
+ nodes_ = Alloc::allocate(count);
+ }
+
+ ~runtime_sized_freelist_storage(void)
+ {
+ Alloc::deallocate(nodes_, node_count_);
+ }
+
+ T * nodes(void) const
+ {
+ return nodes_;
+ }
+
+ std::size_t node_count(void) const
+ {
+ return node_count_;
+ }
+};
+
+
+template <typename T,
+ typename NodeStorage = runtime_sized_freelist_storage<T>
+ >
+class fixed_size_freelist:
+ NodeStorage
+{
+ struct freelist_node
+ {
+ tagged_index next;
+ };
+
+ typedef tagged_index::index_t index_t;
+
+ void initialize(void)
+ {
+ T * nodes = NodeStorage::nodes();
+ for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {
+ tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);
+ next_index->set_index(null_handle());
+
+#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
+ destruct<false>(nodes + i);
+#else
+ deallocate<false>(static_cast<index_t>(i));
+#endif
+ }
+ }
+
+public:
+ typedef tagged_index tagged_node_handle;
+
+ template <typename Allocator>
+ fixed_size_freelist (Allocator const & alloc, std::size_t count):
+ NodeStorage(alloc, count),
+ pool_(tagged_index(static_cast<index_t>(count), 0))
+ {
+ initialize();
+ }
+
+ fixed_size_freelist (void):
+ pool_(tagged_index(NodeStorage::node_count(), 0))
+ {
+ initialize();
+ }
+
+ template <bool ThreadSafe, bool Bounded>
+ T * construct (void)
+ {
+ index_t node_index = allocate<ThreadSafe>();
+ if (node_index == null_handle())
+ return NULL;
+
+ T * node = NodeStorage::nodes() + node_index;
+ new(node) T();
+ return node;
+ }
+
+ template <bool ThreadSafe, bool Bounded, typename ArgumentType>
+ T * construct (ArgumentType const & arg)
+ {
+ index_t node_index = allocate<ThreadSafe>();
+ if (node_index == null_handle())
+ return NULL;
+
+ T * node = NodeStorage::nodes() + node_index;
+ new(node) T(arg);
+ return node;
+ }
+
+ template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
+ T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
+ {
+ index_t node_index = allocate<ThreadSafe>();
+ if (node_index == null_handle())
+ return NULL;
+
+ T * node = NodeStorage::nodes() + node_index;
+ new(node) T(arg1, arg2);
+ return node;
+ }
+
+ template <bool ThreadSafe>
+ void destruct (tagged_node_handle tagged_index)
+ {
+ index_t index = tagged_index.get_index();
+ T * n = NodeStorage::nodes() + index;
+ n->~T();
+ deallocate<ThreadSafe>(index);
+ }
+
+ template <bool ThreadSafe>
+ void destruct (T * n)
+ {
+ n->~T();
+ deallocate<ThreadSafe>(n - NodeStorage::nodes());
+ }
+
+ bool is_lock_free(void) const
+ {
+ return pool_.is_lock_free();
+ }
+
+ index_t null_handle(void) const
+ {
+ return static_cast<index_t>(NodeStorage::node_count());
+ }
+
+ index_t get_handle(T * pointer) const
+ {
+ if (pointer == NULL)
+ return null_handle();
+ else
+ return static_cast<index_t>(pointer - NodeStorage::nodes());
+ }
+
+ index_t get_handle(tagged_node_handle const & handle) const
+ {
+ return handle.get_index();
+ }
+
+ T * get_pointer(tagged_node_handle const & tptr) const
+ {
+ return get_pointer(tptr.get_index());
+ }
+
+ T * get_pointer(index_t index) const
+ {
+ if (index == null_handle())
+ return 0;
+ else
+ return NodeStorage::nodes() + index;
+ }
+
+ T * get_pointer(T * ptr) const
+ {
+ return ptr;
+ }
+
+protected: // allow use from subclasses
+ template <bool ThreadSafe>
+ index_t allocate (void)
+ {
+ if (ThreadSafe)
+ return allocate_impl();
+ else
+ return allocate_impl_unsafe();
+ }
+
+private:
+ index_t allocate_impl (void)
+ {
+ tagged_index old_pool = pool_.load(memory_order_consume);
+
+ for(;;) {
+ index_t index = old_pool.get_index();
+ if (index == null_handle())
+ return index;
+
+ T * old_node = NodeStorage::nodes() + index;
+ tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);
+
+ tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());
+
+ if (pool_.compare_exchange_weak(old_pool, new_pool))
+ return old_pool.get_index();
+ }
+ }
+
+ index_t allocate_impl_unsafe (void)
+ {
+ tagged_index old_pool = pool_.load(memory_order_consume);
+
+ index_t index = old_pool.get_index();
+ if (index == null_handle())
+ return index;
+
+ T * old_node = NodeStorage::nodes() + index;
+ tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);
+
+ tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());
+
+ pool_.store(new_pool, memory_order_relaxed);
+ return old_pool.get_index();
+ }
+
+ template <bool ThreadSafe>
+ void deallocate (index_t index)
+ {
+ if (ThreadSafe)
+ deallocate_impl(index);
+ else
+ deallocate_impl_unsafe(index);
+ }
+
+ void deallocate_impl (index_t index)
+ {
+ freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
+ tagged_index old_pool = pool_.load(memory_order_consume);
+
+ for(;;) {
+ tagged_index new_pool (index, old_pool.get_tag());
+ new_pool_node->next.set_index(old_pool.get_index());
+
+ if (pool_.compare_exchange_weak(old_pool, new_pool))
+ return;
+ }
+ }
+
+ void deallocate_impl_unsafe (index_t index)
+ {
+ freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
+ tagged_index old_pool = pool_.load(memory_order_consume);
+
+ tagged_index new_pool (index, old_pool.get_tag());
+ new_pool_node->next.set_index(old_pool.get_index());
+
+ pool_.store(new_pool);
+ }
+
+ atomic<tagged_index> pool_;
+};
+
+template <typename T,
+ typename Alloc,
+ bool IsCompileTimeSized,
+ bool IsFixedSize,
+ std::size_t Capacity
+ >
+struct select_freelist
+{
+ typedef typename mpl::if_c<IsCompileTimeSized,
+ compiletime_sized_freelist_storage<T, Capacity>,
+ runtime_sized_freelist_storage<T, Alloc>
+ >::type fixed_sized_storage_type;
+
+ typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,
+ fixed_size_freelist<T, fixed_sized_storage_type>,
+ freelist_stack<T, Alloc>
+ >::type type;
+};
+
+template <typename T, bool IsNodeBased>
+struct select_tagged_handle
+{
+ typedef typename mpl::if_c<IsNodeBased,
+ tagged_ptr<T>,
+ tagged_index
+ >::type tagged_handle_type;
+
+ typedef typename mpl::if_c<IsNodeBased,
+ T*,
+ typename tagged_index::index_t
+ >::type handle_type;
+};
+
+
+} /* namespace detail */
+} /* namespace lockfree */
+} /* namespace boost */
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+
+#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */
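[Note: illustrative sketch, not part of the diff.] Both freelists above guard their compare_exchange loops against the ABA problem by pairing the head (a pointer or a 16-bit index) with a tag that is bumped on every successful update, so a thread holding a stale head/tag pair fails its CAS even if the same node reappears at the head. A minimal standalone sketch of that pattern, not the Boost implementation, packing a 32-bit index and a 32-bit tag into one 64-bit std::atomic:

// Minimal standalone sketch (not part of the diff) of the tag-on-update idea.
#include <atomic>
#include <cstdint>

// One 64-bit word holds a 32-bit node index plus a 32-bit tag, so a single
// compare_exchange updates both atomically.
struct tagged_head {
    std::uint32_t index;
    std::uint32_t tag;
};

static std::uint64_t pack(tagged_head h)
{
    return (static_cast<std::uint64_t>(h.tag) << 32) | h.index;
}

static tagged_head unpack(std::uint64_t v)
{
    tagged_head h;
    h.index = static_cast<std::uint32_t>(v);
    h.tag   = static_cast<std::uint32_t>(v >> 32);
    return h;
}

static std::atomic<std::uint64_t> head(0);

// Replace the head index, bumping the tag; a thread that read a stale head
// fails the CAS even if the same index has meanwhile been pushed back.
bool try_replace_head(std::uint32_t new_index)
{
    std::uint64_t expected = head.load(std::memory_order_acquire);
    tagged_head current = unpack(expected);
    tagged_head next = { new_index, current.tag + 1 };
    return head.compare_exchange_weak(expected, pack(next),
                                      std::memory_order_acq_rel,
                                      std::memory_order_relaxed);
}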
diff --git a/boost/lockfree/detail/parameter.hpp b/boost/lockfree/detail/parameter.hpp
new file mode 100644
index 0000000000..2e2b068bed
--- /dev/null
+++ b/boost/lockfree/detail/parameter.hpp
@@ -0,0 +1,73 @@
+// boost lockfree
+//
+// Copyright (C) 2011 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
+#define BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
+
+#include <boost/lockfree/policies.hpp>
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+namespace mpl = boost::mpl;
+
+template <typename bound_args, typename tag_type>
+struct has_arg
+{
+ typedef typename parameter::binding<bound_args, tag_type, mpl::void_>::type type;
+ static const bool value = mpl::is_not_void_<type>::type::value;
+};
+
+
+template <typename bound_args>
+struct extract_capacity
+{
+ static const bool has_capacity = has_arg<bound_args, tag::capacity>::value;
+
+ typedef typename mpl::if_c<has_capacity,
+ typename has_arg<bound_args, tag::capacity>::type,
+ mpl::size_t< 0 >
+ >::type capacity_t;
+
+ static const std::size_t capacity = capacity_t::value;
+};
+
+
+template <typename bound_args, typename T>
+struct extract_allocator
+{
+ static const bool has_allocator = has_arg<bound_args, tag::allocator>::value;
+
+ typedef typename mpl::if_c<has_allocator,
+ typename has_arg<bound_args, tag::allocator>::type,
+ std::allocator<T>
+ >::type allocator_arg;
+
+ typedef typename allocator_arg::template rebind<T>::other type;
+};
+
+template <typename bound_args, bool default_ = false>
+struct extract_fixed_sized
+{
+ static const bool has_fixed_sized = has_arg<bound_args, tag::fixed_sized>::value;
+
+ typedef typename mpl::if_c<has_fixed_sized,
+ typename has_arg<bound_args, tag::fixed_sized>::type,
+ mpl::bool_<default_>
+ >::type type;
+
+ static const bool value = type::value;
+};
+
+
+} /* namespace detail */
+} /* namespace lockfree */
+} /* namespace boost */
+
+#endif /* BOOST_LOCKFREE_DETAIL_PARAMETER_HPP */
diff --git a/boost/lockfree/detail/prefix.hpp b/boost/lockfree/detail/prefix.hpp
new file mode 100644
index 0000000000..6ed9446cb0
--- /dev/null
+++ b/boost/lockfree/detail/prefix.hpp
@@ -0,0 +1,56 @@
+// Copyright (C) 2009 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
+#define BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
+
+/* this file defines the following macros:
+ BOOST_LOCKFREE_CACHELINE_BYTES: size of a cache line
+ BOOST_LOCKFREE_PTR_COMPRESSION: use tag/pointer compression to utilize parts
+ of the virtual address space as tag (at least 16bit)
+ BOOST_LOCKFREE_DCAS_ALIGNMENT: symbol used for aligning structs at cache line
+ boundaries
+*/
+
+#define BOOST_LOCKFREE_CACHELINE_BYTES 64
+
+#ifdef _MSC_VER
+
+#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __declspec(align(BOOST_LOCKFREE_CACHELINE_BYTES))
+
+#if defined(_M_IX86)
+ #define BOOST_LOCKFREE_DCAS_ALIGNMENT
+#elif defined(_M_X64) || defined(_M_IA64)
+ #define BOOST_LOCKFREE_PTR_COMPRESSION 1
+ #define BOOST_LOCKFREE_DCAS_ALIGNMENT __declspec(align(16))
+#endif
+
+#endif /* _MSC_VER */
+
+#ifdef __GNUC__
+
+#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __attribute__((aligned(BOOST_LOCKFREE_CACHELINE_BYTES)))
+
+#if defined(__i386__) || defined(__ppc__)
+ #define BOOST_LOCKFREE_DCAS_ALIGNMENT
+#elif defined(__x86_64__)
+ #define BOOST_LOCKFREE_PTR_COMPRESSION 1
+ #define BOOST_LOCKFREE_DCAS_ALIGNMENT __attribute__((aligned(16)))
+#elif defined(__alpha__)
+ // LATER: alpha may benefit from pointer compression. but what is the maximum size of the address space?
+ #define BOOST_LOCKFREE_DCAS_ALIGNMENT
+#endif
+#endif /* __GNUC__ */
+
+#ifndef BOOST_LOCKFREE_DCAS_ALIGNMENT
+#define BOOST_LOCKFREE_DCAS_ALIGNMENT /*BOOST_LOCKFREE_DCAS_ALIGNMENT*/
+#endif
+
+#ifndef BOOST_LOCKFREE_CACHELINE_ALIGNMENT
+#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT /*BOOST_LOCKFREE_CACHELINE_ALIGNMENT*/
+#endif
+
+#endif /* BOOST_LOCKFREE_PREFIX_HPP_INCLUDED */
diff --git a/boost/lockfree/detail/tagged_ptr.hpp b/boost/lockfree/detail/tagged_ptr.hpp
new file mode 100644
index 0000000000..f70a2c751a
--- /dev/null
+++ b/boost/lockfree/detail/tagged_ptr.hpp
@@ -0,0 +1,21 @@
+// tagged pointer, for aba prevention
+//
+// Copyright (C) 2008 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
+#define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
+
+#include <boost/config.hpp>
+#include <boost/lockfree/detail/prefix.hpp>
+
+#ifndef BOOST_LOCKFREE_PTR_COMPRESSION
+#include <boost/lockfree/detail/tagged_ptr_dcas.hpp>
+#else
+#include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp>
+#endif
+
+#endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */
diff --git a/boost/lockfree/detail/tagged_ptr_dcas.hpp b/boost/lockfree/detail/tagged_ptr_dcas.hpp
new file mode 100644
index 0000000000..8bc68386e6
--- /dev/null
+++ b/boost/lockfree/detail/tagged_ptr_dcas.hpp
@@ -0,0 +1,134 @@
+// tagged pointer, for aba prevention
+//
+// Copyright (C) 2008 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
+#define BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
+
+#include <cstddef> /* for std::size_t */
+#include <limits>
+
+#include <boost/lockfree/detail/branch_hints.hpp>
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+template <class T>
+class BOOST_LOCKFREE_DCAS_ALIGNMENT tagged_ptr
+{
+public:
+ typedef std::size_t tag_t;
+
+ /** uninitialized constructor */
+ tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
+ {}
+
+#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
+ tagged_ptr(tagged_ptr const & p):
+ ptr(p.ptr), tag(p.tag)
+ {}
+#else
+ tagged_ptr(tagged_ptr const & p) = default;
+#endif
+
+ explicit tagged_ptr(T * p, tag_t t = 0):
+ ptr(p), tag(t)
+ {}
+
+ /** unsafe set operation */
+ /* @{ */
+#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
+ tagged_ptr & operator= (tagged_ptr const & p)
+ {
+ set(p.ptr, p.tag);
+ return *this;
+ }
+#else
+ tagged_ptr & operator= (tagged_ptr const & p) = default;
+#endif
+
+ void set(T * p, tag_t t)
+ {
+ ptr = p;
+ tag = t;
+ }
+ /* @} */
+
+ /** comparing semantics */
+ /* @{ */
+ bool operator== (volatile tagged_ptr const & p) const
+ {
+ return (ptr == p.ptr) && (tag == p.tag);
+ }
+
+ bool operator!= (volatile tagged_ptr const & p) const
+ {
+ return !operator==(p);
+ }
+ /* @} */
+
+ /** pointer access */
+ /* @{ */
+ T * get_ptr(void) const
+ {
+ return ptr;
+ }
+
+ void set_ptr(T * p)
+ {
+ ptr = p;
+ }
+ /* @} */
+
+ /** tag access */
+ /* @{ */
+ tag_t get_tag() const
+ {
+ return tag;
+ }
+
+ tag_t get_next_tag() const
+ {
+ tag_t next = (get_tag() + 1) & (std::numeric_limits<tag_t>::max)();
+ return next;
+ }
+
+ void set_tag(tag_t t)
+ {
+ tag = t;
+ }
+ /* @} */
+
+ /** smart pointer support */
+ /* @{ */
+ T & operator*() const
+ {
+ return *ptr;
+ }
+
+ T * operator->() const
+ {
+ return ptr;
+ }
+
+ operator bool(void) const
+ {
+ return ptr != 0;
+ }
+ /* @} */
+
+protected:
+ T * ptr;
+ tag_t tag;
+};
+
+} /* namespace detail */
+} /* namespace lockfree */
+} /* namespace boost */
+
+#endif /* BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED */
diff --git a/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp b/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp
new file mode 100644
index 0000000000..e99e6bff6c
--- /dev/null
+++ b/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp
@@ -0,0 +1,175 @@
+// tagged pointer, for aba prevention
+//
+// Copyright (C) 2008, 2009 Tim Blechmann, based on code by Cory Nelson
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
+#define BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
+
+#include <cstddef> /* for std::size_t */
+#include <limits>
+
+#include <boost/cstdint.hpp>
+
+#include <boost/lockfree/detail/branch_hints.hpp>
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+#if defined (__x86_64__) || defined (_M_X64)
+
+template <class T>
+class tagged_ptr
+{
+ typedef boost::uint64_t compressed_ptr_t;
+
+public:
+ typedef boost::uint16_t tag_t;
+
+private:
+ union cast_unit
+ {
+ compressed_ptr_t value;
+ tag_t tag[4];
+ };
+
+ static const int tag_index = 3;
+ static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1;
+
+ static T* extract_ptr(volatile compressed_ptr_t const & i)
+ {
+ return (T*)(i & ptr_mask);
+ }
+
+ static tag_t extract_tag(volatile compressed_ptr_t const & i)
+ {
+ cast_unit cu;
+ cu.value = i;
+ return cu.tag[tag_index];
+ }
+
+ static compressed_ptr_t pack_ptr(T * ptr, int tag)
+ {
+ cast_unit ret;
+ ret.value = compressed_ptr_t(ptr);
+ ret.tag[tag_index] = tag;
+ return ret.value;
+ }
+
+public:
+ /** uninitialized constructor */
+ tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
+ {}
+
+ /** copy constructor */
+#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
+ tagged_ptr(tagged_ptr const & p):
+ ptr(p.ptr)
+ {}
+#else
+ tagged_ptr(tagged_ptr const & p) = default;
+#endif
+
+ explicit tagged_ptr(T * p, tag_t t = 0):
+ ptr(pack_ptr(p, t))
+ {}
+
+ /** unsafe set operation */
+ /* @{ */
+#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
+ tagged_ptr & operator= (tagged_ptr const & p)
+ {
+ ptr = p.ptr;
+ return *this;
+ }
+#else
+ tagged_ptr & operator= (tagged_ptr const & p) = default;
+#endif
+
+ void set(T * p, tag_t t)
+ {
+ ptr = pack_ptr(p, t);
+ }
+ /* @} */
+
+ /** comparing semantics */
+ /* @{ */
+ bool operator== (volatile tagged_ptr const & p) const
+ {
+ return (ptr == p.ptr);
+ }
+
+ bool operator!= (volatile tagged_ptr const & p) const
+ {
+ return !operator==(p);
+ }
+ /* @} */
+
+ /** pointer access */
+ /* @{ */
+ T * get_ptr() const
+ {
+ return extract_ptr(ptr);
+ }
+
+ void set_ptr(T * p)
+ {
+ tag_t tag = get_tag();
+ ptr = pack_ptr(p, tag);
+ }
+ /* @} */
+
+ /** tag access */
+ /* @{ */
+ tag_t get_tag() const
+ {
+ return extract_tag(ptr);
+ }
+
+ tag_t get_next_tag() const
+ {
+ tag_t next = (get_tag() + 1) & (std::numeric_limits<tag_t>::max)();
+ return next;
+ }
+
+ void set_tag(tag_t t)
+ {
+ T * p = get_ptr();
+ ptr = pack_ptr(p, t);
+ }
+ /* @} */
+
+ /** smart pointer support */
+ /* @{ */
+ T & operator*() const
+ {
+ return *get_ptr();
+ }
+
+ T * operator->() const
+ {
+ return get_ptr();
+ }
+
+ operator bool(void) const
+ {
+ return get_ptr() != 0;
+ }
+ /* @} */
+
+protected:
+ compressed_ptr_t ptr;
+};
+#else
+#error unsupported platform
+#endif
+
+} /* namespace detail */
+} /* namespace lockfree */
+} /* namespace boost */
+
+#endif /* BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED */
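[Note: illustrative sketch, not part of the diff.] On x86-64 the class above relies on user-space pointers occupying only the low 48 bits of the address, so the upper 16 bits can carry the ABA tag; the union with tag[4] and tag_index == 3 writes to exactly those upper bits on a little-endian machine. A hedged arithmetic sketch of the same packing, assuming 48-bit user-space pointers:

// Illustrative sketch (not part of the diff): shift-based equivalent of the
// little-endian union trick used by the tagged_ptr above.
#include <cassert>
#include <cstdint>

typedef std::uint64_t compressed_ptr_t;
typedef std::uint16_t tag_t;

static const compressed_ptr_t ptr_mask = (compressed_ptr_t(1) << 48) - 1;

compressed_ptr_t pack(void * p, tag_t tag)
{
    return (compressed_ptr_t(tag) << 48) | (reinterpret_cast<compressed_ptr_t>(p) & ptr_mask);
}

void * extract_ptr(compressed_ptr_t v) { return reinterpret_cast<void*>(v & ptr_mask); }
tag_t  extract_tag(compressed_ptr_t v) { return static_cast<tag_t>(v >> 48); }

int main()
{
    int x = 0;
    compressed_ptr_t packed = pack(&x, 7);
    assert(extract_ptr(packed) == &x);   // pointer survives the round trip
    assert(extract_tag(packed) == 7);    // tag lives in the top 16 bits
    return 0;
}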
diff --git a/boost/lockfree/policies.hpp b/boost/lockfree/policies.hpp
new file mode 100644
index 0000000000..cec9b57f89
--- /dev/null
+++ b/boost/lockfree/policies.hpp
@@ -0,0 +1,59 @@
+// boost lockfree
+//
+// Copyright (C) 2011 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_POLICIES_HPP_INCLUDED
+#define BOOST_LOCKFREE_POLICIES_HPP_INCLUDED
+
+#include <boost/parameter.hpp>
+#include <boost/mpl/bool.hpp>
+#include <boost/mpl/size_t.hpp>
+#include <boost/mpl/void.hpp>
+
+namespace boost {
+namespace lockfree {
+
+#ifndef BOOST_DOXYGEN_INVOKED
+namespace tag { struct allocator ; }
+namespace tag { struct fixed_sized; }
+namespace tag { struct capacity; }
+
+#endif
+
+/** Configures a data structure as \b fixed-sized.
+ *
+ * The internal nodes are stored inside an array and they are addressed by array indexing. This limits the possible size of the
+ * queue to the number of elements that can be addressed by the index type (usually 2**16-2), but on platforms that lack
+ * double-width compare-and-exchange instructions, this is the best way to achieve lock-freedom.
+ * This implies that a data structure is bounded.
+ * */
+template <bool IsFixedSized>
+struct fixed_sized:
+ boost::parameter::template_keyword<tag::fixed_sized, boost::mpl::bool_<IsFixedSized> >
+{};
+
+/** Sets the \b capacity of a data structure at compile-time.
+ *
+ * This implies that a data structure is bounded and fixed-sized.
+ * */
+template <size_t Size>
+struct capacity:
+ boost::parameter::template_keyword<tag::capacity, boost::mpl::size_t<Size> >
+{};
+
+/** Defines the \b allocator type of a data structure.
+ * */
+template <class Alloc>
+struct allocator:
+ boost::parameter::template_keyword<tag::allocator, Alloc>
+{};
+
+}
+}
+
+#endif /* BOOST_LOCKFREE_POLICIES_HPP_INCLUDED */
+
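[Note: illustrative sketch, not part of the diff.] A hedged usage sketch of how these policy keywords are typically combined with the queue defined later in this diff:

// Illustrative sketch (not part of the diff): configuring a queue via policies.
#include <boost/lockfree/queue.hpp>
#include <boost/lockfree/policies.hpp>

// Compile-time capacity: bounded and fixed-sized, no allocation after construction.
typedef boost::lockfree::queue<int, boost::lockfree::capacity<1024> > bounded_queue;

// Fixed-sized with run-time size: array-indexed nodes, 16-bit handles.
typedef boost::lockfree::queue<int, boost::lockfree::fixed_sized<true> > fixed_queue;

int main()
{
    bounded_queue q1;       // capacity fixed at compile time
    fixed_queue   q2(512);  // freelist of 512 nodes allocated up front
    q1.push(1);
    q2.push(2);
    return 0;
}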
diff --git a/boost/lockfree/queue.hpp b/boost/lockfree/queue.hpp
new file mode 100644
index 0000000000..74001fba9d
--- /dev/null
+++ b/boost/lockfree/queue.hpp
@@ -0,0 +1,550 @@
+// lock-free queue from
+// Michael, M. M. and Scott, M. L.,
+// "simple, fast and practical non-blocking and blocking concurrent queue algorithms"
+//
+// Copyright (C) 2008-2013 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED
+#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED
+
+#include <boost/assert.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/type_traits/has_trivial_assign.hpp>
+#include <boost/type_traits/has_trivial_destructor.hpp>
+
+#include <boost/lockfree/detail/atomic.hpp>
+#include <boost/lockfree/detail/copy_payload.hpp>
+#include <boost/lockfree/detail/freelist.hpp>
+#include <boost/lockfree/detail/parameter.hpp>
+#include <boost/lockfree/detail/tagged_ptr.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+#endif
+
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
+ boost::parameter::optional<tag::capacity>
+ > queue_signature;
+
+} /* namespace detail */
+
+
+/** The queue class provides a multi-writer/multi-reader queue; pushing and popping are lock-free, while
+ *  construction and destruction have to be synchronized. It uses a freelist for memory management:
+ *  freed nodes are pushed to the freelist and are not returned to the OS before the queue is destroyed.
+ *
+ * \b Policies:
+ * - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized<false> \n
+ * Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n
+ * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
+ * by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index
+ * type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
+ * to achieve lock-freedom.
+ *
+ * - \ref boost::lockfree::capacity, optional \n
+ * If this template argument is passed to the options, the size of the queue is set at compile-time.\n
+ *    This option implies \c fixed_sized<true>.
+ *
+ * - \ref boost::lockfree::allocator, defaults to \c boost::lockfree::allocator<std::allocator<void>> \n
+ * Specifies the allocator that is used for the internal freelist
+ *
+ * \b Requirements:
+ * - T must have a copy constructor
+ * - T must have a trivial assignment operator
+ * - T must have a trivial destructor
+ *
+ * */
+#ifndef BOOST_DOXYGEN_INVOKED
+template <typename T,
+ class A0 = boost::parameter::void_,
+ class A1 = boost::parameter::void_,
+ class A2 = boost::parameter::void_>
+#else
+template <typename T, ...Options>
+#endif
+class queue
+{
+private:
+#ifndef BOOST_DOXYGEN_INVOKED
+
+#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR
+ BOOST_STATIC_ASSERT((boost::has_trivial_destructor<T>::value));
+#endif
+
+#ifdef BOOST_HAS_TRIVIAL_ASSIGN
+ BOOST_STATIC_ASSERT((boost::has_trivial_assign<T>::value));
+#endif
+
+ typedef typename detail::queue_signature::bind<A0, A1, A2>::type bound_args;
+
+ static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
+ static const size_t capacity = detail::extract_capacity<bound_args>::capacity + 1; // the queue uses one dummy node
+ static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
+ static const bool node_based = !(has_capacity || fixed_sized);
+ static const bool compile_time_sized = has_capacity;
+
+ struct BOOST_LOCKFREE_CACHELINE_ALIGNMENT node
+ {
+ typedef typename detail::select_tagged_handle<node, node_based>::tagged_handle_type tagged_node_handle;
+ typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;
+
+ node(T const & v, handle_type null_handle):
+ data(v)//, next(tagged_node_handle(0, 0))
+ {
+ /* increment tag to avoid ABA problem */
+ tagged_node_handle old_next = next.load(memory_order_relaxed);
+ tagged_node_handle new_next (null_handle, old_next.get_next_tag());
+ next.store(new_next, memory_order_release);
+ }
+
+ node (handle_type null_handle):
+ next(tagged_node_handle(null_handle, 0))
+ {}
+
+ node(void)
+ {}
+
+ atomic<tagged_node_handle> next;
+ T data;
+ };
+
+ typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
+ typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
+ typedef typename pool_t::tagged_node_handle tagged_node_handle;
+ typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;
+
+ void initialize(void)
+ {
+ node * n = pool.template construct<true, false>(pool.null_handle());
+ tagged_node_handle dummy_node(pool.get_handle(n), 0);
+ head_.store(dummy_node, memory_order_relaxed);
+ tail_.store(dummy_node, memory_order_release);
+ }
+
+ struct implementation_defined
+ {
+ typedef node_allocator allocator;
+ typedef std::size_t size_type;
+ };
+
+#endif
+
+ BOOST_DELETED_FUNCTION(queue(queue const&))
+ BOOST_DELETED_FUNCTION(queue& operator= (queue const&))
+
+public:
+ typedef T value_type;
+ typedef typename implementation_defined::allocator allocator;
+ typedef typename implementation_defined::size_type size_type;
+
+ /**
+ * \return true, if implementation is lock-free.
+ *
+ * \warning It only checks whether the queue head and tail nodes and the freelist can be modified in a lock-free manner.
+ *          On most platforms the whole implementation is lock-free if this is true. Using c++0x-style atomics, there is
+ *          no way to provide a completely accurate check, because one would need to test every internal
+ *          node, which is impossible if further nodes may be allocated from the operating system.
+ * */
+ bool is_lock_free (void) const
+ {
+ return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();
+ }
+
+ //! Construct queue
+ // @{
+ queue(void):
+ head_(tagged_node_handle(0, 0)),
+ tail_(tagged_node_handle(0, 0)),
+ pool(node_allocator(), capacity)
+ {
+ BOOST_ASSERT(has_capacity);
+ initialize();
+ }
+
+ template <typename U>
+ explicit queue(typename node_allocator::template rebind<U>::other const & alloc):
+ head_(tagged_node_handle(0, 0)),
+ tail_(tagged_node_handle(0, 0)),
+ pool(alloc, capacity)
+ {
+ BOOST_STATIC_ASSERT(has_capacity);
+ initialize();
+ }
+
+ explicit queue(allocator const & alloc):
+ head_(tagged_node_handle(0, 0)),
+ tail_(tagged_node_handle(0, 0)),
+ pool(alloc, capacity)
+ {
+ BOOST_ASSERT(has_capacity);
+ initialize();
+ }
+ // @}
+
+ //! Construct queue, allocate n nodes for the freelist.
+ // @{
+ explicit queue(size_type n):
+ head_(tagged_node_handle(0, 0)),
+ tail_(tagged_node_handle(0, 0)),
+ pool(node_allocator(), n + 1)
+ {
+ BOOST_ASSERT(!has_capacity);
+ initialize();
+ }
+
+ template <typename U>
+ queue(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
+ head_(tagged_node_handle(0, 0)),
+ tail_(tagged_node_handle(0, 0)),
+ pool(alloc, n + 1)
+ {
+ BOOST_STATIC_ASSERT(!has_capacity);
+ initialize();
+ }
+ // @}
+
+ /** \copydoc boost::lockfree::stack::reserve
+ * */
+ void reserve(size_type n)
+ {
+ pool.template reserve<true>(n);
+ }
+
+ /** \copydoc boost::lockfree::stack::reserve_unsafe
+ * */
+ void reserve_unsafe(size_type n)
+ {
+ pool.template reserve<false>(n);
+ }
+
+ /** Destroys queue, free all nodes from freelist.
+ * */
+ ~queue(void)
+ {
+ T dummy;
+ while(unsynchronized_pop(dummy))
+ {}
+
+ pool.template destruct<false>(head_.load(memory_order_relaxed));
+ }
+
+ /** Check if the queue is empty
+ *
+ * \return true, if the queue is empty, false otherwise
+ * \note The result is only accurate if no other thread modifies the queue. Therefore it is rarely practical to use this
+ * value in program logic.
+ * */
+ bool empty(void) const
+ {
+ return pool.get_handle(head_.load()) == pool.get_handle(tail_.load());
+ }
+
+ /** Pushes object t to the queue.
+ *
+ * \post object will be pushed to the queue, if internal node can be allocated
+ * \returns true, if the push operation is successful.
+ *
+ * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
+ * from the OS. This may not be lock-free.
+ * */
+ bool push(T const & t)
+ {
+ return do_push<false>(t);
+ }
+
+ /** Pushes object t to the queue.
+ *
+ * \post object will be pushed to the queue, if internal node can be allocated
+ * \returns true, if the push operation is successful.
+ *
+ * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the operation will fail.
+ * \throws if memory allocator throws
+ * */
+ bool bounded_push(T const & t)
+ {
+ return do_push<true>(t);
+ }
+
+
+private:
+#ifndef BOOST_DOXYGEN_INVOKED
+ template <bool Bounded>
+ bool do_push(T const & t)
+ {
+ using detail::likely;
+
+ node * n = pool.template construct<true, Bounded>(t, pool.null_handle());
+ handle_type node_handle = pool.get_handle(n);
+
+ if (n == NULL)
+ return false;
+
+ for (;;) {
+ tagged_node_handle tail = tail_.load(memory_order_acquire);
+ node * tail_node = pool.get_pointer(tail);
+ tagged_node_handle next = tail_node->next.load(memory_order_acquire);
+ node * next_ptr = pool.get_pointer(next);
+
+ tagged_node_handle tail2 = tail_.load(memory_order_acquire);
+ if (likely(tail == tail2)) {
+ if (next_ptr == 0) {
+ tagged_node_handle new_tail_next(node_handle, next.get_next_tag());
+ if ( tail_node->next.compare_exchange_weak(next, new_tail_next) ) {
+ tagged_node_handle new_tail(node_handle, tail.get_next_tag());
+ tail_.compare_exchange_strong(tail, new_tail);
+ return true;
+ }
+ }
+ else {
+ tagged_node_handle new_tail(pool.get_handle(next_ptr), tail.get_next_tag());
+ tail_.compare_exchange_strong(tail, new_tail);
+ }
+ }
+ }
+ }
+#endif
+
+public:
+
+ /** Pushes object t to the queue.
+ *
+ * \post object will be pushed to the queue, if internal node can be allocated
+ * \returns true, if the push operation is successful.
+ *
+ * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
+ * from the OS. This may not be lock-free.
+ * \throws if memory allocator throws
+ * */
+ bool unsynchronized_push(T const & t)
+ {
+ node * n = pool.template construct<false, false>(t, pool.null_handle());
+
+ if (n == NULL)
+ return false;
+
+ for (;;) {
+ tagged_node_handle tail = tail_.load(memory_order_relaxed);
+ tagged_node_handle next = tail->next.load(memory_order_relaxed);
+ node * next_ptr = next.get_ptr();
+
+ if (next_ptr == 0) {
+ tail->next.store(tagged_node_handle(n, next.get_next_tag()), memory_order_relaxed);
+ tail_.store(tagged_node_handle(n, tail.get_next_tag()), memory_order_relaxed);
+ return true;
+ }
+ else
+ tail_.store(tagged_node_handle(next_ptr, tail.get_next_tag()), memory_order_relaxed);
+ }
+ }
+
+ /** Pops object from queue.
+ *
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if queue was empty.
+ *
+ * \note Thread-safe and non-blocking
+ * */
+ bool pop (T & ret)
+ {
+ return pop<T>(ret);
+ }
+
+ /** Pops object from queue.
+ *
+ * \pre type U must be constructible by T and copyable, or T must be convertible to U
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if queue was empty.
+ *
+ * \note Thread-safe and non-blocking
+ * */
+ template <typename U>
+ bool pop (U & ret)
+ {
+ using detail::likely;
+ for (;;) {
+ tagged_node_handle head = head_.load(memory_order_acquire);
+ node * head_ptr = pool.get_pointer(head);
+
+ tagged_node_handle tail = tail_.load(memory_order_acquire);
+ tagged_node_handle next = head_ptr->next.load(memory_order_acquire);
+ node * next_ptr = pool.get_pointer(next);
+
+ tagged_node_handle head2 = head_.load(memory_order_acquire);
+ if (likely(head == head2)) {
+ if (pool.get_handle(head) == pool.get_handle(tail)) {
+ if (next_ptr == 0)
+ return false;
+
+ tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());
+ tail_.compare_exchange_strong(tail, new_tail);
+
+ } else {
+ if (next_ptr == 0)
+ /* this check is not part of the original algorithm as published by michael and scott
+ *
+ * however we reuse the tagged_ptr part for the freelist and clear the next part during node
+ * allocation. we can observe a null-pointer here.
+ * */
+ continue;
+ detail::copy_payload(next_ptr->data, ret);
+
+ tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());
+ if (head_.compare_exchange_weak(head, new_head)) {
+ pool.template destruct<true>(head);
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ /** Pops object from queue.
+ *
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if queue was empty.
+ *
+ * \note Not thread-safe, but non-blocking
+ *
+ * */
+ bool unsynchronized_pop (T & ret)
+ {
+ return unsynchronized_pop<T>(ret);
+ }
+
+ /** Pops object from queue.
+ *
+ * \pre type U must be constructible by T and copyable, or T must be convertible to U
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if queue was empty.
+ *
+ * \note Not thread-safe, but non-blocking
+ *
+ * */
+ template <typename U>
+ bool unsynchronized_pop (U & ret)
+ {
+ for (;;) {
+ tagged_node_handle head = head_.load(memory_order_relaxed);
+ node * head_ptr = pool.get_pointer(head);
+ tagged_node_handle tail = tail_.load(memory_order_relaxed);
+ tagged_node_handle next = head_ptr->next.load(memory_order_relaxed);
+ node * next_ptr = pool.get_pointer(next);
+
+ if (pool.get_handle(head) == pool.get_handle(tail)) {
+ if (next_ptr == 0)
+ return false;
+
+ tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());
+ tail_.store(new_tail);
+ } else {
+ if (next_ptr == 0)
+ /* this check is not part of the original algorithm as published by michael and scott
+ *
+ * however we reuse the tagged_ptr part for the freelist and clear the next part during node
+ * allocation. we can observe a null-pointer here.
+ * */
+ continue;
+ detail::copy_payload(next_ptr->data, ret);
+ tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());
+ head_.store(new_head);
+ pool.template destruct<false>(head);
+ return true;
+ }
+ }
+ }
+
+ /** consumes one element via a functor
+ *
+ * pops one element from the queue and applies the functor on this object
+ *
+ * \returns true, if one element was consumed
+ *
+ * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
+ * */
+ template <typename Functor>
+ bool consume_one(Functor & f)
+ {
+ T element;
+ bool success = pop(element);
+ if (success)
+ f(element);
+
+ return success;
+ }
+
+ /// \copydoc boost::lockfree::queue::consume_one(Functor & rhs)
+ template <typename Functor>
+ bool consume_one(Functor const & f)
+ {
+ T element;
+ bool success = pop(element);
+ if (success)
+ f(element);
+
+ return success;
+ }
+
+ /** consumes all elements via a functor
+ *
+ * sequentially pops all elements from the queue and applies the functor on each object
+ *
+ * \returns number of elements that are consumed
+ *
+ * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
+ * */
+ template <typename Functor>
+ size_t consume_all(Functor & f)
+ {
+ size_t element_count = 0;
+ while (consume_one(f))
+ element_count += 1;
+
+ return element_count;
+ }
+
+ /// \copydoc boost::lockfree::queue::consume_all(Functor & rhs)
+ template <typename Functor>
+ size_t consume_all(Functor const & f)
+ {
+ size_t element_count = 0;
+ while (consume_one(f))
+ element_count += 1;
+
+ return element_count;
+ }
+
+private:
+#ifndef BOOST_DOXYGEN_INVOKED
+ atomic<tagged_node_handle> head_;
+ static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
+ char padding1[padding_size];
+ atomic<tagged_node_handle> tail_;
+ char padding2[padding_size];
+
+ pool_t pool;
+#endif
+};
+
+} /* namespace lockfree */
+} /* namespace boost */
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */
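[Note: illustrative sketch, not part of the diff.] A hedged usage sketch for the queue above: one producer and one consumer thread exchanging ints through push and pop (the queue itself supports multiple writers and readers). It assumes linking against Boost.Thread and Boost.System.

// Illustrative sketch (not part of the diff): basic producer/consumer usage.
#include <boost/lockfree/queue.hpp>
#include <boost/thread/thread.hpp>
#include <iostream>

boost::lockfree::queue<int> work_queue(128);  // pre-allocate 128 freelist nodes

void producer()
{
    for (int i = 0; i < 1000; ++i)
        while (!work_queue.push(i))   // retry if the freelist is momentarily exhausted
            ;
}

void consumer()
{
    int value;
    int consumed = 0;
    while (consumed < 1000)
        if (work_queue.pop(value))    // non-blocking; returns false when empty
            ++consumed;
}

int main()
{
    boost::thread t1(producer);
    boost::thread t2(consumer);
    t1.join();
    t2.join();
    std::cout << "done" << std::endl;
    return 0;
}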
diff --git a/boost/lockfree/spsc_queue.hpp b/boost/lockfree/spsc_queue.hpp
new file mode 100644
index 0000000000..24790e4988
--- /dev/null
+++ b/boost/lockfree/spsc_queue.hpp
@@ -0,0 +1,962 @@
+// lock-free single-producer/single-consumer ringbuffer
+// this algorithm is implemented in various projects (linux kernel)
+//
+// Copyright (C) 2009-2013 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
+#define BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED
+
+#include <algorithm>
+#include <memory>
+
+#include <boost/aligned_storage.hpp>
+#include <boost/assert.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/utility.hpp>
+#include <boost/utility/enable_if.hpp>
+
+#include <boost/type_traits/has_trivial_destructor.hpp>
+#include <boost/type_traits/is_convertible.hpp>
+
+#include <boost/lockfree/detail/atomic.hpp>
+#include <boost/lockfree/detail/branch_hints.hpp>
+#include <boost/lockfree/detail/copy_payload.hpp>
+#include <boost/lockfree/detail/parameter.hpp>
+#include <boost/lockfree/detail/prefix.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+typedef parameter::parameters<boost::parameter::optional<tag::capacity>,
+ boost::parameter::optional<tag::allocator>
+ > ringbuffer_signature;
+
+template <typename T>
+class ringbuffer_base
+{
+#ifndef BOOST_DOXYGEN_INVOKED
+ typedef std::size_t size_t;
+ static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t);
+ atomic<size_t> write_index_;
+ char padding1[padding_size]; /* force read_index and write_index to different cache lines */
+ atomic<size_t> read_index_;
+
+ BOOST_DELETED_FUNCTION(ringbuffer_base(ringbuffer_base const&))
+ BOOST_DELETED_FUNCTION(ringbuffer_base& operator= (ringbuffer_base const&))
+
+protected:
+ ringbuffer_base(void):
+ write_index_(0), read_index_(0)
+ {}
+
+ static size_t next_index(size_t arg, size_t max_size)
+ {
+ size_t ret = arg + 1;
+ while (unlikely(ret >= max_size))
+ ret -= max_size;
+ return ret;
+ }
+
+ static size_t read_available(size_t write_index, size_t read_index, size_t max_size)
+ {
+ if (write_index >= read_index)
+ return write_index - read_index;
+
+ const size_t ret = write_index + max_size - read_index;
+ return ret;
+ }
+
+ static size_t write_available(size_t write_index, size_t read_index, size_t max_size)
+ {
+ size_t ret = read_index - write_index - 1;
+ if (write_index >= read_index)
+ ret += max_size;
+ return ret;
+ }
+
+ size_t read_available(size_t max_size) const
+ {
+ size_t write_index = write_index_.load(memory_order_relaxed);
+ const size_t read_index = read_index_.load(memory_order_relaxed);
+ return read_available(write_index, read_index, max_size);
+ }
+
+ size_t write_available(size_t max_size) const
+ {
+ size_t write_index = write_index_.load(memory_order_relaxed);
+ const size_t read_index = read_index_.load(memory_order_relaxed);
+ return write_available(write_index, read_index, max_size);
+ }
+
+ bool push(T const & t, T * buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
+ const size_t next = next_index(write_index, max_size);
+
+ if (next == read_index_.load(memory_order_acquire))
+ return false; /* ringbuffer is full */
+
+ new (buffer + write_index) T(t); // copy-construct
+
+ write_index_.store(next, memory_order_release);
+
+ return true;
+ }
+
+ size_t push(const T * input_buffer, size_t input_count, T * internal_buffer, size_t max_size)
+ {
+ return push(input_buffer, input_buffer + input_count, internal_buffer, max_size) - input_buffer;
+ }
+
+ template <typename ConstIterator>
+ ConstIterator push(ConstIterator begin, ConstIterator end, T * internal_buffer, size_t max_size)
+ {
+ // FIXME: avoid std::distance
+
+ const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread
+ const size_t read_index = read_index_.load(memory_order_acquire);
+ const size_t avail = write_available(write_index, read_index, max_size);
+
+ if (avail == 0)
+ return begin;
+
+ size_t input_count = std::distance(begin, end);
+ input_count = (std::min)(input_count, avail);
+
+ size_t new_write_index = write_index + input_count;
+
+ const ConstIterator last = boost::next(begin, input_count);
+
+ if (write_index + input_count > max_size) {
+ /* copy data in two sections */
+ const size_t count0 = max_size - write_index;
+ const ConstIterator midpoint = boost::next(begin, count0);
+
+ std::uninitialized_copy(begin, midpoint, internal_buffer + write_index);
+ std::uninitialized_copy(midpoint, last, internal_buffer);
+ new_write_index -= max_size;
+ } else {
+ std::uninitialized_copy(begin, last, internal_buffer + write_index);
+
+ if (new_write_index == max_size)
+ new_write_index = 0;
+ }
+
+ write_index_.store(new_write_index, memory_order_release);
+ return last;
+ }
+
+ template <typename Functor>
+ bool consume_one(Functor & functor, T * buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_acquire);
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+ if ( empty(write_index, read_index) )
+ return false;
+
+ T & object_to_consume = buffer[read_index];
+ functor( object_to_consume );
+ object_to_consume.~T();
+
+ size_t next = next_index(read_index, max_size);
+ read_index_.store(next, memory_order_release);
+ return true;
+ }
+
+ template <typename Functor>
+ bool consume_one(Functor const & functor, T * buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_acquire);
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+ if ( empty(write_index, read_index) )
+ return false;
+
+ T & object_to_consume = buffer[read_index];
+ functor( object_to_consume );
+ object_to_consume.~T();
+
+ size_t next = next_index(read_index, max_size);
+ read_index_.store(next, memory_order_release);
+ return true;
+ }
+
+ template <typename Functor>
+ size_t consume_all (Functor const & functor, T * internal_buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_acquire);
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+
+ const size_t avail = read_available(write_index, read_index, max_size);
+
+ if (avail == 0)
+ return 0;
+
+ const size_t output_count = avail;
+
+ size_t new_read_index = read_index + output_count;
+
+ if (read_index + output_count > max_size) {
+ /* copy data in two sections */
+ const size_t count0 = max_size - read_index;
+ const size_t count1 = output_count - count0;
+
+ run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
+ run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);
+
+ new_read_index -= max_size;
+ } else {
+ run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);
+
+ if (new_read_index == max_size)
+ new_read_index = 0;
+ }
+
+ read_index_.store(new_read_index, memory_order_release);
+ return output_count;
+ }
+
+ template <typename Functor>
+ size_t consume_all (Functor & functor, T * internal_buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_acquire);
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+
+ const size_t avail = read_available(write_index, read_index, max_size);
+
+ if (avail == 0)
+ return 0;
+
+ const size_t output_count = avail;
+
+ size_t new_read_index = read_index + output_count;
+
+ if (read_index + output_count > max_size) {
+ /* copy data in two sections */
+ const size_t count0 = max_size - read_index;
+ const size_t count1 = output_count - count0;
+
+ run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor);
+ run_functor_and_delete(internal_buffer, internal_buffer + count1, functor);
+
+ new_read_index -= max_size;
+ } else {
+ run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor);
+
+ if (new_read_index == max_size)
+ new_read_index = 0;
+ }
+
+ read_index_.store(new_read_index, memory_order_release);
+ return output_count;
+ }
+
+ size_t pop (T * output_buffer, size_t output_count, T * internal_buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_acquire);
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+
+ const size_t avail = read_available(write_index, read_index, max_size);
+
+ if (avail == 0)
+ return 0;
+
+ output_count = (std::min)(output_count, avail);
+
+ size_t new_read_index = read_index + output_count;
+
+ if (read_index + output_count > max_size) {
+ /* copy data in two sections */
+ const size_t count0 = max_size - read_index;
+ const size_t count1 = output_count - count0;
+
+ copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, output_buffer);
+ copy_and_delete(internal_buffer, internal_buffer + count1, output_buffer + count0);
+
+ new_read_index -= max_size;
+ } else {
+ copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer);
+ if (new_read_index == max_size)
+ new_read_index = 0;
+ }
+
+ read_index_.store(new_read_index, memory_order_release);
+ return output_count;
+ }
+
+ template <typename OutputIterator>
+ size_t pop_to_output_iterator (OutputIterator it, T * internal_buffer, size_t max_size)
+ {
+ const size_t write_index = write_index_.load(memory_order_acquire);
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+
+ const size_t avail = read_available(write_index, read_index, max_size);
+ if (avail == 0)
+ return 0;
+
+ size_t new_read_index = read_index + avail;
+
+ if (read_index + avail > max_size) {
+ /* copy data in two sections */
+ const size_t count0 = max_size - read_index;
+ const size_t count1 = avail - count0;
+
+ it = copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, it);
+ copy_and_delete(internal_buffer, internal_buffer + count1, it);
+
+ new_read_index -= max_size;
+ } else {
+ copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + avail, it);
+ if (new_read_index == max_size)
+ new_read_index = 0;
+ }
+
+ read_index_.store(new_read_index, memory_order_release);
+ return avail;
+ }
+
+ const T& front(const T * internal_buffer) const
+ {
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+ return *(internal_buffer + read_index);
+ }
+
+ T& front(T * internal_buffer)
+ {
+ const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread
+ return *(internal_buffer + read_index);
+ }
+#endif
+
+
+public:
+ /** reset the ringbuffer
+ *
+ * \note Not thread-safe
+ * */
+ void reset(void)
+ {
+ if ( !boost::has_trivial_destructor<T>::value ) {
+ // make sure to call all destructors!
+
+ T dummy_element;
+ while (pop(dummy_element))
+ {}
+ } else {
+ write_index_.store(0, memory_order_relaxed);
+ read_index_.store(0, memory_order_release);
+ }
+ }
+
+ /** Check if the ringbuffer is empty
+ *
+ * \return true, if the ringbuffer is empty, false otherwise
+ * \note Due to the concurrent nature of the ringbuffer the result may be inaccurate.
+ * */
+ bool empty(void)
+ {
+ return empty(write_index_.load(memory_order_relaxed), read_index_.load(memory_order_relaxed));
+ }
+
+ /**
+ * \return true, if implementation is lock-free.
+ *
+ * */
+ bool is_lock_free(void) const
+ {
+ return write_index_.is_lock_free() && read_index_.is_lock_free();
+ }
+
+private:
+ bool empty(size_t write_index, size_t read_index)
+ {
+ return write_index == read_index;
+ }
+
+ template< class OutputIterator >
+ OutputIterator copy_and_delete( T * first, T * last, OutputIterator out )
+ {
+ if (boost::has_trivial_destructor<T>::value) {
+ return std::copy(first, last, out); // will use memcpy if possible
+ } else {
+ for (; first != last; ++first, ++out) {
+ *out = *first;
+ first->~T();
+ }
+ return out;
+ }
+ }
+
+ template< class Functor >
+ void run_functor_and_delete( T * first, T * last, Functor & functor )
+ {
+ for (; first != last; ++first) {
+ functor(*first);
+ first->~T();
+ }
+ }
+
+ template< class Functor >
+ void run_functor_and_delete( T * first, T * last, Functor const & functor )
+ {
+ for (; first != last; ++first) {
+ functor(*first);
+ first->~T();
+ }
+ }
+};
+
+template <typename T, std::size_t MaxSize>
+class compile_time_sized_ringbuffer:
+ public ringbuffer_base<T>
+{
+ typedef std::size_t size_type;
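+    // one array slot is kept free so that a full ringbuffer can be distinguished from an empty one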
+ static const std::size_t max_size = MaxSize + 1;
+
+ typedef typename boost::aligned_storage<max_size * sizeof(T),
+ boost::alignment_of<T>::value
+ >::type storage_type;
+
+ storage_type storage_;
+
+ T * data()
+ {
+ return static_cast<T*>(storage_.address());
+ }
+
+ const T * data() const
+ {
+ return static_cast<const T*>(storage_.address());
+ }
+
+protected:
+ size_type max_number_of_elements() const
+ {
+ return max_size;
+ }
+
+public:
+ bool push(T const & t)
+ {
+ return ringbuffer_base<T>::push(t, data(), max_size);
+ }
+
+ template <typename Functor>
+ bool consume_one(Functor & f)
+ {
+ return ringbuffer_base<T>::consume_one(f, data(), max_size);
+ }
+
+ template <typename Functor>
+ bool consume_one(Functor const & f)
+ {
+ return ringbuffer_base<T>::consume_one(f, data(), max_size);
+ }
+
+ template <typename Functor>
+    size_type consume_all(Functor & f)
+ {
+ return ringbuffer_base<T>::consume_all(f, data(), max_size);
+ }
+
+ template <typename Functor>
+    size_type consume_all(Functor const & f)
+ {
+ return ringbuffer_base<T>::consume_all(f, data(), max_size);
+ }
+
+ size_type push(T const * t, size_type size)
+ {
+ return ringbuffer_base<T>::push(t, size, data(), max_size);
+ }
+
+ template <size_type size>
+ size_type push(T const (&t)[size])
+ {
+ return push(t, size);
+ }
+
+ template <typename ConstIterator>
+ ConstIterator push(ConstIterator begin, ConstIterator end)
+ {
+ return ringbuffer_base<T>::push(begin, end, data(), max_size);
+ }
+
+ size_type pop(T * ret, size_type size)
+ {
+ return ringbuffer_base<T>::pop(ret, size, data(), max_size);
+ }
+
+ template <typename OutputIterator>
+ size_type pop_to_output_iterator(OutputIterator it)
+ {
+ return ringbuffer_base<T>::pop_to_output_iterator(it, data(), max_size);
+ }
+
+ const T& front(void) const
+ {
+ return ringbuffer_base<T>::front(data());
+ }
+
+ T& front(void)
+ {
+ return ringbuffer_base<T>::front(data());
+ }
+};
+
+template <typename T, typename Alloc>
+class runtime_sized_ringbuffer:
+ public ringbuffer_base<T>,
+ private Alloc
+{
+ typedef std::size_t size_type;
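+    // holds the requested capacity + 1: one slot is kept free to distinguish full from empty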
+ size_type max_elements_;
+ typedef typename Alloc::pointer pointer;
+ pointer array_;
+
+protected:
+ size_type max_number_of_elements() const
+ {
+ return max_elements_;
+ }
+
+public:
+ explicit runtime_sized_ringbuffer(size_type max_elements):
+ max_elements_(max_elements + 1)
+ {
+ array_ = Alloc::allocate(max_elements_);
+ }
+
+ template <typename U>
+ runtime_sized_ringbuffer(typename Alloc::template rebind<U>::other const & alloc, size_type max_elements):
+ Alloc(alloc), max_elements_(max_elements + 1)
+ {
+ array_ = Alloc::allocate(max_elements_);
+ }
+
+ runtime_sized_ringbuffer(Alloc const & alloc, size_type max_elements):
+ Alloc(alloc), max_elements_(max_elements + 1)
+ {
+ array_ = Alloc::allocate(max_elements_);
+ }
+
+ ~runtime_sized_ringbuffer(void)
+ {
+ // destroy all remaining items
+ T out;
+ while (pop(&out, 1)) {}
+
+ Alloc::deallocate(array_, max_elements_);
+ }
+
+ bool push(T const & t)
+ {
+ return ringbuffer_base<T>::push(t, &*array_, max_elements_);
+ }
+
+ template <typename Functor>
+ bool consume_one(Functor & f)
+ {
+ return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
+ }
+
+ template <typename Functor>
+ bool consume_one(Functor const & f)
+ {
+ return ringbuffer_base<T>::consume_one(f, &*array_, max_elements_);
+ }
+
+ template <typename Functor>
+ size_type consume_all(Functor & f)
+ {
+ return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
+ }
+
+ template <typename Functor>
+ size_type consume_all(Functor const & f)
+ {
+ return ringbuffer_base<T>::consume_all(f, &*array_, max_elements_);
+ }
+
+ size_type push(T const * t, size_type size)
+ {
+ return ringbuffer_base<T>::push(t, size, &*array_, max_elements_);
+ }
+
+ template <size_type size>
+ size_type push(T const (&t)[size])
+ {
+ return push(t, size);
+ }
+
+ template <typename ConstIterator>
+ ConstIterator push(ConstIterator begin, ConstIterator end)
+ {
+ return ringbuffer_base<T>::push(begin, end, array_, max_elements_);
+ }
+
+ size_type pop(T * ret, size_type size)
+ {
+ return ringbuffer_base<T>::pop(ret, size, array_, max_elements_);
+ }
+
+ template <typename OutputIterator>
+ size_type pop_to_output_iterator(OutputIterator it)
+ {
+ return ringbuffer_base<T>::pop_to_output_iterator(it, array_, max_elements_);
+ }
+
+ const T& front(void) const
+ {
+ return ringbuffer_base<T>::front(array_);
+ }
+
+ T& front(void)
+ {
+ return ringbuffer_base<T>::front(array_);
+ }
+};
+
+template <typename T, typename A0, typename A1>
+struct make_ringbuffer
+{
+ typedef typename ringbuffer_signature::bind<A0, A1>::type bound_args;
+
+ typedef extract_capacity<bound_args> extract_capacity_t;
+
+ static const bool runtime_sized = !extract_capacity_t::has_capacity;
+ static const size_t capacity = extract_capacity_t::capacity;
+
+ typedef extract_allocator<bound_args, T> extract_allocator_t;
+ typedef typename extract_allocator_t::type allocator;
+
+    // the allocator argument is only sane for run-time sized ringbuffers
+ BOOST_STATIC_ASSERT((mpl::if_<mpl::bool_<!runtime_sized>,
+ mpl::bool_<!extract_allocator_t::has_allocator>,
+ mpl::true_
+ >::type::value));
+
+ typedef typename mpl::if_c<runtime_sized,
+ runtime_sized_ringbuffer<T, allocator>,
+ compile_time_sized_ringbuffer<T, capacity>
+ >::type ringbuffer_type;
+};
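+
+/* Illustrative mapping performed by make_ringbuffer (directly mirroring the
+ * mpl::if_c above):
+ *
+ *   capacity<> policy present -> compile_time_sized_ringbuffer<T, capacity>
+ *   no capacity<> policy      -> runtime_sized_ringbuffer<T, allocator>
+ */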
+
+
+} /* namespace detail */
+
+
+/** The spsc_queue class provides a single-writer/single-reader fifo queue, pushing and popping is wait-free.
+ *
+ * \b Policies:
+ * - \c boost::lockfree::capacity<>, optional <br>
+ * If this template argument is passed to the options, the size of the ringbuffer is set at compile-time.
+ *
+ * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<T>> <br>
+ * Specifies the allocator that is used to allocate the ringbuffer. This option is only valid, if the ringbuffer is configured
+ * to be sized at run-time
+ *
+ * \b Requirements:
+ * - T must have a default constructor
+ * - T must be copyable
+ * */
+#ifndef BOOST_DOXYGEN_INVOKED
+template <typename T,
+ class A0 = boost::parameter::void_,
+ class A1 = boost::parameter::void_>
+#else
+template <typename T, ...Options>
+#endif
+class spsc_queue:
+ public detail::make_ringbuffer<T, A0, A1>::ringbuffer_type
+{
+private:
+
+#ifndef BOOST_DOXYGEN_INVOKED
+ typedef typename detail::make_ringbuffer<T, A0, A1>::ringbuffer_type base_type;
+ static const bool runtime_sized = detail::make_ringbuffer<T, A0, A1>::runtime_sized;
+ typedef typename detail::make_ringbuffer<T, A0, A1>::allocator allocator_arg;
+
+ struct implementation_defined
+ {
+ typedef allocator_arg allocator;
+ typedef std::size_t size_type;
+ };
+#endif
+
+public:
+ typedef T value_type;
+ typedef typename implementation_defined::allocator allocator;
+ typedef typename implementation_defined::size_type size_type;
+
+ /** Constructs a spsc_queue
+ *
+ * \pre spsc_queue must be configured to be sized at compile-time
+ */
+ // @{
+ spsc_queue(void)
+ {
+ BOOST_ASSERT(!runtime_sized);
+ }
+
+ template <typename U>
+ explicit spsc_queue(typename allocator::template rebind<U>::other const & alloc)
+ {
+ // just for API compatibility: we don't actually need an allocator
+ BOOST_STATIC_ASSERT(!runtime_sized);
+ }
+
+ explicit spsc_queue(allocator const & alloc)
+ {
+ // just for API compatibility: we don't actually need an allocator
+ BOOST_ASSERT(!runtime_sized);
+ }
+ // @}
+
+
+ /** Constructs a spsc_queue for element_count elements
+ *
+ * \pre spsc_queue must be configured to be sized at run-time
+ */
+ // @{
+ explicit spsc_queue(size_type element_count):
+ base_type(element_count)
+ {
+ BOOST_ASSERT(runtime_sized);
+ }
+
+ template <typename U>
+ spsc_queue(size_type element_count, typename allocator::template rebind<U>::other const & alloc):
+ base_type(alloc, element_count)
+ {
+ BOOST_STATIC_ASSERT(runtime_sized);
+ }
+
+ spsc_queue(size_type element_count, allocator_arg const & alloc):
+ base_type(alloc, element_count)
+ {
+ BOOST_ASSERT(runtime_sized);
+ }
+ // @}
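+
+    /* Usage sketch (illustrative): the capacity<> policy selects a compile-time
+     * sized ringbuffer, otherwise the capacity is passed to the constructor.
+     *
+     *   // compile-time sized: capacity fixed via policy, default-constructible
+     *   boost::lockfree::spsc_queue<int, boost::lockfree::capacity<1024> > fixed_queue;
+     *
+     *   // run-time sized: capacity chosen at construction
+     *   boost::lockfree::spsc_queue<int> dynamic_queue(1024);
+     */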
+
+ /** Pushes object t to the ringbuffer.
+ *
+ * \pre only one thread is allowed to push data to the spsc_queue
+ * \post object will be pushed to the spsc_queue, unless it is full.
+ * \return true, if the push operation is successful.
+ *
+ * \note Thread-safe and wait-free
+ * */
+ bool push(T const & t)
+ {
+ return base_type::push(t);
+ }
+
+ /** Pops one object from ringbuffer.
+ *
+     * \pre only one thread is allowed to pop data from the spsc_queue
+ * \post if ringbuffer is not empty, object will be discarded.
+ * \return true, if the pop operation is successful, false if ringbuffer was empty.
+ *
+ * \note Thread-safe and wait-free
+ */
+ bool pop ()
+ {
+ detail::consume_noop consume_functor;
+ return consume_one( consume_functor );
+ }
+
+ /** Pops one object from ringbuffer.
+ *
+     * \pre only one thread is allowed to pop data from the spsc_queue
+ * \post if ringbuffer is not empty, object will be copied to ret.
+ * \return true, if the pop operation is successful, false if ringbuffer was empty.
+ *
+ * \note Thread-safe and wait-free
+ */
+ template <typename U>
+ typename boost::enable_if<typename is_convertible<T, U>::type, bool>::type
+ pop (U & ret)
+ {
+ detail::consume_via_copy<U> consume_functor(ret);
+ return consume_one( consume_functor );
+ }
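+
+    /* Producer/consumer sketch (illustrative; produce_item() and process() are
+     * placeholders): exactly one thread may push and exactly one thread may pop.
+     *
+     *   // producer thread
+     *   while (!queue.push(produce_item()))
+     *       ; // queue full, retry
+     *
+     *   // consumer thread
+     *   int value;
+     *   while (queue.pop(value))
+     *       process(value);
+     */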
+
+ /** Pushes as many objects from the array t as there is space.
+ *
+ * \pre only one thread is allowed to push data to the spsc_queue
+ * \return number of pushed items
+ *
+ * \note Thread-safe and wait-free
+ */
+ size_type push(T const * t, size_type size)
+ {
+ return base_type::push(t, size);
+ }
+
+ /** Pushes as many objects from the array t as there is space available.
+ *
+ * \pre only one thread is allowed to push data to the spsc_queue
+ * \return number of pushed items
+ *
+ * \note Thread-safe and wait-free
+ */
+ template <size_type size>
+ size_type push(T const (&t)[size])
+ {
+ return push(t, size);
+ }
+
+    /** Pushes as many objects from the range [begin, end) as there is space available.
+ *
+ * \pre only one thread is allowed to push data to the spsc_queue
+ * \return iterator to the first element, which has not been pushed
+ *
+ * \note Thread-safe and wait-free
+ */
+ template <typename ConstIterator>
+ ConstIterator push(ConstIterator begin, ConstIterator end)
+ {
+ return base_type::push(begin, end);
+ }
+
+ /** Pops a maximum of size objects from ringbuffer.
+ *
+     * \pre only one thread is allowed to pop data from the spsc_queue
+ * \return number of popped items
+ *
+ * \note Thread-safe and wait-free
+ * */
+ size_type pop(T * ret, size_type size)
+ {
+ return base_type::pop(ret, size);
+ }
+
+ /** Pops a maximum of size objects from spsc_queue.
+ *
+     * \pre only one thread is allowed to pop data from the spsc_queue
+ * \return number of popped items
+ *
+ * \note Thread-safe and wait-free
+ * */
+ template <size_type size>
+ size_type pop(T (&ret)[size])
+ {
+ return pop(ret, size);
+ }
+
+ /** Pops objects to the output iterator it
+ *
+     * \pre only one thread is allowed to pop data from the spsc_queue
+ * \return number of popped items
+ *
+ * \note Thread-safe and wait-free
+ * */
+ template <typename OutputIterator>
+ typename boost::disable_if<typename is_convertible<T, OutputIterator>::type, size_type>::type
+ pop(OutputIterator it)
+ {
+ return base_type::pop_to_output_iterator(it);
+ }
+
+ /** consumes one element via a functor
+ *
+ * pops one element from the queue and applies the functor on this object
+ *
+ * \returns true, if one element was consumed
+ *
+ * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
+ * */
+ template <typename Functor>
+ bool consume_one(Functor & f)
+ {
+ return base_type::consume_one(f);
+ }
+
+ /// \copydoc boost::lockfree::spsc_queue::consume_one(Functor & rhs)
+ template <typename Functor>
+ bool consume_one(Functor const & f)
+ {
+ return base_type::consume_one(f);
+ }
+
+ /** consumes all elements via a functor
+ *
+ * sequentially pops all elements from the queue and applies the functor on each object
+ *
+ * \returns number of elements that are consumed
+ *
+ * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
+ * */
+ template <typename Functor>
+ size_type consume_all(Functor & f)
+ {
+ return base_type::consume_all(f);
+ }
+
+ /// \copydoc boost::lockfree::spsc_queue::consume_all(Functor & rhs)
+ template <typename Functor>
+ size_type consume_all(Functor const & f)
+ {
+ return base_type::consume_all(f);
+ }
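+
+    /* Functor sketch (illustrative): any callable object taking the element type
+     * can be passed to consume_one()/consume_all(), e.g. for spsc_queue<int>:
+     *
+     *   struct printer {
+     *       void operator()(int value) const { std::cout << value << '\n'; }
+     *   };
+     *
+     *   printer p;
+     *   std::size_t consumed = queue.consume_all(p);
+     */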
+
+ /** get number of elements that are available for read
+ *
+ * \return number of available elements that can be popped from the spsc_queue
+ *
+     * \note Thread-safe and wait-free, should only be called from the consumer thread
+ * */
+ size_type read_available() const
+ {
+ return base_type::read_available(base_type::max_number_of_elements());
+ }
+
+    /** get space available for writing elements
+     *
+     * \return number of elements that can be pushed to the spsc_queue
+     *
+     * \note Thread-safe and wait-free, should only be called from the producer thread
+ * */
+ size_type write_available() const
+ {
+ return base_type::write_available(base_type::max_number_of_elements());
+ }
+
+ /** get reference to element in the front of the queue
+ *
+ * Availability of front element can be checked using read_available().
+ *
+ * \pre only one thread is allowed to check front element
+ * \pre read_available() > 0. If ringbuffer is empty, it's undefined behaviour to invoke this method.
+ * \return reference to the first element in the queue
+ *
+ * \note Thread-safe and wait-free
+ */
+ const T& front() const
+ {
+ BOOST_ASSERT(read_available() > 0);
+ return base_type::front();
+ }
+
+ /// \copydoc boost::lockfree::spsc_queue::front() const
+ T& front()
+ {
+ BOOST_ASSERT(read_available() > 0);
+ return base_type::front();
+ }
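+
+    /* Checked access sketch (illustrative): front() requires a non-empty queue,
+     * so guard it with read_available() in the consumer thread:
+     *
+     *   if (queue.read_available() > 0) {
+     *       int & head = queue.front();
+     *       // ... inspect head in place ...
+     *       queue.pop();
+     *   }
+     */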
+};
+
+} /* namespace lockfree */
+} /* namespace boost */
+
+
+#endif /* BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED */
diff --git a/boost/lockfree/stack.hpp b/boost/lockfree/stack.hpp
new file mode 100644
index 0000000000..6db986c87b
--- /dev/null
+++ b/boost/lockfree/stack.hpp
@@ -0,0 +1,583 @@
+// Copyright (C) 2008-2013 Tim Blechmann
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
+#define BOOST_LOCKFREE_STACK_HPP_INCLUDED
+
+#include <boost/assert.hpp>
+#include <boost/checked_delete.hpp>
+#include <boost/integer_traits.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/tuple/tuple.hpp>
+#include <boost/type_traits/has_trivial_assign.hpp>
+#include <boost/type_traits/has_trivial_destructor.hpp>
+
+#include <boost/lockfree/detail/atomic.hpp>
+#include <boost/lockfree/detail/copy_payload.hpp>
+#include <boost/lockfree/detail/freelist.hpp>
+#include <boost/lockfree/detail/parameter.hpp>
+#include <boost/lockfree/detail/tagged_ptr.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace lockfree {
+namespace detail {
+
+typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
+ boost::parameter::optional<tag::capacity>
+ > stack_signature;
+
+}
+
+/** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free,
+ * construction/destruction has to be synchronized. It uses a freelist for memory management,
+ * freed nodes are pushed to the freelist and not returned to the OS before the stack is destroyed.
+ *
+ * \b Policies:
+ *
+ * - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
+ * Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
+ * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
+ * by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
+ * type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
+ * to achieve lock-freedom.
+ *
+ * - \c boost::lockfree::capacity<>, optional <br>
+ * If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
+ *   This option implies \c fixed_sized<true>
+ *
+ * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
+ * Specifies the allocator that is used for the internal freelist
+ *
+ * \b Requirements:
+ * - T must have a copy constructor
+ * */
+#ifndef BOOST_DOXYGEN_INVOKED
+template <typename T,
+ class A0 = boost::parameter::void_,
+ class A1 = boost::parameter::void_,
+ class A2 = boost::parameter::void_>
+#else
+template <typename T, ...Options>
+#endif
+class stack
+{
+private:
+#ifndef BOOST_DOXYGEN_INVOKED
+ BOOST_STATIC_ASSERT(boost::has_trivial_assign<T>::value);
+ BOOST_STATIC_ASSERT(boost::has_trivial_destructor<T>::value);
+
+ typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
+
+ static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
+ static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
+ static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
+ static const bool node_based = !(has_capacity || fixed_sized);
+ static const bool compile_time_sized = has_capacity;
+
+ struct node
+ {
+ node(T const & val):
+ v(val)
+ {}
+
+ typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
+ handle_t next;
+ const T v;
+ };
+
+ typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
+ typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
+ typedef typename pool_t::tagged_node_handle tagged_node_handle;
+
+ // check compile-time capacity
+ BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
+ mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
+ mpl::true_
+ >::type::value));
+
+ struct implementation_defined
+ {
+ typedef node_allocator allocator;
+ typedef std::size_t size_type;
+ };
+
+#endif
+
+ BOOST_DELETED_FUNCTION(stack(stack const&))
+ BOOST_DELETED_FUNCTION(stack& operator= (stack const&))
+
+public:
+ typedef T value_type;
+ typedef typename implementation_defined::allocator allocator;
+ typedef typename implementation_defined::size_type size_type;
+
+ /**
+ * \return true, if implementation is lock-free.
+ *
+     * \warning It only checks whether the top stack node and the freelist can be modified in a lock-free manner.
+     *          On most platforms, the whole implementation is lock-free if this is true. Using c++0x-style atomics,
+     *          it is not possible to provide a completely accurate implementation, because one would need to test
+     *          every internal node, which is impossible if further nodes will be allocated from the operating system.
+ *
+ * */
+ bool is_lock_free (void) const
+ {
+ return tos.is_lock_free() && pool.is_lock_free();
+ }
+
+ //! Construct stack
+ // @{
+ stack(void):
+ pool(node_allocator(), capacity)
+ {
+ BOOST_ASSERT(has_capacity);
+ initialize();
+ }
+
+ template <typename U>
+ explicit stack(typename node_allocator::template rebind<U>::other const & alloc):
+ pool(alloc, capacity)
+ {
+ BOOST_STATIC_ASSERT(has_capacity);
+ initialize();
+ }
+
+ explicit stack(allocator const & alloc):
+ pool(alloc, capacity)
+ {
+ BOOST_ASSERT(has_capacity);
+ initialize();
+ }
+ // @}
+
+ //! Construct stack, allocate n nodes for the freelist.
+ // @{
+ explicit stack(size_type n):
+ pool(node_allocator(), n)
+ {
+ BOOST_ASSERT(!has_capacity);
+ initialize();
+ }
+
+ template <typename U>
+ stack(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
+ pool(alloc, n)
+ {
+ BOOST_STATIC_ASSERT(!has_capacity);
+ initialize();
+ }
+ // @}
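+
+    /* Usage sketch (illustrative): with a capacity<> policy the stack is
+     * compile-time sized, otherwise the initial freelist size is passed at
+     * run-time.
+     *
+     *   boost::lockfree::stack<int, boost::lockfree::capacity<128> > fixed_stack;
+     *   boost::lockfree::stack<int> dynamic_stack(128);
+     */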
+
+ /** Allocate n nodes for freelist
+ *
+ * \pre only valid if no capacity<> argument given
+ * \note thread-safe, may block if memory allocator blocks
+ *
+ * */
+ void reserve(size_type n)
+ {
+ BOOST_STATIC_ASSERT(!has_capacity);
+ pool.template reserve<true>(n);
+ }
+
+ /** Allocate n nodes for freelist
+ *
+ * \pre only valid if no capacity<> argument given
+ * \note not thread-safe, may block if memory allocator blocks
+ *
+ * */
+ void reserve_unsafe(size_type n)
+ {
+ BOOST_STATIC_ASSERT(!has_capacity);
+ pool.template reserve<false>(n);
+ }
+
+ /** Destroys stack, free all nodes from freelist.
+ *
+ * \note not thread-safe
+ *
+ * */
+ ~stack(void)
+ {
+ T dummy;
+ while(unsynchronized_pop(dummy))
+ {}
+ }
+
+private:
+#ifndef BOOST_DOXYGEN_INVOKED
+ void initialize(void)
+ {
+ tos.store(tagged_node_handle(pool.null_handle(), 0));
+ }
+
+ void link_nodes_atomic(node * new_top_node, node * end_node)
+ {
+ tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
+ for (;;) {
+ tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
+ end_node->next = pool.get_handle(old_tos);
+
+ if (tos.compare_exchange_weak(old_tos, new_tos))
+ break;
+ }
+ }
+
+ void link_nodes_unsafe(node * new_top_node, node * end_node)
+ {
+ tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
+
+ tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
+ end_node->next = pool.get_pointer(old_tos);
+
+ tos.store(new_tos, memory_order_relaxed);
+ }
+
+ template <bool Threadsafe, bool Bounded, typename ConstIterator>
+ tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
+ {
+ ConstIterator it = begin;
+ node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
+ if (end_node == NULL) {
+ ret = begin;
+ return make_tuple<node*, node*>(NULL, NULL);
+ }
+
+ node * new_top_node = end_node;
+ end_node->next = NULL;
+
+ try {
+ /* link nodes */
+ for (; it != end; ++it) {
+ node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
+ if (newnode == NULL)
+ break;
+ newnode->next = new_top_node;
+ new_top_node = newnode;
+ }
+ } catch (...) {
+ for (node * current_node = new_top_node; current_node != NULL;) {
+ node * next = current_node->next;
+ pool.template destruct<Threadsafe>(current_node);
+ current_node = next;
+ }
+ throw;
+ }
+ ret = it;
+ return make_tuple(new_top_node, end_node);
+ }
+#endif
+
+public:
+ /** Pushes object t to the stack.
+ *
+ * \post object will be pushed to the stack, if internal node can be allocated
+ * \returns true, if the push operation is successful.
+ *
+ * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
+ * from the OS. This may not be lock-free.
+ * \throws if memory allocator throws
+ * */
+ bool push(T const & v)
+ {
+ return do_push<false>(v);
+ }
+
+ /** Pushes object t to the stack.
+ *
+ * \post object will be pushed to the stack, if internal node can be allocated
+ * \returns true, if the push operation is successful.
+ *
+ * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
+ * */
+ bool bounded_push(T const & v)
+ {
+ return do_push<true>(v);
+ }
+
+#ifndef BOOST_DOXYGEN_INVOKED
+private:
+ template <bool Bounded>
+ bool do_push(T const & v)
+ {
+ node * newnode = pool.template construct<true, Bounded>(v);
+ if (newnode == 0)
+ return false;
+
+ link_nodes_atomic(newnode, newnode);
+ return true;
+ }
+
+ template <bool Bounded, typename ConstIterator>
+ ConstIterator do_push(ConstIterator begin, ConstIterator end)
+ {
+ node * new_top_node;
+ node * end_node;
+ ConstIterator ret;
+
+ tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
+ if (new_top_node)
+ link_nodes_atomic(new_top_node, end_node);
+
+ return ret;
+ }
+
+public:
+#endif
+
+    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
+ *
+ * \return iterator to the first element, which has not been pushed
+ *
+ * \note Operation is applied atomically
+ * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
+ * from the OS. This may not be lock-free.
+ * \throws if memory allocator throws
+ */
+ template <typename ConstIterator>
+ ConstIterator push(ConstIterator begin, ConstIterator end)
+ {
+ return do_push<false, ConstIterator>(begin, end);
+ }
+
+    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
+ *
+ * \return iterator to the first element, which has not been pushed
+ *
+ * \note Operation is applied atomically
+ * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
+ * \throws if memory allocator throws
+ */
+ template <typename ConstIterator>
+ ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
+ {
+ return do_push<true, ConstIterator>(begin, end);
+ }
+
+
+ /** Pushes object t to the stack.
+ *
+ * \post object will be pushed to the stack, if internal node can be allocated
+ * \returns true, if the push operation is successful.
+ *
+ * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
+ * from the OS. This may not be lock-free.
+ * \throws if memory allocator throws
+ * */
+ bool unsynchronized_push(T const & v)
+ {
+ node * newnode = pool.template construct<false, false>(v);
+ if (newnode == 0)
+ return false;
+
+ link_nodes_unsafe(newnode, newnode);
+ return true;
+ }
+
+    /** Pushes as many objects from the range [begin, end) as freelist nodes can be allocated.
+ *
+ * \return iterator to the first element, which has not been pushed
+ *
+ * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
+ * from the OS. This may not be lock-free.
+ * \throws if memory allocator throws
+ */
+ template <typename ConstIterator>
+ ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
+ {
+ node * new_top_node;
+ node * end_node;
+ ConstIterator ret;
+
+ tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
+ if (new_top_node)
+ link_nodes_unsafe(new_top_node, end_node);
+
+ return ret;
+ }
+
+
+ /** Pops object from stack.
+ *
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if stack was empty.
+ *
+ * \note Thread-safe and non-blocking
+ *
+ * */
+ bool pop(T & ret)
+ {
+ return pop<T>(ret);
+ }
+
+ /** Pops object from stack.
+ *
+ * \pre type T must be convertible to U
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if stack was empty.
+ *
+ * \note Thread-safe and non-blocking
+ *
+ * */
+ template <typename U>
+ bool pop(U & ret)
+ {
+ BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
+ detail::consume_via_copy<U> consumer(ret);
+
+ return consume_one(consumer);
+ }
+
+
+ /** Pops object from stack.
+ *
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if stack was empty.
+ *
+ * \note Not thread-safe, but non-blocking
+ *
+ * */
+ bool unsynchronized_pop(T & ret)
+ {
+ return unsynchronized_pop<T>(ret);
+ }
+
+ /** Pops object from stack.
+ *
+ * \pre type T must be convertible to U
+ * \post if pop operation is successful, object will be copied to ret.
+ * \returns true, if the pop operation is successful, false if stack was empty.
+ *
+ * \note Not thread-safe, but non-blocking
+ *
+ * */
+ template <typename U>
+ bool unsynchronized_pop(U & ret)
+ {
+ BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
+ tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
+ node * old_tos_pointer = pool.get_pointer(old_tos);
+
+ if (!pool.get_pointer(old_tos))
+ return false;
+
+ node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
+ tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag());
+
+ tos.store(new_tos, memory_order_relaxed);
+ detail::copy_payload(old_tos_pointer->v, ret);
+ pool.template destruct<false>(old_tos);
+ return true;
+ }
+
+ /** consumes one element via a functor
+ *
+ * pops one element from the stack and applies the functor on this object
+ *
+ * \returns true, if one element was consumed
+ *
+ * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
+ * */
+ template <typename Functor>
+ bool consume_one(Functor & f)
+ {
+ tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
+
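+        // CAS retry loop: the replacement top-of-stack carries the next tag
+        // (old_tos.get_next_tag()), which guards against the ABA problem when
+        // nodes are recycled through the freelist.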
+ for (;;) {
+ node * old_tos_pointer = pool.get_pointer(old_tos);
+ if (!old_tos_pointer)
+ return false;
+
+ tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());
+
+ if (tos.compare_exchange_weak(old_tos, new_tos)) {
+ f(old_tos_pointer->v);
+ pool.template destruct<true>(old_tos);
+ return true;
+ }
+ }
+ }
+
+ /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs)
+ template <typename Functor>
+ bool consume_one(Functor const & f)
+ {
+ tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
+
+ for (;;) {
+ node * old_tos_pointer = pool.get_pointer(old_tos);
+ if (!old_tos_pointer)
+ return false;
+
+ tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag());
+
+ if (tos.compare_exchange_weak(old_tos, new_tos)) {
+ f(old_tos_pointer->v);
+ pool.template destruct<true>(old_tos);
+ return true;
+ }
+ }
+ }
+
+ /** consumes all elements via a functor
+ *
+ * sequentially pops all elements from the stack and applies the functor on each object
+ *
+ * \returns number of elements that are consumed
+ *
+ * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
+ * */
+ template <typename Functor>
+ size_t consume_all(Functor & f)
+ {
+ size_t element_count = 0;
+ while (consume_one(f))
+ element_count += 1;
+
+ return element_count;
+ }
+
+ /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs)
+ template <typename Functor>
+ size_t consume_all(Functor const & f)
+ {
+ size_t element_count = 0;
+ while (consume_one(f))
+ element_count += 1;
+
+ return element_count;
+ }
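+
+    /* Multi-threaded sketch (illustrative; use() is a placeholder): any number of
+     * threads may push and pop concurrently, and consume_all() can drain what is
+     * left.
+     *
+     *   stack.push(1);           // from any producer thread
+     *
+     *   int value;
+     *   if (stack.pop(value))    // from any consumer thread
+     *       use(value);
+     */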
+
+ /**
+ * \return true, if stack is empty.
+ *
+ * \note It only guarantees that at some point during the execution of the function the stack has been empty.
+ * It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
+ * */
+ bool empty(void) const
+ {
+ return pool.get_pointer(tos.load()) == NULL;
+ }
+
+private:
+#ifndef BOOST_DOXYGEN_INVOKED
+ detail::atomic<tagged_node_handle> tos;
+
+ static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
+ char padding[padding_size];
+
+ pool_t pool;
+#endif
+};
+
+} /* namespace lockfree */
+} /* namespace boost */
+
+#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */