author    DongHun Kwak <dh0128.kwak@samsung.com>  2016-10-06 10:41:18 +0900
committer DongHun Kwak <dh0128.kwak@samsung.com>  2016-10-06 10:43:11 +0900
commit    f763a99a501650eff2c60288aa6f10ef916d769e (patch)
tree      02af7e13f9a38c888ebf340fe764cbe7dae99da9 /boost/move
parent    5cde13f21d36c7224b0e13d11c4b49379ae5210d (diff)
download  boost-f763a99a501650eff2c60288aa6f10ef916d769e.tar.gz
          boost-f763a99a501650eff2c60288aa6f10ef916d769e.tar.bz2
          boost-f763a99a501650eff2c60288aa6f10ef916d769e.zip

Imported Upstream version 1.62.0 (upstream/1.62.0)

Change-Id: I9d4c1ddb7b7d8f0069217ecc582700f9fda6dd4c
Signed-off-by: DongHun Kwak <dh0128.kwak@samsung.com>
Diffstat (limited to 'boost/move')
-rw-r--r--  boost/move/adl_move_swap.hpp                    |    4
-rw-r--r--  boost/move/algo/adaptive_merge.hpp              |    2
-rw-r--r--  boost/move/algo/detail/adaptive_sort_merge.hpp  | 1308
-rw-r--r--  boost/move/algo/detail/basic_op.hpp             |   18
-rw-r--r--  boost/move/algo/detail/merge.hpp                |  165
-rw-r--r--  boost/move/algo/detail/merge_sort.hpp           |   15
-rw-r--r--  boost/move/algo/move.hpp                        |    4
-rw-r--r--  boost/move/core.hpp                             |    4
-rw-r--r--  boost/move/detail/fwd_macros.hpp                |  259
-rw-r--r--  boost/move/detail/meta_utils_core.hpp           |   12
-rw-r--r--  boost/move/detail/reverse_iterator.hpp          |  171
-rw-r--r--  boost/move/detail/type_traits.hpp               |   34
12 files changed, 1283 insertions, 713 deletions
diff --git a/boost/move/adl_move_swap.hpp b/boost/move/adl_move_swap.hpp
index 930320108d..d6906a483f 100644
--- a/boost/move/adl_move_swap.hpp
+++ b/boost/move/adl_move_swap.hpp
@@ -231,8 +231,8 @@ BOOST_MOVE_FORCEINLINE void adl_move_swap(T& x, T& y)
//! using boost::adl_move_swap.
//!
//! Parameters:
-//! first1, last1 - the first range of elements to swap
-//! first2 - beginning of the second range of elements to swap
+//! first1, last1 - the first range of elements to swap
+//! first2 - beginning of the second range of elements to swap
//!
//! Type requirements:
//! - ForwardIt1, ForwardIt2 must meet the requirements of ForwardIterator.
diff --git a/boost/move/algo/adaptive_merge.hpp b/boost/move/algo/adaptive_merge.hpp
index ef20651bbf..0233b232e3 100644
--- a/boost/move/algo/adaptive_merge.hpp
+++ b/boost/move/algo/adaptive_merge.hpp
@@ -56,7 +56,7 @@ void adaptive_merge( RandIt first, RandIt middle, RandIt last, Compare comp
typedef typename iterator_traits<RandIt>::value_type value_type;
::boost::movelib::detail_adaptive::adaptive_xbuf<value_type> xbuf(uninitialized, uninitialized_len);
- ::boost::movelib::detail_adaptive::adaptive_merge_impl(first, size_type(middle - first), size_type(last - middle), comp, xbuf);
+ ::boost::movelib::detail_adaptive::adaptive_merge_impl(first, size_type(middle - first), size_type(last - middle), comp, xbuf);
}
} //namespace movelib {
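As a usage illustration (not part of this commit), here is a minimal sketch calling the public adaptive_merge shown above on a range made of two sorted halves; the optional trailing uninitialized-buffer arguments are left at their defaults:

#include <boost/move/algo/adaptive_merge.hpp>
#include <algorithm>
#include <cassert>
#include <functional>
#include <vector>

int main()
{
   std::vector<int> v{1, 4, 7, 2, 3, 9};   // sorted halves: [begin, begin+3) and [begin+3, end)
   // Merge both halves in place; extra uninitialized storage is optional and omitted here.
   boost::movelib::adaptive_merge(v.begin(), v.begin() + 3, v.end(), std::less<int>());
   assert(std::is_sorted(v.begin(), v.end()));
   return 0;
}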
diff --git a/boost/move/algo/detail/adaptive_sort_merge.hpp b/boost/move/algo/detail/adaptive_sort_merge.hpp
index 46dba187c5..3d97212a29 100644
--- a/boost/move/algo/detail/adaptive_sort_merge.hpp
+++ b/boost/move/algo/detail/adaptive_sort_merge.hpp
@@ -37,14 +37,15 @@
// elements twice.
//
// The adaptive_merge algorithm was developed by Ion Gaztanaga reusing some parts
-// from the sorting algorithm and implementing a block merge algorithm
-// without moving elements left or right, which is used when external memory
+// from the sorting algorithm and implementing an additional block merge algorithm
+// without moving elements to left or right, which is used when external memory
// is available.
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP
#define BOOST_MOVE_ADAPTIVE_SORT_MERGE_HPP
#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/detail/reverse_iterator.hpp>
#include <boost/move/algo/move.hpp>
#include <boost/move/algo/detail/merge.hpp>
#include <boost/move/adl_move_swap.hpp>
@@ -161,6 +162,29 @@ class adaptive_xbuf
m_size = size;
}
+ void shrink_to_fit(std::size_t const size)
+ {
+ if(m_size > size){
+ for(std::size_t szt_i = size; szt_i != m_size; ++szt_i){
+ m_ptr[szt_i].~T();
+ }
+ m_size = size;
+ }
+ }
+
+ void initialize_until(std::size_t const size, T &t)
+ {
+ BOOST_ASSERT(m_size < m_capacity);
+ if(m_size < size){
+ ::new((void*)&m_ptr[m_size]) T(::boost::move(t));
+ ++m_size;
+ for(; m_size != size; ++m_size){
+ ::new((void*)&m_ptr[m_size]) T(::boost::move(m_ptr[m_size-1]));
+ }
+ t = ::boost::move(m_ptr[m_size-1]);
+ }
+ }
+
template<class U>
bool supports_aligned_trailing(std::size_t size, std::size_t trail_count) const
{
@@ -210,11 +234,7 @@ class adaptive_xbuf
void clear()
{
- std::size_t size = m_size;
- while(size--){
- m_ptr[size].~T();
- }
- m_size = 0u;
+ this->shrink_to_fit(0u);
}
private:
@@ -288,41 +308,75 @@ class range_xbuf
Iterator const m_cap;
};
-template<class RandIt, class Buf>
-bool three_way_init( RandIt first1, RandIt last1, Buf &buf
- , typename Buf::iterator &buf_first, typename Buf::iterator &buf_last, move_op)
-{
- buf.move_assign(first1, last1-first1);
- buf_first = buf.data();
- buf_last = buf.end();
- return true;
-}
-template<class RandIt, class Buf>
-bool three_way_init( RandIt first, RandIt last, Buf &buf
- , typename Buf::iterator &buf_first, typename Buf::iterator &buf_last, swap_op)
+template<class RandIt, class Compare>
+RandIt skip_until_merge
+ ( RandIt first1, RandIt const last1
+ , const typename iterator_traits<RandIt>::value_type &next_key, Compare comp)
{
- typedef typename iterator_traits<RandIt>::size_type size_type;
- buf.clear();
- buf_first = buf.data();
- buf_last = buf_first + size_type(last-first);
- return false;
+ while(first1 != last1 && !comp(next_key, *first1)){
+ ++first1;
+ }
+ return first1;
}
-template<class T, class Buf>
-void three_way_move(T &a, T &b, Buf &buf, move_op)
+template<class InputIt1, class InputIt2, class OutputIt, class Compare, class Op>
+OutputIt op_partial_merge
+ (InputIt1 &r_first1, InputIt1 const last1, InputIt2 &r_first2, InputIt2 const last2, OutputIt d_first, Compare comp, Op op)
{
- buf.add(&b);
- b = boost::move(a);
+ InputIt1 first1(r_first1);
+ InputIt2 first2(r_first2);
+ if(first2 != last2 && last1 != first1)
+ while(1){
+ if(comp(*first2, *first1)) {
+ op(first2++, d_first++);
+ if(first2 == last2){
+ break;
+ }
+ }
+ else{
+ op(first1++, d_first++);
+ if(first1 == last1){
+ break;
+ }
+ }
+ }
+ r_first1 = first1;
+ r_first2 = first2;
+ return d_first;
}
-template<class T, class Buf>
-void three_way_move(T &a, T &b, Buf &buf, swap_op)
+template<class RandIt1, class RandIt2, class RandItB, class Compare, class Op>
+RandItB op_buffered_partial_merge_to_left_placed
+ ( RandIt1 first1, RandIt1 const last1
+ , RandIt2 &rfirst2, RandIt2 const last2
+ , RandItB &rfirstb, Compare comp, Op op )
{
- T tmp(boost::move(*buf.end()));
- buf.add(&b);
- b = boost::move(a);
- a = boost::move(tmp);
+ RandItB firstb = rfirstb;
+ RandItB lastb = firstb;
+ RandIt2 first2 = rfirst2;
+
+ //Move to buffer while merging
+ //Three way moves need fewer moves when op is swap_op so use it
+ //when merging elements from range2 to the destination occupied by range1
+ if(first1 != last1 && first2 != last2){
+ op(three_way_t(), first2++, first1++, lastb++);
+
+ while(true){
+ if(first1 == last1){
+ break;
+ }
+ if(first2 == last2){
+ lastb = op(forward_t(), first1, last1, firstb);
+ break;
+ }
+ op(three_way_t(), comp(*first2, *firstb) ? first2++ : firstb++, first1++, lastb++);
+ }
+ }
+
+ rfirst2 = first2;
+ rfirstb = firstb;
+ return lastb;
}
///////////////////////////////////////////////////////////////////////////////
@@ -331,7 +385,6 @@ void three_way_move(T &a, T &b, Buf &buf, swap_op)
//
///////////////////////////////////////////////////////////////////////////////
-
template<class Buf, class RandIt, class Compare, class Op>
RandIt op_partial_merge_with_buf_impl
( RandIt first1, RandIt const last1, RandIt first2, RandIt last2
@@ -340,7 +393,6 @@ RandIt op_partial_merge_with_buf_impl
)
{
typedef typename Buf::iterator buf_iterator;
- typedef typename iterator_traits<RandIt>::value_type value_type;
BOOST_ASSERT(first1 != last1);
BOOST_ASSERT(first2 != last2);
@@ -349,40 +401,21 @@ RandIt op_partial_merge_with_buf_impl
if(buf_first1 == buf_last1){
//Skip any element that does not need to be moved
- while(!comp(*last1, *first1)){
- ++first1;
- if(first1 == last1){
- return first1;
- }
- }
-
- //If initialization is successful, move to buffer while merging
- //Three way moves need less moves when op is swap_op so use it
- //when merging elements from range2 to the destination occupied by range1
- if(!three_way_init(first1, last1, buf, buf_first1, buf_last1, op)){
- three_way_move(*first2, *first1, buf, op);
- for(++first1, ++first2; first1 != last1; ++first1){
- value_type &v = comp(*first2, *buf_first1) ? *first2++ : *buf_first1++;
- three_way_move(v, *first1, buf, op);
- }
+ first1 = skip_until_merge(first1, last1, *last1, comp);
+ if(first1 == last1){
+ return first1;
}
+ buf_first1 = buf.data();
+ buf_last1 = op_buffered_partial_merge_to_left_placed(first1, last1, first2, last2, buf_first1, comp, op);
+ BOOST_ASSERT(buf_last1 == (buf.data() + (last1-first1)));
+ first1 = last1;
}
-
- //Now merge from buffer
- if(first2 != last2)
- while(1){
- if(comp(*first2, *buf_first1)) {
- op(first2++, first1++);
- if(first2 == last2)
- break;
- }
- else{
- op(buf_first1++, first1++);
- if(buf_first1 == buf_last1)
- break;
- }
+ else{
+ BOOST_ASSERT((last1-first1) == (buf_last1 - buf_first1));
}
+ //Now merge from buffer
+ first1 = op_partial_merge(buf_first1, buf_last1, first2, last2, first1, comp, op);
buf_first1_in_out = buf_first1;
buf_last1_in_out = buf_last1;
return first1;
@@ -429,72 +462,53 @@ void op_merge_blocks_with_buf
, Op op
, Buf & xbuf)
{
- if(n_bef_irreg2 == 0){
- RandIt const last_reg(first+l_irreg1+n_aft_irreg2*l_block);
- op_buffered_merge(first, last_reg, last_reg+l_irreg2, comp, op, xbuf);
+ typedef typename Buf::iterator buf_iterator;
+ buf_iterator buffer = xbuf.data();
+ buf_iterator buffer_end = buffer;
+ RandIt first1 = first;
+ RandIt last1 = first1 + l_irreg1;
+ RandItKeys const key_end (key_first+n_bef_irreg2);
+
+ bool is_range1_A = true; //first l_irreg1 elements are always from range A
+
+ for( ; key_first != key_end; ++key_first, last1 += l_block){
+ //If the trailing block is empty, we'll make it equal to the previous if empty
+ bool const is_range2_A = key_comp(*key_first, midkey);
+
+ if(is_range1_A == is_range2_A){
+ //If buffered, put those elements in place
+ RandIt res = op(forward_t(), buffer, buffer_end, first1);
+ BOOST_ASSERT(buffer == buffer_end || res == last1); (void)res;
+ buffer_end = buffer;
+ first1 = last1;
+ }
+ else {
+ first1 = op_partial_merge_with_buf(first1, last1, last1, last1 + l_block, xbuf, buffer, buffer_end, comp, op, is_range1_A);
+ BOOST_ASSERT(buffer == buffer_end || (buffer_end-buffer) == (last1+l_block-first1));
+ is_range1_A ^= buffer == buffer_end;
+ }
}
- else {
- typedef typename Buf::iterator buf_iterator;
- buf_iterator buffer = xbuf.data();
- buf_iterator buffer_end = buffer;
- RandIt first1 = first;
- RandIt last1 = l_irreg1 ? first1 + l_irreg1 : first1 + l_block;
- RandIt first2 = last1;
- RandItKeys const key_end (key_first+n_bef_irreg2);
- for( bool is_range1_A = l_irreg1 ? true : key_comp(*key_first, midkey), skip_first_it = l_irreg1 != 0
- ; key_first != key_end; ){
- if(!skip_first_it){
- ++key_first;
- }
- skip_first_it = false;
- bool const last_it = key_first == key_end;
- //If the trailing block is empty, we'll make it equal to the previous if empty
- bool const is_range2_A = last_it ? (!l_irreg2 && is_range1_A) : key_comp(*key_first, midkey);
+ //Now the trailing irregular block, first put buffered elements in place
+ RandIt res = op(forward_t(), buffer, buffer_end, first1);
+ BOOST_ASSERT(buffer == buffer_end || res == last1); (void)res;
- if(is_range1_A == is_range2_A){
- if(buffer != buffer_end){
- first1 = op(forward_t(), buffer, buffer_end, first1);
- BOOST_ASSERT(first1 == first2);
- buffer_end = buffer;
- }
- first1 = first2;
- if(last_it){
- xbuf.clear();
- last1 = first2+l_block*n_aft_irreg2;
- op_buffered_merge(first1, last1, last1+l_irreg2, comp, op, xbuf);
- break;
- }
- else{
- last1 = first2 + l_block;
- }
- first2 += l_block;
- }
- else {
- BOOST_ASSERT(!last_it || (l_irreg2 || n_aft_irreg2));
- if(last_it){
- RandIt res = op(forward_t(), buffer, buffer_end, first1);
- BOOST_ASSERT(buffer == buffer_end || res == last1); (void)res;
- last1 += l_block*n_aft_irreg2;
- xbuf.clear();
- op_buffered_merge(first1, last1, last1+l_irreg2, comp, op, xbuf);
- break;
- }
- else{
- RandIt const last2 = first2 + l_block;
- first1 = op_partial_merge_with_buf(first1, last1, first2, last2, xbuf, buffer, buffer_end, comp, op, is_range1_A);
- if(buffer == buffer_end){
- is_range1_A = is_range2_A;
- }
- last1 = last2;
- first2 = last1;
- BOOST_ASSERT((buffer == buffer_end) || (buffer_end-buffer) == (last1-first1));
- }
- }
+ BOOST_ASSERT(l_irreg2 || n_aft_irreg2);
+ if(l_irreg2){
+ bool const is_range2_A = false; //last l_irreg2 elements always from range B
+ if(is_range1_A == is_range2_A){
+ first1 = last1;
+ last1 = last1+l_block*n_aft_irreg2;
+ }
+ else {
+ last1 += l_block*n_aft_irreg2;
}
+ xbuf.clear();
+ op_buffered_merge(first1, last1, last1+l_irreg2, comp, op, xbuf);
}
}
+
template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Buf>
void merge_blocks_with_buf
( RandItKeys key_first
@@ -532,9 +546,8 @@ RandIt op_partial_merge_left_middle_buffer_impl
, const typename iterator_traits<RandIt>::value_type &next_key, Compare comp
, Op op)
{
- while(first1 != last1 && !comp(next_key, *first1)){
- ++first1;
- }
+ first1 = skip_until_merge(first1, last1, next_key, comp);
+
//Even if we copy backward, no overlapping occurs so use forward copy
//that can be faster specially with trivial types
RandIt const new_first1 = first2 - (last1 - first1);
@@ -558,108 +571,20 @@ RandIt op_partial_merge_left_middle_buffer
// [first1, last1) merge [last1,last2) -> [buf_first, buf_first+M)
// Note: distance(buf_first, first1) >= distance(last1, last2), so no overlapping occurs.
template<class RandIt, class Compare, class Op>
-RandIt op_partial_merge_left_impl
- ( RandIt buf_first, RandIt first1, RandIt const last1, RandIt const last2, Compare comp, Op op)
-{
- RandIt first2 = last1;
- while(first1 != last1){
- if(first2 == last2){
- return first1;
- }
- if(comp(*first2, *first1)) {
- op(first2, buf_first);
- ++first2;
- }
- else{
- op(first1, buf_first);
- ++first1;
- }
- ++buf_first;
- }
- return first2;
-}
-
-
-template<class RandIt, class Compare, class Op>
-RandIt op_partial_merge_left
- ( RandIt buf_first, RandIt first1, RandIt const last1, RandIt const last2, Compare comp, Op op, bool is_stable)
-{
- return is_stable ? op_partial_merge_left_impl(buf_first, first1, last1, last2, comp, op)
- : op_partial_merge_left_impl(buf_first, first1, last1, last2, antistable<Compare>(comp), op);
-}
-
-template<class RandIt>
-bool three_way_side_init( RandIt first1, RandIt last1, RandIt buf_first1, move_op)
-{
- boost::move(first1, last1, buf_first1);
- return true;
-}
-
-template<class RandIt>
-bool three_way_side_init( RandIt, RandIt, RandIt, swap_op)
-{
- return false;
-}
-
-template<class T>
-void three_way_side_move(T &a, T &b, T&c, move_op)
-{
- c = boost::move(b);
- b = boost::move(a);
-}
-
-template<class T>
-void three_way_side_move(T &a, T &b, T &c, swap_op)
-{
- T tmp(boost::move(c));
- c = boost::move(b);
- b = boost::move(a);
- a = boost::move(tmp);
-}
-
-
-// Partially merges two ordered ranges. Partially means that elements are merged
-// until one of two ranges is exhausted (M elements from ranges 1 y 2).
-// [buf_first, ...) -> buffer that can be overwritten
-// [first1, last1) merge [last1,last2) -> [buf_first, buf_first+M)
-// Note: distance(buf_first, first1) >= distance(last1, last2), so no overlapping occurs.
-template<class RandIt, class Compare, class Op>
RandIt op_partial_merge_left_smart_impl
( RandIt first1, RandIt last1, RandIt first2, RandIt const last2, Compare comp, Op op)
{
- typedef typename iterator_traits<RandIt>::value_type value_type;
-
RandIt dest;
if(last1 != first2){
+ BOOST_ASSERT(0 != (last1-first1));
BOOST_ASSERT((first2-last1)==(last2-first2));
//Skip any element that does not need to be moved
- while(!comp(*first2, *first1)){
- ++first1;
- if(first1 == last1)
- return first2;
- }
-
+ first1 = skip_until_merge(first1, last1, *first2, comp);
+ if(first1 == last1)
+ return first2;
RandIt buf_first1 = first2 - (last1-first1);
-
- //If initialization is successful, move to buffer while merging
- //Three way moves need less moves when op is swap_op so use it
- //when merging elements from range2 to the destination occupied by range1
- if(!three_way_side_init(first1, last1, buf_first1, op)){
- RandIt buf_last1 = buf_first1;
- three_way_side_move(*first2, *first1, *buf_last1++, op);
-
- RandIt const orig_first2 = first2;(void)(orig_first2);
- for(++first1, ++first2; first1 != last1; ++first1, ++buf_last1){
- value_type &v = comp(*first2, *buf_first1) ? *first2++ : *buf_first1++;
- three_way_side_move(v, *first1, *buf_last1, op);
- }
- BOOST_ASSERT(buf_last1 == orig_first2);
- last1 = buf_last1;
- }
- else{
- last1 = first2;
- }
- dest = first1;
+ dest = last1;
+ last1 = op_buffered_partial_merge_to_left_placed(first1, last1, first2, last2, buf_first1, comp, op);
first1 = buf_first1;
BOOST_ASSERT((first1-dest) == (last2-first2));
}
@@ -667,27 +592,10 @@ RandIt op_partial_merge_left_smart_impl
dest = first1-(last2-first2);
}
- BOOST_ASSERT(0 != (last1-first1));
- if(first2 != last2)
- while(1){
- if(comp(*first2, *first1)) {
- op(first2++, dest++);
- if(first2 == last2){
- return first1;
- }
- }
- else{
- op(first1++, dest++);
- if(first1 == last1){
- return first2;
- }
- }
- }
- return first1;
+ op_partial_merge(first1, last1, first2, last2, dest, comp, op);
+ return first1 == last1 ? first2 : first1;
}
-
-
template<class RandIt, class Compare, class Op>
RandIt op_partial_merge_left_smart
(RandIt first1, RandIt const last1, RandIt first2, RandIt const last2, Compare comp, Op op, bool is_stable)
@@ -696,7 +604,6 @@ RandIt op_partial_merge_left_smart
: op_partial_merge_left_smart_impl(first1, last1, first2, last2, antistable<Compare>(comp), op);
}
-
// first - first element to merge.
// first[-l_block, 0) - buffer
// l_block - length of regular blocks. Blocks are stable sorted by 1st elements and key-coded
@@ -717,209 +624,58 @@ void op_merge_blocks_left
, typename iterator_traits<RandIt>::size_type const l_irreg2
, Compare comp, Op op)
{
- if(n_bef_irreg2 == 0){
- RandIt const last_reg(first+l_irreg1+n_aft_irreg2*l_block);
- op_merge_left(first-l_block, first, last_reg, last_reg+l_irreg2, comp, op);
- }
- else {
- RandIt buffer = first - l_block;
- RandIt first1 = first;
- RandIt last1 = l_irreg1 ? first1 + l_irreg1 : first1 + l_block;
- RandIt first2 = last1;
- RandItKeys const key_end (key_first+n_bef_irreg2);
- bool skip_first_it = l_irreg1 != 0;
- for( bool is_range1_A = l_irreg1 ? true : key_comp(*key_first, midkey)
- ; key_first != key_end; first2 += l_block){
- if(!skip_first_it){
- ++key_first;
- }
- skip_first_it = false;
- bool const last_it = key_first == key_end;
- //If the trailing block is empty, we'll make it equal to the previous if empty
- bool const is_range2_A = last_it ? (!l_irreg2 && is_range1_A) : key_comp(*key_first, midkey);
- bool const is_buffer_middle = last1 == buffer;
-
- if(is_range1_A == is_range2_A){
- //If range1 is buffered, write it to its final position
- if(!is_buffer_middle){
- buffer = op(forward_t(), first1, last1, buffer);
- }
-
- first1 = first2;
- if(last_it){
- last1 = first2+l_block*n_aft_irreg2;
- op_merge_left(buffer, first1, last1, last1+l_irreg2, comp, op);
- break;
- }
- else{
- last1 = first2 + l_block;
- }
- }
- else {
- BOOST_ASSERT(!last_it || (l_irreg2 || n_aft_irreg2));
- if(last_it){
- if(is_buffer_middle){
- //See comment below marked with (*)
- first1 = op_partial_merge_left_middle_buffer(first1, last1, first2, first2[l_block*n_aft_irreg2], comp, op, is_range1_A);
- last1 = first2;
- buffer = first1 - l_block;
- }
- last1 += l_block*n_aft_irreg2;
- op_merge_left(buffer, first1, last1, last1+l_irreg2, comp, op);
- break;
- }
- else{
- RandIt const last2 = first2 + l_block;
- first1 = op_partial_merge_left_smart(first1, last1, first2, last2, comp, op, is_range1_A);
-
- if(first1 < first2){ //is_buffer_middle == true for the next iteration
- last1 = first2;
- buffer = last1;
- }
- else{ //is_buffer_middle == false for the next iteration
- is_range1_A = is_range2_A;
- buffer = first1 - l_block;
- last1 = last2;
- }
- }
- }
- }
- }
-}
-
-///////////////////////////////////////////////////////////////////////////////
-//
-// PARTIAL MERGE RIGHT
-//
-///////////////////////////////////////////////////////////////////////////////
-
-
-template<class RandIt, class Compare, class Op>
-RandIt op_partial_merge_right_middle_buffer_impl
- ( RandIt const last1, RandIt const first2, RandIt last2, Compare comp, Op op)
-{
- while(first2 != last2 && !comp(last2[-1], last1[-1])){
- --last2;
- }
- return op(forward_t(), first2, last2, last1);
-}
-
-template<class RandIt, class Compare, class Op>
-RandIt op_partial_merge_right_middle_buffer
- ( RandIt const last1, RandIt first2, RandIt const last2
- , Compare comp, Op op, bool is_stable)
-{
- return is_stable ? op_partial_merge_right_middle_buffer_impl(last1, first2, last2, comp, op)
- : op_partial_merge_right_middle_buffer_impl(last1, first2, last2, antistable<Compare>(comp), op);
-}
-
-// Partially merges two ordered ranges. Partially means that elements are merged
-// until one of two ranges is exhausted (M elements from ranges 1 y 2).
-// [last2, buf_last) -> buffer that can be overwritten
-// [first1, last1) merge [last1,last2) -> [buf_last - M, buf_last)
-// Note: distance(last2, buf_last) >= distance(first1, last1), so no overlapping occurs.
-template<class RandIt, class Compare, class Op>
-RandIt op_partial_merge_right_impl
- ( RandIt const first1, RandIt last1, RandIt last2, RandIt buf_last, Compare comp, Op op)
-{
- RandIt const first2 = last1;
- while(first2 != last2){
- if(last1 == first1){
- return last2;
- }
- --last2;
- --last1;
- --buf_last;
- if(comp(*last2, *last1)){
- op(last1, buf_last);
- ++last2;
- }
- else{
- op(last2, buf_last);
- ++last1;
- }
- }
- return last1;
-}
-
-template<class RandIt, class Compare, class Op>
-RandIt op_partial_merge_right
- ( RandIt first1, RandIt const last1, RandIt const last2, RandIt buf_last, Compare comp, Op op, bool is_stable)
-{
- return is_stable ? op_partial_merge_right_impl(first1, last1, last2, buf_last, comp, op)
- : op_partial_merge_right_impl(first1, last1, last2, buf_last, antistable<Compare>(comp), op);
-}
-
-
-// first - first element to merge.
-// last iterator is (first+l_block*(n_bef_irreg2+n_aft_irreg2)+l_irreg2)
-// [last, last+l_block) - buffer
-// l_block - length of regular blocks. Blocks are stable sorted by 1st elements and key-coded
-// key_first - sequence of keys, in same order as blocks. key<midkey means stream A
-// n_bef_irreg2/n_aft_irreg2 are regular blocks
-// l_irreg2 is a irregular block, that is to be merged after n_bef_irreg2 blocks and before n_aft_irreg2 blocks
-// If l_irreg2==0 then n_aft_irreg2==0 (no irregular blocks).
-template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class Op>
-void op_merge_blocks_right
- ( RandItKeys const key_first
- , const typename iterator_traits<RandItKeys>::value_type &midkey
- , KeyCompare key_comp
- , RandIt const first
- , typename iterator_traits<RandIt>::size_type const l_block
- , typename iterator_traits<RandIt>::size_type const n_bef_irreg2
- , typename iterator_traits<RandIt>::size_type const n_aft_irreg2
- , typename iterator_traits<RandIt>::size_type const l_irreg2
- , Compare comp, Op op)
-{
- RandIt last1 = first + (n_bef_irreg2+n_aft_irreg2)*l_block;
- RandIt first1 = last1-l_block;
+ RandIt buffer = first - l_block;
+ RandIt first1 = first;
+ RandIt last1 = first1 + l_irreg1;
RandIt first2 = last1;
- RandIt last2 = first2 + l_irreg2;
- RandIt buffer_end = last2 + l_block;
- RandItKeys key_end (key_first+(n_bef_irreg2+n_aft_irreg2));
-
- for(bool is_range2_A = false; key_first != key_end; last1 = first1, first1 -= l_block){
- --key_end;
- bool const is_range1_A = key_comp(*key_end, midkey);
- bool const is_buffer_middle = first2 == buffer_end;
+ RandItKeys const key_end (key_first+n_bef_irreg2);
+ bool is_range1_A = true;
+ for( ; key_first != key_end; first2 += l_block, ++key_first){
+ //If the trailing block is empty, we'll make it equal to the previous if empty
+ bool const is_range2_A = key_comp(*key_first, midkey);
if(is_range1_A == is_range2_A){
- if(!is_buffer_middle){
- buffer_end = op(backward_t(), first2, last2, buffer_end);
+ if(last1 != buffer){ //equiv. to if(!is_buffer_middle)
+ buffer = op(forward_t(), first1, last1, buffer);
}
- //else //op forward already done on previous op_partial_merge_right
-
- first2 = first1;
- last2 = last1;
+ first1 = first2;
+ last1 = first2 + l_block;
}
else {
- if(is_buffer_middle){
- //A previous op_partial_merge_right has right range2 elements after the buffer.
- //In order to merge it with the next block, move them to the start of the buffer so that
- //buffer is placed to the right. Move only the minimum elements as some range1 elements
- //won't be moved in the merge.
- last2 = op_partial_merge_right_middle_buffer(last1, first2, last2, comp, op, !is_range2_A);
- first2 = last1;
- buffer_end = last2 + l_block;
- }
+ RandIt const last2 = first2 + l_block;
+ first1 = op_partial_merge_left_smart(first1, last1, first2, last2, comp, op, is_range1_A);
- //op_partial_merge_right merges two ranges, but stops moving elements
- //once one range is emptied to avoid moving data twice in the next iteration
- last2 = op_partial_merge_right(first1, last1, last2, buffer_end, comp, op, !is_range2_A);
- if(last2 > first2){ //is_buffer_middle == true for the next iteration
- buffer_end = first2;
+ if(first1 < first2){ //is_buffer_middle for the next iteration
+ last1 = first2;
+ buffer = last1;
}
- else{ //is_buffer_middle == false for the next iteration
- is_range2_A = is_range1_A;
- buffer_end = last2 + l_block;
- first2 = first1;
+ else{ //!is_buffer_middle for the next iteration
+ is_range1_A = is_range2_A;
+ buffer = first1 - l_block;
+ last1 = last2;
}
}
}
- if(first2 != buffer_end){
- op(backward_t(), first2, last2, buffer_end);
+ //Now the trailing irregular block
+ bool const is_range2_A = false; //Trailing l_irreg2 is always from Range B
+ bool const is_buffer_middle = last1 == buffer;
+
+ if(!l_irreg2 || is_range1_A == is_range2_A){ //trailing is always B type
+ //If range1 is buffered, write it to its final position
+ if(!is_buffer_middle){
+ buffer = op(forward_t(), first1, last1, buffer);
+ }
+ first1 = first2;
+ }
+ else {
+ if(is_buffer_middle){
+ first1 = op_partial_merge_left_middle_buffer(first1, last1, first2, first2[l_block*n_aft_irreg2], comp, op, is_range1_A);
+ buffer = first1 - l_block;
+ }
}
+ last1 = first2 + l_block*n_aft_irreg2;
+ op_merge_left(buffer, first1, last1, last1+l_irreg2, comp, op);
}
///////////////////////////////////////////////////////////////////////////////
@@ -937,17 +693,17 @@ RandIt partial_merge_bufferless_impl
return first1;
}
bool const is_range1_A = *pis_range1_A;
- if(first1 != last1 && comp(*last1, last1[-1])){
+ if(first1 != last1 && comp(*last1, last1[-1])){
do{
RandIt const old_last1 = last1;
- last1 = lower_bound(last1, last2, *first1, comp);
+ last1 = lower_bound(last1, last2, *first1, comp);
first1 = rotate_gcd(first1, old_last1, last1);//old_last1 == last1 supported
if(last1 == last2){
return first1;
}
do{
++first1;
- } while(last1 != first1 && !comp(*last1, *first1) );
+ } while(last1 != first1 && !comp(*last1, *first1) );
} while(first1 != last1);
}
*pis_range1_A = !is_range1_A;
@@ -993,7 +749,7 @@ void merge_blocks_bufferless
bool is_range1_A = l_irreg1 ? true : key_comp(*key_first++, midkey);
for( ; key_first != key_end; ++key_first){
- bool is_range2_A = key_comp(*key_first, midkey);
+ bool is_range2_A = key_comp(*key_first, midkey);
if(is_range1_A == is_range2_A){
first1 = last1;
}
@@ -1077,9 +833,9 @@ typename iterator_traits<RandIt>::size_type
if(xbuf.capacity() >= max_collected){
value_type *const ph0 = xbuf.add(first);
while(u != last && h < max_collected){
- value_type * const r = lower_bound(ph0, xbuf.end(), *u, comp);
+ value_type * const r = lower_bound(ph0, xbuf.end(), *u, comp);
//If key not found add it to [h, h+h0)
- if(r == xbuf.end() || comp(*u, *r) ){
+ if(r == xbuf.end() || comp(*u, *r) ){
RandIt const new_h0 = boost::move(search_end, u, h0);
search_end = u;
++search_end;
@@ -1094,9 +850,9 @@ typename iterator_traits<RandIt>::size_type
}
else{
while(u != last && h < max_collected){
- RandIt const r = lower_bound(h0, search_end, *u, comp);
+ RandIt const r = lower_bound(h0, search_end, *u, comp);
//If key not found add it to [h, h+h0)
- if(r == search_end || comp(*u, *r) ){
+ if(r == search_end || comp(*u, *r) ){
RandIt const new_h0 = rotate_gcd(h0, search_end, u);
search_end = u;
++search_end;
@@ -1200,6 +956,18 @@ struct less
//
///////////////////////////////////////////////////////////////////////////////
+//#define ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+
+#if defined ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+template<class RandIt, class Compare>
+void slow_stable_sort
+ ( RandIt const first, RandIt const last, Compare comp)
+{
+ boost::movelib::inplace_stable_sort(first, last, comp);
+}
+
+#else //ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+
template<class RandIt, class Compare>
void slow_stable_sort
( RandIt const first, RandIt const last, Compare comp)
@@ -1222,16 +990,18 @@ void slow_stable_sort
if(do_merge){
size_type const h_2 = 2*h;
while((L-p0) > h_2){
- merge_bufferless(first+p0, first+p0+h, first+p0+h_2, comp);
+ merge_bufferless(first+p0, first+p0+h, first+p0+h_2, comp);
p0 += h_2;
}
}
- if((L-p0) > h){
+ if((L-p0) > h){
merge_bufferless(first+p0, first+p0+h, last, comp);
}
}
}
+#endif //ADAPTIVE_SORT_MERGE_SLOW_STABLE_SORT_IS_NLOGN
+
//Returns new l_block and updates use_buf
template<class Unsigned>
Unsigned lblock_for_combine
@@ -1271,7 +1041,7 @@ Unsigned lblock_for_combine
//Although "cycle" sort is known to have the minimum number of writes to target
//selection sort is more appropriate here as we want to minimize swaps.
-template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf>
void selection_sort_blocks
( RandItKeys keys
, typename iterator_traits<RandIt>::size_type &midkey_idx //inout
@@ -1280,7 +1050,9 @@ void selection_sort_blocks
, typename iterator_traits<RandIt>::size_type const l_block
, typename iterator_traits<RandIt>::size_type const n_blocks
, Compare comp
- , bool use_first_element)
+ , bool use_first_element
+ , XBuf & xbuf
+)
{
typedef typename iterator_traits<RandIt>::size_type size_type ;
size_type const back_midkey_idx = midkey_idx;
@@ -1294,6 +1066,10 @@ void selection_sort_blocks
//One-past the position of the first untouched element of the second half
size_type high_watermark = back_midkey_idx+1;
BOOST_ASSERT(high_watermark <= n_blocks);
+ const bool b_cache_on = xbuf.capacity() >= l_block;
+ //const bool b_cache_on = false;
+ const size_type cached_none = size_type(-1);
+ size_type cached_block = cached_none;
//Sort by first element if left merging, last element otherwise
size_type const reg_off = use_first_element ? 0u: l_block-1;
@@ -1303,35 +1079,89 @@ void selection_sort_blocks
//Since we are searching for the minimum value in two sorted halves:
//Optimization 1: If block belongs to first half, don't waste time comparing elements of the first half.
//Optimization 2: It is enough to compare until the first untouched element of the second half.
+ //Optimization 3: If cache memory is available, instead of swapping blocks (3 writes per element),
+ // play with the cache to approximate it to 2 writes per element.
high_watermark = size_type(max_value(block+2, high_watermark));
BOOST_ASSERT(high_watermark <= n_blocks);
for(size_type next_block = size_type(max_value(block+1, back_midkey_idx)); next_block < high_watermark; ++next_block){
- const value_type &v = first_block[next_block*l_block+reg_off];
- const value_type &min = first_block[min_block*l_block+reg_off];
- if( comp(v, min) || (!comp(min, v) && key_comp(keys[next_block], keys[min_block])) ){
- min_block=next_block;
+ const value_type &min_v = (b_cache_on && (cached_block == min_block) ? xbuf.data()[reg_off] : first_block[min_block*l_block+reg_off]);
+ const value_type &v = (b_cache_on && (cached_block == next_block) ? xbuf.data()[reg_off] : first_block[next_block*l_block+reg_off]);
+
+ if( comp(v, min_v) || (!comp(min_v, v) && key_comp(keys[next_block], keys[min_block])) ){
+ min_block = next_block;
}
}
if(min_block != block){
BOOST_ASSERT(block >= back_midkey_idx || min_block >= back_midkey_idx);
BOOST_ASSERT(min_block < high_watermark);
- //Update high watermark if n_blocks is not surpassed
+ //Increase high watermark if not the maximum and min_block is just before the high watermark
high_watermark += size_type((min_block + 1) != n_blocks && (min_block + 1) == high_watermark);
BOOST_ASSERT(high_watermark <= n_blocks);
- boost::adl_move_swap_ranges(first_block+block*l_block, first_block+(block+1)*l_block, first_block+min_block*l_block);
+ if(!b_cache_on){
+ boost::adl_move_swap_ranges(first_block+block*l_block, first_block+(block+1)*l_block, first_block+min_block*l_block);
+ }
+ else if(cached_block == cached_none){
+ //Cache the biggest block and put the minimum into its final position
+ xbuf.move_assign(first_block+block*l_block, l_block);
+ boost::move(first_block+min_block*l_block, first_block+(min_block+1)*l_block, first_block+block*l_block);
+ cached_block = min_block;
+ }
+ else if(cached_block == block){
+ //Since block is cached and is not the minimum, just put the minimum directly into its final position and update the cache index
+ boost::move(first_block+min_block*l_block, first_block+(min_block+1)*l_block, first_block+block*l_block);
+ cached_block = min_block;
+ }
+ else if(cached_block == min_block){
+ //Since the minimum is cached, move the block to the back position and flush the cache to its final position
+ boost::move(first_block+block*l_block, first_block+(block+1)*l_block, first_block+min_block*l_block);
+ boost::move(xbuf.data(), xbuf.end(), first_block+block*l_block);
+ cached_block = cached_none;
+ }
+ else{
+ //Cached block is not either of the two blocks to be exchanged, so a smarter operation must be performed
+ BOOST_ASSERT(cached_block != min_block);
+ BOOST_ASSERT(cached_block != block);
+ BOOST_ASSERT(cached_block > block);
+ BOOST_ASSERT(cached_block < high_watermark);
+ //Instead of moving block to the slot of the minimum (which is typical selection sort), before copying
+ //data from the minimum slot to its final position:
+ // -> move it to free slot pointed by cached index, and
+ // -> move cached index into slot of the minimum.
+ //Since both cached_block and min_block belong to the still unordered range of blocks, the change
+ //does not break selection sort and saves one copy.
+ boost::move(first_block+block*l_block, first_block+(block+1)*l_block, first_block+cached_block*l_block);
+ boost::move(first_block+min_block*l_block, first_block+(min_block+1)*l_block, first_block+block*l_block);
+ //Note that this trick requires an additional fix for keys and midkey index
+ boost::adl_move_swap(keys[cached_block], keys[min_block]);
+ if(midkey_idx == cached_block)
+ midkey_idx = min_block;
+ else if(midkey_idx == min_block)
+ midkey_idx = cached_block;
+ boost::adl_move_swap(cached_block, min_block);
+ }
+ //Once min_block and block are exchanged, fix the movement imitation key buffer and midkey index.
boost::adl_move_swap(keys[block], keys[min_block]);
if(midkey_idx == block)
midkey_idx = min_block;
else if(midkey_idx == min_block)
midkey_idx = block;
}
+ else if(b_cache_on && cached_block == block){
+ //The selected block was the minimum, but since it was cached, move it to its final position
+ boost::move(xbuf.data(), xbuf.end(), first_block+block*l_block);
+ cached_block = cached_none;
+ }
+ } //main for loop
+
+ if(b_cache_on && cached_block != cached_none){
+ //The sort has ended with cached data, move it to its final position
+ boost::move(xbuf.data(), xbuf.end(), first_block+cached_block*l_block);
}
}
-template<class RandIt, class Compare>
-void stable_sort( RandIt first, RandIt last, Compare comp
- , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf)
+template<class RandIt, class Compare, class XBuf>
+void stable_sort( RandIt first, RandIt last, Compare comp, XBuf & xbuf)
{
typedef typename iterator_traits<RandIt>::size_type size_type;
size_type const len = size_type(last - first);
@@ -1344,10 +1174,10 @@ void stable_sort( RandIt first, RandIt last, Compare comp
}
}
-template<class RandIt, class Comp>
+template<class RandIt, class Comp, class XBuf>
void initialize_keys( RandIt first, RandIt last
, Comp comp
- , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf)
+ , XBuf & xbuf)
{
stable_sort(first, last, comp, xbuf);
}
@@ -1365,7 +1195,7 @@ void initialize_keys( RandIt first, RandIt last
}
}
-template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf>
void combine_params
( RandItKeys const keys
, KeyCompare key_comp
@@ -1373,7 +1203,7 @@ void combine_params
, typename iterator_traits<RandIt>::size_type l_combined
, typename iterator_traits<RandIt>::size_type const l_prev_merged
, typename iterator_traits<RandIt>::size_type const l_block
- , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
+ , XBuf & xbuf
, Compare comp
//Output
, typename iterator_traits<RandIt>::size_type &midkey_idx
@@ -1381,27 +1211,39 @@ void combine_params
, typename iterator_traits<RandIt>::size_type &n_bef_irreg2
, typename iterator_traits<RandIt>::size_type &n_aft_irreg2
, typename iterator_traits<RandIt>::size_type &l_irreg2
- , bool is_merge_left)
+ //Options
+ , bool is_merge_left_or_bufferless
+ , bool do_initialize_keys = true)
{
typedef typename iterator_traits<RandIt>::size_type size_type;
typedef typename iterator_traits<RandIt>::value_type value_type;
+ //Initial parameters for selection sort blocks
l_irreg1 = l_prev_merged%l_block;
l_irreg2 = (l_combined-l_irreg1)%l_block;
BOOST_ASSERT(((l_combined-l_irreg1-l_irreg2)%l_block) == 0);
size_type const n_reg_block = (l_combined-l_irreg1-l_irreg2)/l_block;
midkey_idx = l_prev_merged/l_block;
BOOST_ASSERT(n_reg_block>=midkey_idx);
- initialize_keys(keys, keys+n_reg_block+(midkey_idx==n_reg_block), key_comp, xbuf);
- selection_sort_blocks(keys, midkey_idx, key_comp, first+l_irreg1, l_block, n_reg_block, comp, is_merge_left);
+ //Key initialization
+ if (do_initialize_keys) {
+ initialize_keys(keys, keys+n_reg_block+(midkey_idx==n_reg_block), key_comp, xbuf);
+ }
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A initkey: ", l_combined + l_block);
+
+ //Selection sort blocks
+ selection_sort_blocks(keys, midkey_idx, key_comp, first+l_irreg1, l_block, n_reg_block, comp, is_merge_left_or_bufferless, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A selsort: ", l_combined + l_block);
+
+ //Special case for the last elements
n_aft_irreg2 = 0;
- if(l_irreg2!=0){
- size_type const reg_off = is_merge_left ? 0u: l_block-1;
- size_type const irreg_off = is_merge_left ? 0u: l_irreg2-1;
+ if(l_irreg2 != 0){
+ size_type const reg_off = is_merge_left_or_bufferless ? 0u: l_block-1;
+ size_type const irreg_off = is_merge_left_or_bufferless ? 0u: l_irreg2-1;
RandIt prev_block_first = first + l_combined - l_irreg2;
const value_type &incomplete_block_first = prev_block_first[irreg_off];
- while(n_aft_irreg2 != n_reg_block &&
+ while(n_aft_irreg2 != n_reg_block &&
comp(incomplete_block_first, (prev_block_first-= l_block)[reg_off]) ){
++n_aft_irreg2;
}
@@ -1461,14 +1303,17 @@ void merge_blocks_right
, Compare comp
, bool const xbuf_used)
{
- if(xbuf_used){
- op_merge_blocks_right
- (key_first, midkey, key_comp, first, l_block, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, move_op());
- }
- else{
- op_merge_blocks_right
- (key_first, midkey, key_comp, first, l_block, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, swap_op());
- }
+ merge_blocks_left
+ ( make_reverse_iterator(key_first+n_aft_irreg2 + n_bef_irreg2)
+ , midkey
+ , negate<KeyCompare>(key_comp)
+ , make_reverse_iterator(first+(n_bef_irreg2+n_aft_irreg2)*l_block+l_irreg2)
+ , l_block
+ , l_irreg2
+ , n_aft_irreg2 + n_bef_irreg2
+ , 0
+ , 0
+ , inverse<Compare>(comp), xbuf_used);
}
@@ -1530,8 +1375,8 @@ Unsigned calculate_total_combined(Unsigned const len, Unsigned const l_prev_merg
// Blocks of length l_prev_merged combined. We'll combine them in pairs
// l_prev_merged and n_keys are powers of 2. (2*l_prev_merged/l_block) keys are guaranteed
// Returns the number of combined elements (some trailing elements might be left uncombined)
-template<class RandItKeys, class KeyCompare, class RandIt, class Compare>
-void combine_blocks
+template<class RandItKeys, class KeyCompare, class RandIt, class Compare, class XBuf>
+void adaptive_sort_combine_blocks
( RandItKeys const keys
, KeyCompare key_comp
, RandIt const first
@@ -1540,16 +1385,17 @@ void combine_blocks
, typename iterator_traits<RandIt>::size_type const l_block
, bool const use_buf
, bool const xbuf_used
- , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
+ , XBuf & xbuf
, Compare comp
, bool merge_left)
{
+ (void)xbuf;
typedef typename iterator_traits<RandIt>::size_type size_type;
- size_type const l_combined = 2*l_prev_merged;
+ size_type const l_reg_combined = 2*l_prev_merged;
size_type l_irreg_combined = 0;
size_type const l_total_combined = calculate_total_combined(len, l_prev_merged, &l_irreg_combined);
- size_type const n_reg_combined = len/l_combined;
+ size_type const n_reg_combined = len/l_reg_combined;
RandIt combined_first = first;
(void)l_total_combined;
@@ -1558,32 +1404,37 @@ void combine_blocks
size_type n_bef_irreg2, n_aft_irreg2, midkey_idx, l_irreg1, l_irreg2;
size_type const max_i = n_reg_combined + (l_irreg_combined != 0);
- if(merge_left || !use_buf)
- for( size_type combined_i = 0; combined_i != max_i; ++combined_i, combined_first += l_combined) {
+ if(merge_left || !use_buf) {
+ for( size_type combined_i = 0; combined_i != max_i; ++combined_i, combined_first += l_reg_combined) {
bool const is_last = combined_i==n_reg_combined;
- combine_params( keys, key_comp, combined_first, is_last ? l_irreg_combined : l_combined
- , l_prev_merged, l_block, xbuf, comp
+ size_type const l_cur_combined = is_last ? l_irreg_combined : l_reg_combined;
+
+ range_xbuf<RandIt, move_op> rbuf( (use_buf && xbuf_used) ? (combined_first-l_block) : combined_first, combined_first);
+ combine_params( keys, key_comp, combined_first, l_cur_combined
+ , l_prev_merged, l_block, rbuf, comp
, midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true); //Outputs
- //BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After combine_params: ", len + l_block);
- BOOST_ASSERT(!l_irreg1);
- if(use_buf){
- merge_blocks_left
- (keys, keys[midkey_idx], key_comp, combined_first, l_block, 0u, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
- }
- else{
+ //Now merge blocks
+ if(!use_buf){
merge_blocks_bufferless
(keys, keys[midkey_idx], key_comp, combined_first, l_block, 0u, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp);
}
+ else{
+ merge_blocks_left
+ (keys, keys[midkey_idx], key_comp, combined_first, l_block, 0u, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
+ }
//BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After merge_blocks_l: ", len + l_block);
}
+ }
else{
- combined_first += l_combined*(max_i-1);
- for( size_type combined_i = max_i; combined_i--; combined_first -= l_combined) {
+ combined_first += l_reg_combined*(max_i-1);
+ for( size_type combined_i = max_i; combined_i--; combined_first -= l_reg_combined) {
bool const is_last = combined_i==n_reg_combined;
- combine_params( keys, key_comp, combined_first, is_last ? l_irreg_combined : l_combined
- , l_prev_merged, l_block, xbuf, comp
- , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, false); //Outputs
- BOOST_ASSERT(!l_irreg1);
+ size_type const l_cur_combined = is_last ? l_irreg_combined : l_reg_combined;
+ RandIt const combined_last(combined_first+l_cur_combined);
+ range_xbuf<RandIt, move_op> rbuf(combined_last, xbuf_used ? (combined_last+l_block) : combined_last);
+ combine_params( keys, key_comp, combined_first, l_cur_combined
+ , l_prev_merged, l_block, rbuf, comp
+ , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, false); //Outputs
//BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After combine_params: ", len + l_block);
merge_blocks_right
(keys, keys[midkey_idx], key_comp, combined_first, l_block, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
@@ -1709,12 +1560,12 @@ void op_merge_right_step
if(restk <= l_build_buf){
op(backward_t(),first_block+p, first_block+p+restk, first_block+p+restk+l_build_buf);
}
- else{
+ else{
op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+restk, first_block+p+restk+l_build_buf, comp, op);
}
while(p>0){
p -= 2*l_build_buf;
- op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+2*l_build_buf, first_block+p+3*l_build_buf, comp, op);
+ op_merge_right(first_block+p, first_block+p+l_build_buf, first_block+p+2*l_build_buf, first_block+p+3*l_build_buf, comp, op);
}
}
@@ -1741,7 +1592,7 @@ void op_merge_right_step
// until all is merged or auxiliary memory is not large enough.
template<class RandIt, class Compare>
typename iterator_traits<RandIt>::size_type
- build_blocks
+ adaptive_sort_build_blocks
( RandIt const first
, typename iterator_traits<RandIt>::size_type const len
, typename iterator_traits<RandIt>::size_type const l_base
@@ -1830,7 +1681,7 @@ typename iterator_traits<RandIt>::size_type
//[buffer+len-l_intbuf, buffer+len). Otherwise, buffer is
//[buffer,buffer+l_intbuf)
template<class RandIt, class Compare>
-bool combine_all_blocks
+bool adaptive_sort_combine_all_blocks
( RandIt keys
, typename iterator_traits<RandIt>::size_type &n_keys
, RandIt const buffer
@@ -1847,14 +1698,7 @@ bool combine_all_blocks
//Backup data to external buffer once if possible
bool const common_xbuf = l_data > l_merged && l_intbuf && l_intbuf <= xbuf.capacity();
if(common_xbuf){
- if(n_keys){
- xbuf.move_assign(buffer, l_intbuf);
- }
- else{
- xbuf.clear();
- merge_sort_uninitialized_copy(buffer, first, xbuf.data(), comp);
- xbuf.set_size(l_intbuf);
- }
+ xbuf.move_assign(buffer, l_intbuf);
}
bool prev_merge_left = true;
@@ -1877,8 +1721,7 @@ bool combine_all_blocks
bool const is_merge_left = (n&1) == 0;
size_type const l_total_combined = calculate_total_combined(l_data, l_merged);
-
- if(prev_use_internal_buf && prev_merge_left){
+ if(n && prev_use_internal_buf && prev_merge_left){
if(is_merge_left || !use_internal_buf){
move_data_backward(first-l_prev_block, l_prev_total_combined, first, common_xbuf);
}
@@ -1900,13 +1743,13 @@ bool combine_all_blocks
//Combine to form l_merged*2 segments
if(n_keys){
- combine_blocks
+ adaptive_sort_combine_blocks
( keys, comp, !use_internal_buf || is_merge_left ? first : first-l_block
, l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
}
else{
size_type *const uint_keys = xbuf.template aligned_trailing<size_type>();
- combine_blocks
+ adaptive_sort_combine_blocks
( uint_keys, less(), !use_internal_buf || is_merge_left ? first : first-l_block
, l_data, l_merged, l_block, use_internal_buf, common_xbuf, xbuf, comp, is_merge_left);
}
@@ -1918,6 +1761,7 @@ bool combine_all_blocks
}
BOOST_ASSERT(l_prev_total_combined == l_data);
bool const buffer_right = prev_use_internal_buf && prev_merge_left;
+
l_intbuf = prev_use_internal_buf ? l_prev_block : 0u;
n_keys = l_unique - l_intbuf;
//Restore data from to external common buffer if used
@@ -1954,15 +1798,15 @@ void stable_merge
template<class RandIt, class Compare>
-void final_merge( bool buffer_right
- , RandIt const first
- , typename iterator_traits<RandIt>::size_type const l_intbuf
- , typename iterator_traits<RandIt>::size_type const n_keys
- , typename iterator_traits<RandIt>::size_type const len
- , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
- , Compare comp)
-{
- BOOST_ASSERT(n_keys || xbuf.size() == l_intbuf);
+void adaptive_sort_final_merge( bool buffer_right
+ , RandIt const first
+ , typename iterator_traits<RandIt>::size_type const l_intbuf
+ , typename iterator_traits<RandIt>::size_type const n_keys
+ , typename iterator_traits<RandIt>::size_type const len
+ , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
+ , Compare comp)
+{
+ //BOOST_ASSERT(n_keys || xbuf.size() == l_intbuf);
xbuf.clear();
typedef typename iterator_traits<RandIt>::size_type size_type;
@@ -1990,7 +1834,7 @@ void final_merge( bool buffer_right
}
template<class RandIt, class Compare, class Unsigned, class T>
-bool build_params
+bool adaptive_sort_build_params
(RandIt first, Unsigned const len, Compare comp
, Unsigned &n_keys, Unsigned &l_intbuf, Unsigned &l_base, Unsigned &l_build_buf
, adaptive_xbuf<T> & xbuf
@@ -2009,7 +1853,7 @@ bool build_params
//segments of size l_build_buf*2, maximizing the classic merge phase.
l_intbuf = size_type(ceil_sqrt_multiple(len, &l_base));
- //This is the minimum number of case to implement the ideal algorithm
+ //This is the minimum number of keys to implement the ideal algorithm
//
//l_intbuf is used as buffer plus the key count
size_type n_min_ideal_keys = l_intbuf-1u;
@@ -2030,10 +1874,10 @@ bool build_params
//
//If available memory is 2*sqrt(l), then only sqrt(l) unique keys are needed,
//(to be used for keys in combine_all_blocks) as the whole l_build_buf
- //we'll be backuped in the buffer during build_blocks.
+ //will be backuped in the buffer during build_blocks.
bool const non_unique_buf = xbuf.capacity() >= 2*l_intbuf;
size_type const to_collect = non_unique_buf ? l_intbuf : l_intbuf*2;
- size_type collected = collect_unique(first, first+len, to_collect, comp, xbuf);
+ size_type collected = collect_unique(first, first+len, to_collect, comp, xbuf);
//If available memory is 2*sqrt(l), then for "build_params"
//the situation is the same as if 2*l_intbuf were collected.
@@ -2044,7 +1888,7 @@ bool build_params
//is possible (due to very low unique keys), then go to a slow sort based on rotations.
if(collected < (n_min_ideal_keys+l_intbuf)){
if(collected < 4){ //No combination possible with less that 4 keys
- return false;
+ return false;
}
n_keys = l_intbuf;
while(n_keys&(n_keys-1)){
@@ -2053,6 +1897,7 @@ bool build_params
while(n_keys > collected){
n_keys/=2;
}
+ //AdaptiveSortInsertionSortThreshold is always power of two so the minimum is power of two
l_base = min_value<Unsigned>(n_keys, AdaptiveSortInsertionSortThreshold);
l_intbuf = 0;
l_build_buf = n_keys;
@@ -2072,6 +1917,224 @@ bool build_params
return true;
}
+
+#define BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
+
+template<class RandIt, class Compare>
+inline void adaptive_merge_combine_blocks( RandIt first
+ , typename iterator_traits<RandIt>::size_type len1
+ , typename iterator_traits<RandIt>::size_type len2
+ , typename iterator_traits<RandIt>::size_type collected
+ , typename iterator_traits<RandIt>::size_type n_keys
+ , typename iterator_traits<RandIt>::size_type l_block
+ , bool use_internal_buf
+ , bool xbuf_used
+ , Compare comp
+ , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
+ )
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ size_type const len = len1+len2;
+ size_type const l_combine = len-collected;
+ size_type const l_combine1 = len1-collected;
+ size_type n_bef_irreg2, n_aft_irreg2, l_irreg1, l_irreg2, midkey_idx;
+
+ if(n_keys){
+ RandIt const first_data = first+collected;
+ RandIt const keys = first;
+ combine_params( keys, comp, first_data, l_combine
+ , l_combine1, l_block, xbuf, comp
+ , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true, false); //Outputs
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A combine: ", len);
+ if(xbuf_used){
+ BOOST_ASSERT(xbuf.size() >= l_block);
+ merge_blocks_with_buf
+ (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf, xbuf_used);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg xbf: ", len);
+ }
+ else if(use_internal_buf){
+ #ifdef BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
+ range_xbuf<RandIt, swap_op> rbuf(first_data-l_block, first_data);
+ merge_blocks_with_buf
+ (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, rbuf, xbuf_used);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg buf: ", len);
+ #else
+ merge_blocks_left
+ (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg lft: ", len);
+ #endif
+ }
+ else{
+ merge_blocks_bufferless
+ (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg xbf: ", len);
+ }
+ }
+ else{
+ xbuf.shrink_to_fit(l_block);
+ if(xbuf.size() < l_block){
+ xbuf.initialize_until(l_block, *first);
+ }
+ size_type *const uint_keys = xbuf.template aligned_trailing<size_type>(l_block);
+ combine_params( uint_keys, less(), first, l_combine
+ , l_combine1, l_block, xbuf, comp
+ , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true, true); //Outputs
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A combine: ", len);
+ BOOST_ASSERT(xbuf.size() >= l_block);
+ merge_blocks_with_buf
+ (uint_keys, uint_keys[midkey_idx], less(), first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf, true);
+ xbuf.clear();
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg buf: ", len);
+ }
+
+}
+
+template<class RandIt, class Compare>
+inline void adaptive_merge_final_merge( RandIt first
+ , typename iterator_traits<RandIt>::size_type len1
+ , typename iterator_traits<RandIt>::size_type len2
+ , typename iterator_traits<RandIt>::size_type collected
+ , typename iterator_traits<RandIt>::size_type l_intbuf
+ , typename iterator_traits<RandIt>::size_type l_block
+ , bool use_internal_buf
+ , bool xbuf_used
+ , Compare comp
+ , adaptive_xbuf<typename iterator_traits<RandIt>::value_type> & xbuf
+ )
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ (void)l_block;
+ size_type n_keys = collected-l_intbuf;
+ size_type len = len1+len2;
+ if(use_internal_buf){
+ if(xbuf_used){
+ xbuf.clear();
+ //Nothing to do
+ if(n_keys){
+ stable_sort(first, first+n_keys, comp, xbuf);
+ stable_merge(first, first+n_keys, first+len, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key mrg: ", len);
+ }
+ }
+ else{
+ #ifdef BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
+ xbuf.clear();
+ stable_sort(first, first+collected, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b srt: ", len);
+ stable_merge(first, first+collected, first+len, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b mrg: ", len);
+ #else
+ xbuf.clear();
+ stable_sort(first+len-l_block, first+len, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A buf srt: ", len);
+ RandIt const pos1 = lower_bound(first+n_keys, first+len-l_block, first[len-1], comp);
+ RandIt const pos2 = rotate_gcd(pos1, first+len-l_block, first+len);
+ stable_merge(first+n_keys, pos1, pos2, antistable<Compare>(comp), xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A buf mrg: ", len);
+ if(n_keys){
+ stable_sort(first, first+n_keys, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key srt: ", len);
+ stable_merge(first, first+n_keys, first+len, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key mrg: ", len);
+ }
+ #endif
+ }
+ }
+ else{
+ xbuf.clear();
+ stable_sort(first, first+collected, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b srt: ", len);
+ stable_merge(first, first+collected, first+len1+len2, comp, xbuf);
+ BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A k/b mrg: ", len);
+ }
+}
+
+template<class SizeType, class Xbuf>
+inline SizeType adaptive_merge_n_keys_intbuf(SizeType l_block, SizeType len, Xbuf & xbuf, SizeType &l_intbuf_inout)
+{
+ typedef SizeType size_type;
+ size_type l_intbuf = xbuf.capacity() >= l_block ? 0u : l_block;
+
+ //This is the minimum number of keys to implement the ideal algorithm
+ //ceil(len/l_block) - 1 (as the first block is used as buffer)
+ size_type n_keys = len/l_block+1;
+ while(n_keys >= (len-l_intbuf-n_keys)/l_block){
+ --n_keys;
+ }
+ ++n_keys;
+ //BOOST_ASSERT(n_keys < l_block);
+
+ if(xbuf.template supports_aligned_trailing<size_type>(l_block, n_keys)){
+ n_keys = 0u;
+ }
+ l_intbuf_inout = l_intbuf;
+ return n_keys;
+}
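+
+//Illustrative worked example of the computation above (not normative): for
+//len == 10000 and l_block == 100 with a small xbuf (so l_intbuf == l_block == 100),
+//n_keys starts at len/l_block+1 == 101 and the loop lowers it until
+//n_keys < (len-l_intbuf-n_keys)/l_block holds, leaving n_keys == 98, roughly one
+//key per data block. If xbuf can hold l_block elements plus n_keys integral keys,
+//n_keys is then set to 0 and integral keys are used instead.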
+
+///////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////
+
+// Main explanation of the sort algorithm.
+//
+// csqrtlen = ceil(sqrt(len));
+//
+// * First, 2*csqrtlen unique elements are extracted from the elements to be
+// sorted and placed in the beginning of the range.
+//
+// * Step "build_blocks": In this nearly-classic merge step, 2*csqrtlen unique elements
+//   will be used as auxiliary memory, so the trailing len-2*csqrtlen elements
+//   are grouped in blocks of sorted 4*csqrtlen elements. At the end of the step
+// 2*csqrtlen unique elements are again the leading elements of the whole range.
+//
+// * Step "combine_blocks": pairs of previously formed blocks are merged with a different
+// ("smart") algorithm to form blocks of 8*csqrtlen elements. This step is slower than the
+// "build_blocks" step and repeated iteratively (forming blocks of 16*csqrtlen, 32*csqrtlen
+// elements, etc) of until all trailing (len-2*csqrtlen) elements are merged.
+//
+// In "combine_blocks" len/csqrtlen elements used are as "keys" (markers) to
+// know if elements belong to the first or second block to be merged and another
+// leading csqrtlen elements are used as buffer. Explanation of the "combine_blocks" step:
+//
+// Iteratively until all trailing (len-2*csqrtlen) elements are merged:
+//    Iteratively for each pair of previously merged blocks:
+//       * Blocks are divided in groups of csqrtlen elements and
+//         2*merged_block/csqrtlen keys are sorted to be used as markers
+//       * Groups are selection-sorted by first or last element (depending on whether they
+//         are merged to the left or right) and keys are reordered accordingly as an imitation-buffer.
+//       * Elements of each block pair are merged using the csqrtlen buffer, taking into account
+//         whether they belong to the first or second half (marked by the key).
+//
+// * In the final merge step leading elements (2*csqrtlen) are sorted and merged with
+// rotations with the rest of sorted elements in the "combine_blocks" step.
+//
+// Corner cases:
+//
+// * If no 2*csqrtlen elements can be extracted:
+//
+//    * If at least csqrtlen+len/csqrtlen elements are extracted, then only csqrtlen elements are used
+//      as buffer in the "build_blocks" step, forming blocks of 2*csqrtlen elements. This
+//      means that an additional "combine_blocks" step will be needed to merge all elements.
+//
+//    * If fewer than csqrtlen+len/csqrtlen elements can be extracted, but still more than a minimum,
+//      then the number of elements used as buffer and keys in the "build_blocks"
+//      and "combine_blocks" steps is reduced. If "combine_blocks" does not have enough keys due to this
+//      reduction, a rotation-based smart merge is used.
+//
+// * If the minimum number of keys can't be extracted, a rotation-based sorting is performed.
+//
+// * If auxiliary memory is greater than or equal to ceil(len/2), half-copying mergesort is used.
+//
+// * If auxiliary memory is more than csqrtlen+n_keys*sizeof(std::size_t),
+// then only csqrtlen elements need to be extracted and "combine_blocks" will use integral
+// keys to combine blocks.
+//
+// * If auxiliary memory is available, the "build_blocks" will be extended to build bigger blocks
+// using classic merge.
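+//
+// A minimal usage sketch (assuming the public boost::movelib::adaptive_sort entry
+// point declared in boost/move/algo/adaptive_sort.hpp, which forwards to
+// adaptive_sort_impl below):
+//
+//   #include <boost/move/algo/adaptive_sort.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   std::vector<int> v = {5, 1, 4, 1, 5, 9, 2, 6};
+//   //no external buffer is supplied, so the algorithm extracts unique keys and
+//   //buffer elements from the range itself and falls back to rotations if needed
+//   boost::movelib::adaptive_sort(v.begin(), v.end(), std::less<int>());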
template<class RandIt, class Compare>
void adaptive_sort_impl
( RandIt first
@@ -2093,7 +2156,7 @@ void adaptive_sort_impl
return;
}
- //Make sure it is at least two
+ //Make sure it is at least four
BOOST_STATIC_ASSERT(AdaptiveSortInsertionSortThreshold >= 4);
size_type l_base = 0;
@@ -2101,30 +2164,74 @@ void adaptive_sort_impl
size_type n_keys = 0;
size_type l_build_buf = 0;
- if(!build_params(first, len, comp, n_keys, l_intbuf, l_base, l_build_buf, xbuf)){
+ //Calculate and extract needed unique elements. If a minimum is not achieved
+ //fallback to rotation-based merge
+ if(!adaptive_sort_build_params(first, len, comp, n_keys, l_intbuf, l_base, l_build_buf, xbuf)){
stable_sort(first, first+len, comp, xbuf);
return;
}
- //Otherwise, continue in adaptive_sort
+ //Otherwise, continue the adaptive_sort
BOOST_MOVE_ADAPTIVE_SORT_PRINT("\n After collect_unique: ", len);
size_type const n_key_plus_buf = l_intbuf+n_keys;
//l_build_buf is always power of two if l_intbuf is zero
BOOST_ASSERT(l_intbuf || (0 == (l_build_buf & (l_build_buf-1))));
//Classic merge sort until internal buffer and xbuf are exhausted
- size_type const l_merged = build_blocks
+ size_type const l_merged = adaptive_sort_build_blocks
(first+n_key_plus_buf-l_build_buf, len-n_key_plus_buf+l_build_buf, l_base, l_build_buf, xbuf, comp);
BOOST_MOVE_ADAPTIVE_SORT_PRINT(" After build_blocks: ", len);
//Non-trivial merge
- bool const buffer_right = combine_all_blocks
+ bool const buffer_right = adaptive_sort_combine_all_blocks
(first, n_keys, first+n_keys, len-n_keys, l_merged, l_intbuf, xbuf, comp);
//Sort keys and buffer and merge the whole sequence
- final_merge(buffer_right, first, l_intbuf, n_keys, len, xbuf, comp);
+ adaptive_sort_final_merge(buffer_right, first, l_intbuf, n_keys, len, xbuf, comp);
}
+// Main explanation of the merge algorithm.
+//
+// csqrtlen = ceil(sqrt(len));
+//
+// * First, csqrtlen [to be used as buffer] + (len/csqrtlen - 1) [to be used as keys] => to_collect
+// unique elements are extracted from elements to be sorted and placed in the beginning of the range.
+//
+// * Step "combine_blocks": the leading (len1-to_collect) elements plus trailing len2 elements
+//   are merged with a non-trivial ("smart") algorithm to form an ordered range of the trailing "len-to_collect" elements.
+//
+// Explanation of the "combine_blocks" step:
+//
+// * Trailing [first+to_collect, first+len1) elements are divided in groups of csqrtlen elements.
+//   Remaining elements that can't form a group are grouped in front of those elements.
+// * Trailing [first+len1, first+len1+len2) elements are divided in groups of csqrtlen elements.
+//   Remaining elements that can't form a group are grouped at the back of those elements.
+// * Groups are selection-sorted by first or last element (depending on whether they
+//   are merged to the left or right) and keys are reordered accordingly as an imitation-buffer.
+// * Elements of each block pair are merged using the csqrtlen buffer, taking into account
+//   whether they belong to the first or second half (marked by the key).
+//
+// * In the final merge step leading "to_collect" elements are merged with rotations
+// with the rest of merged elements in the "combine_blocks" step.
+//
+// Corner cases:
+//
+// * If no "to_collect" elements can be extracted:
+//
+//    * If more than a minimum number of elements is extracted,
+//      then the number of elements used as buffer and keys in the
+//      "combine_blocks" step is reduced. If "combine_blocks" does not have enough keys due to this
+//      reduction, a rotation-based smart merge is used.
+//
+// * If the minimum number of keys can't be extracted, a rotation-based merge is performed.
+//
+//  * If auxiliary memory is greater than or equal to min(len1, len2), a buffered merge is performed.
+//
+//  * If len1 or len2 is less than 2*csqrtlen, a rotation-based merge is performed.
+//
+// * If auxiliary memory is more than csqrtlen+n_keys*sizeof(std::size_t),
+//    then no csqrtlen buffer elements need to be extracted and "combine_blocks" will use integral
+// keys to combine blocks.
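+//
+// A minimal usage sketch (assuming the public boost::movelib::adaptive_merge entry
+// point from boost/move/algo/adaptive_merge.hpp, which forwards to
+// adaptive_merge_impl below; the external buffer arguments are optional):
+//
+//   #include <boost/move/algo/adaptive_merge.hpp>
+//   #include <functional>
+//   #include <vector>
+//
+//   std::vector<int> v = {1, 3, 5, 7, 2, 4, 6, 8};   //two consecutive sorted halves
+//   boost::movelib::adaptive_merge(v.begin(), v.begin() + 4, v.end(), std::less<int>());
+//   //v is now fully sorted; without an external buffer the merge works in place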
template<class RandIt, class Compare>
void adaptive_merge_impl
( RandIt first
@@ -2144,134 +2251,43 @@ void adaptive_merge_impl
//Calculate ideal parameters and try to collect needed unique keys
size_type l_block = size_type(ceil_sqrt(len));
+   //One range is not big enough to extract keys and the internal buffer, so a
+   //rotation-based merge will do just fine
if(len1 <= l_block*2 || len2 <= l_block*2){
merge_bufferless(first, first+len1, first+len1+len2, comp);
return;
}
- size_type l_intbuf = xbuf.capacity() >= l_block ? 0u : l_block;
-
- //This is the minimum number of case to implement the ideal algorithm
- //ceil(len/l_block) - 1 (as the first block is used as buffer)
- size_type n_keys = l_block;
- while(n_keys >= (len-l_intbuf-n_keys)/l_block){
- --n_keys;
- }
- ++n_keys;
- BOOST_ASSERT(n_keys < l_block);
-
- if(xbuf.template supports_aligned_trailing<size_type>(l_block, n_keys)){
- n_keys = 0u;
- }
-
+   //Calculate the number of keys and the internal buffer size. If xbuf has enough memory, no
+   //internal buffer is needed, so l_intbuf will remain 0.
+ size_type l_intbuf = 0;
+ size_type n_keys = adaptive_merge_n_keys_intbuf(l_block, len, xbuf, l_intbuf);
size_type const to_collect = l_intbuf+n_keys;
- size_type const collected = collect_unique(first, first+len1, to_collect, comp, xbuf);
-
+ //Try to extract needed unique values from the first range
+ size_type const collected = collect_unique(first, first+len1, to_collect, comp, xbuf);
BOOST_MOVE_ADAPTIVE_SORT_PRINT("\n A collect: ", len);
+
+   //Not even the minimum number of keys is available in the first range, so fall back to rotations
if(collected != to_collect && collected < 4){
merge_bufferless(first, first+len1, first+len1+len2, comp);
+ return;
}
- else{
- bool use_internal_buf = true;
- if (collected != to_collect){
- l_intbuf = 0u;
- n_keys = collected;
- use_internal_buf = false;
- l_block = lblock_for_combine(l_intbuf, n_keys, len, use_internal_buf);
- l_intbuf = use_internal_buf ? l_block : 0u;
- }
-
- bool xbuf_used = collected == to_collect && xbuf.capacity() >= l_block;
- size_type const l_combine = len-collected;
- size_type const l_combine1 = len1-collected;
- size_type n_bef_irreg2, n_aft_irreg2, l_irreg1, l_irreg2, midkey_idx;
- if(n_keys){
- RandIt const first_data = first+collected;
- RandIt const keys = first;
- combine_params( keys, comp, first_data, l_combine
- , l_combine1, l_block, xbuf, comp
- , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true); //Outputs
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A combine: ", len);
- if(xbuf_used){
- merge_blocks_with_buf
- (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf, xbuf_used);
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg xbf: ", len);
- }
- else if(use_internal_buf){
- #define BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
- #ifdef BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
- range_xbuf<RandIt, swap_op> rbuf(first_data-l_block, first_data);
- merge_blocks_with_buf
- (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, rbuf, xbuf_used);
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg buf: ", len);
- #else
- merge_blocks_left
- (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf_used);
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg lft: ", len);
- #endif
- }
- else{
- merge_blocks_bufferless
- (keys, keys[midkey_idx], comp, first_data, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp);
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg bfl: ", len);
- }
- }
- else{
- xbuf.clear();
- size_type *const uint_keys = xbuf.template aligned_trailing<size_type>(l_block);
- combine_params( uint_keys, less(), first, l_combine
- , l_combine1, l_block, xbuf, comp
- , midkey_idx, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, true); //Outputs
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A combine: ", len);
- merge_blocks_with_buf
- (uint_keys, uint_keys[midkey_idx], less(), first, l_block, l_irreg1, n_bef_irreg2, n_aft_irreg2, l_irreg2, comp, xbuf, true);
- xbuf.clear();
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A mrg lft: ", len);
- }
-
- n_keys = collected-l_intbuf;
- if(use_internal_buf){
- if(xbuf_used){
- xbuf.clear();
- //Nothing to do
- if(n_keys){
- stable_sort(first, first+n_keys, comp, xbuf);
- stable_merge(first, first+n_keys, first+len, comp, xbuf);
- }
- }
- else{
- #ifdef BOOST_MOVE_ADAPTIVE_MERGE_WITH_BUF
- xbuf.clear();
- stable_sort(first, first+collected, comp, xbuf);
- stable_merge(first, first+collected, first+len, comp, xbuf);
- #else
- xbuf.clear();
- stable_sort(first+len-l_block, first+len, comp, xbuf);
- RandIt const pos1 = lower_bound(first+n_keys, first+len-l_block, first[len-1], comp);
- RandIt const pos2 = rotate_gcd(pos1, first+len-l_block, first+len);
- stable_merge(first+n_keys, pos1, pos2, antistable<Compare>(comp), xbuf);
- if(n_keys){
- stable_sort(first, first+n_keys, comp, xbuf);
- stable_merge(first, first+n_keys, first+len, comp, xbuf);
- }
- #endif
- }
-
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A buf mrg: ", len);
- }
- else{
- stable_sort(first, first+collected, comp, xbuf);
- xbuf.clear();
- if(xbuf.capacity() >= collected){
- buffered_merge(first, first+collected, first+len1+len2, comp, xbuf);
- }
- else{
- merge_bufferless(first, first+collected, first+len1+len2, comp);
- }
- }
- BOOST_MOVE_ADAPTIVE_SORT_PRINT(" A key mrg: ", len);
+   //If there are not enough keys, but more than the minimum, adjust the internal buffer and key count
+ bool use_internal_buf = collected == to_collect;
+ if (!use_internal_buf){
+ l_intbuf = 0u;
+ n_keys = collected;
+ l_block = lblock_for_combine(l_intbuf, n_keys, len, use_internal_buf);
+      //If use_internal_buf is false, then the internal buffer will be zero and rotation-based combination will be used
+ l_intbuf = use_internal_buf ? l_block : 0u;
}
+
+ bool const xbuf_used = collected == to_collect && xbuf.capacity() >= l_block;
+ //Merge trailing elements using smart merges
+ adaptive_merge_combine_blocks(first, len1, len2, collected, n_keys, l_block, use_internal_buf, xbuf_used, comp, xbuf);
+ //Merge buffer and keys with the rest of the values
+ adaptive_merge_final_merge (first, len1, len2, collected, l_intbuf, l_block, use_internal_buf, xbuf_used, comp, xbuf);
}
}
diff --git a/boost/move/algo/detail/basic_op.hpp b/boost/move/algo/detail/basic_op.hpp
index 936f7a2a3d..ea0ce1b8e0 100644
--- a/boost/move/algo/detail/basic_op.hpp
+++ b/boost/move/algo/detail/basic_op.hpp
@@ -21,12 +21,14 @@
#include <boost/move/utility_core.hpp>
#include <boost/move/adl_move_swap.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
namespace boost {
namespace movelib {
struct forward_t{};
struct backward_t{};
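+//Tag type to request a three-way element operation: move_op moves the first
+//destination's value into the second destination and the source into the first
+//destination; swap_op performs the same steps as a three-element cycle, so the
+//second destination's old value ends up in the source position.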
+struct three_way_t{};
struct move_op
{
@@ -41,6 +43,13 @@ struct move_op
template <class SourceIt, class DestinationIt>
DestinationIt operator()(backward_t, SourceIt first, SourceIt last, DestinationIt dest_last)
{ return ::boost::move_backward(first, last, dest_last); }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2>
+ void operator()(three_way_t, SourceIt srcit, DestinationIt1 dest1it, DestinationIt2 dest2it)
+ {
+ *dest2it = boost::move(*dest1it);
+ *dest1it = boost::move(*srcit);
+ }
};
struct swap_op
@@ -56,6 +65,15 @@ struct swap_op
template <class SourceIt, class DestinationIt>
DestinationIt operator()(backward_t, SourceIt first, SourceIt last, DestinationIt dest_begin)
{ return boost::adl_move_swap_ranges_backward(first, last, dest_begin); }
+
+ template <class SourceIt, class DestinationIt1, class DestinationIt2>
+ void operator()(three_way_t, SourceIt srcit, DestinationIt1 dest1it, DestinationIt2 dest2it)
+ {
+ typename ::boost::movelib::iterator_traits<SourceIt>::value_type tmp(boost::move(*dest2it));
+ *dest2it = boost::move(*dest1it);
+ *dest1it = boost::move(*srcit);
+ *srcit = boost::move(tmp);
+ }
};
}} //namespace boost::movelib
diff --git a/boost/move/algo/detail/merge.hpp b/boost/move/algo/detail/merge.hpp
index a0a5afa3e6..11d5740a6a 100644
--- a/boost/move/algo/detail/merge.hpp
+++ b/boost/move/algo/detail/merge.hpp
@@ -167,7 +167,7 @@ void op_merge_left( RandIt buf_first
op(forward_t(), first2, last2, buf_first);
return;
}
- else if(comp(*first2, *first1)){
+ else if(comp(*first2, *first1)){
op(first2, buf_first);
++first2;
}
@@ -214,7 +214,7 @@ void op_merge_right
{
RandIt const first2 = last1;
while(first1 != last1){
- if(last2 == first2){
+ if(last2 == first2){
op(backward_t(), first1, last1, buf_last);
return;
}
@@ -230,7 +230,7 @@ void op_merge_right
++last1;
}
}
- if(last2 != buf_last){ //In case all remaining elements are in the same place
+ if(last2 != buf_last){ //In case all remaining elements are in the same place
//(e.g. buffer is exactly the size of the first half
//and all elements from the second half are less)
op(backward_t(), first2, last2, buf_last);
@@ -257,9 +257,104 @@ void swap_merge_right
op_merge_right(first1, last1, last2, buf_last, comp, swap_op());
}
-// cost: min(L1,L2)^2+max(L1,L2)
+template <class BidirIt, class Distance, class Compare>
+void merge_bufferless_ONlogN_recursive
+ (BidirIt first, BidirIt middle, BidirIt last, Distance len1, Distance len2, Compare comp)
+{
+ typedef typename iterator_traits<BidirIt>::size_type size_type;
+ while(1) {
+ //#define MERGE_BUFFERLESS_RECURSIVE_OPT
+ #ifndef MERGE_BUFFERLESS_RECURSIVE_OPT
+ if (len2 == 0) {
+ return;
+ }
+
+ if (!len1) {
+ return;
+ }
+
+ if ((len1 | len2) == 1) {
+ if (comp(*middle, *first))
+ adl_move_swap(*first, *middle);
+ return;
+ }
+ #else
+ if (len2 == 0) {
+ return;
+ }
+
+ if (!len1) {
+ return;
+ }
+ BidirIt middle_prev = middle; --middle_prev;
+ if(!comp(*middle, *middle_prev))
+ return;
+
+ while(true) {
+ if (comp(*middle, *first))
+ break;
+ ++first;
+ if(--len1 == 1)
+ break;
+ }
+
+ if (len1 == 1 && len2 == 1) {
+ //comp(*middle, *first) == true already tested in the loop
+ adl_move_swap(*first, *middle);
+ return;
+ }
+ #endif
+
+ BidirIt first_cut = first;
+ BidirIt second_cut = middle;
+ Distance len11 = 0;
+ Distance len22 = 0;
+ if (len1 > len2) {
+ len11 = len1 / 2;
+ first_cut += len11;
+ second_cut = lower_bound(middle, last, *first_cut, comp);
+ len22 = size_type(second_cut - middle);
+ }
+ else {
+ len22 = len2 / 2;
+ second_cut += len22;
+ first_cut = upper_bound(first, middle, *second_cut, comp);
+ len11 = size_type(first_cut - first);
+ }
+ BidirIt new_middle = rotate_gcd(first_cut, middle, second_cut);
+
+      //Avoid one recursive call by doing manual tail-call elimination on the biggest range
+ const Distance len_internal = len11+len22;
+ if( len_internal < (len1 + len2 - len_internal) ) {
+ merge_bufferless_ONlogN_recursive(first, first_cut, new_middle, len11, len22, comp);
+ //merge_bufferless_recursive(new_middle, second_cut, last, len1 - len11, len2 - len22, comp);
+ first = new_middle;
+ middle = second_cut;
+ len1 -= len11;
+ len2 -= len22;
+ }
+ else {
+ //merge_bufferless_recursive(first, first_cut, new_middle, len11, len22, comp);
+ merge_bufferless_ONlogN_recursive(new_middle, second_cut, last, len1 - len11, len2 - len22, comp);
+ middle = first_cut;
+ last = new_middle;
+ len1 = len11;
+ len2 = len22;
+ }
+ }
+}
+
+//Complexity: NlogN
+template<class BidirIt, class Compare>
+void merge_bufferless_ONlogN(BidirIt first, BidirIt middle, BidirIt last, Compare comp)
+{
+ merge_bufferless_ONlogN_recursive
+ (first, middle, last, middle - first, last - middle, comp);
+}
+
+//Complexity: min(len1,len2)^2 + max(len1,len2)
template<class RandIt, class Compare>
-void merge_bufferless(RandIt first, RandIt middle, RandIt last, Compare comp)
+void merge_bufferless_ON2(RandIt first, RandIt middle, RandIt last, Compare comp)
{
if((middle - first) < (last - middle)){
while(first != middle){
@@ -271,12 +366,12 @@ void merge_bufferless(RandIt first, RandIt middle, RandIt last, Compare comp)
}
do{
++first;
- } while(first != middle && !comp(*middle, *first));
+ } while(first != middle && !comp(*middle, *first));
}
}
else{
while(middle != last){
- RandIt p = upper_bound(first, middle, last[-1], comp);
+ RandIt p = upper_bound(first, middle, last[-1], comp);
last = rotate_gcd(p, middle, last);
middle = p;
if(middle == first){
@@ -290,10 +385,21 @@ void merge_bufferless(RandIt first, RandIt middle, RandIt last, Compare comp)
}
}
+template<class RandIt, class Compare>
+void merge_bufferless(RandIt first, RandIt middle, RandIt last, Compare comp)
+{
+ //#define BOOST_ADAPTIVE_MERGE_NLOGN_MERGE
+ #ifdef BOOST_ADAPTIVE_MERGE_NLOGN_MERGE
+ merge_bufferless_ONlogN(first, middle, last, comp);
+ #else
+ merge_bufferless_ON2(first, middle, last, comp);
+ #endif //BOOST_ADAPTIVE_MERGE_NLOGN_MERGE
+}
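+
+//A minimal usage sketch (illustrative; both input ranges must already be sorted
+//according to comp):
+//
+//   int a[] = {1, 4, 9, 2, 3, 8};
+//   //merges [a, a+3) and [a+3, a+6) in place, without allocating memory
+//   boost::movelib::merge_bufferless(a, a + 3, a + 6, std::less<int>());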
+
template<class Comp>
struct antistable
{
- antistable(Comp &comp)
+ explicit antistable(Comp &comp)
: m_comp(comp)
{}
@@ -306,6 +412,49 @@ struct antistable
Comp &m_comp;
};
+template <class Comp>
+class negate
+{
+ public:
+ negate()
+ {}
+
+ explicit negate(Comp comp)
+ : m_comp(comp)
+ {}
+
+ template <class T1, class T2>
+ bool operator()(const T1& l, const T2& r)
+ {
+ return !m_comp(l, r);
+ }
+
+ private:
+ Comp m_comp;
+};
+
+
+template <class Comp>
+class inverse
+{
+ public:
+ inverse()
+ {}
+
+ explicit inverse(Comp comp)
+ : m_comp(comp)
+ {}
+
+ template <class T1, class T2>
+ bool operator()(const T1& l, const T2& r)
+ {
+ return m_comp(r, l);
+ }
+
+ private:
+ Comp m_comp;
+};
+
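+//For illustration: given a strict weak ordering comp, negate<Comp>(comp)(a, b)
+//yields !comp(a, b) (e.g. "not less" for std::less), while inverse<Comp>(comp)(a, b)
+//yields comp(b, a), i.e. the reversed strict ordering.
+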
// [r_first, r_last) are already in the right part of the destination range.
template <class Compare, class InputIterator, class InputOutIterator, class Op>
void op_merge_with_right_placed
diff --git a/boost/move/algo/detail/merge_sort.hpp b/boost/move/algo/detail/merge_sort.hpp
index 8101fce654..892639b05f 100644
--- a/boost/move/algo/detail/merge_sort.hpp
+++ b/boost/move/algo/detail/merge_sort.hpp
@@ -41,6 +41,21 @@ namespace movelib {
static const unsigned MergeSortInsertionSortThreshold = 16;
+template <class RandIt, class Compare>
+void inplace_stable_sort(RandIt first, RandIt last, Compare comp)
+{
+ typedef typename iterator_traits<RandIt>::size_type size_type;
+ if (size_type(last - first) <= size_type(MergeSortInsertionSortThreshold)) {
+ insertion_sort(first, last, comp);
+ return;
+ }
+ RandIt middle = first + (last - first) / 2;
+ inplace_stable_sort(first, middle, comp);
+ inplace_stable_sort(middle, last, comp);
+ merge_bufferless_ONlogN_recursive
+ (first, middle, last, size_type(middle - first), size_type(last - middle), comp);
+}
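+
+//A minimal usage sketch (illustrative): inplace_stable_sort is a bufferless stable
+//sort built on insertion_sort and merge_bufferless_ONlogN_recursive, so it needs no
+//extra memory beyond the stack.
+//
+//   int a[] = {3, 1, 2, 1};
+//   boost::movelib::inplace_stable_sort(a, a + 4, std::less<int>());
+//   //equal elements (the two 1s) keep their relative order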
+
// @endcond
template<class RandIt, class RandIt2, class Compare>
diff --git a/boost/move/algo/move.hpp b/boost/move/algo/move.hpp
index 943f286c94..d35f04a399 100644
--- a/boost/move/algo/move.hpp
+++ b/boost/move/algo/move.hpp
@@ -125,10 +125,10 @@ F uninitialized_move(I f, I l, F r
}
}
BOOST_CATCH(...){
- for (; back != r; ++back){
+ for (; back != r; ++back){
back->~input_value_type();
}
- BOOST_RETHROW;
+ BOOST_RETHROW;
}
BOOST_CATCH_END
return r;
diff --git a/boost/move/core.hpp b/boost/move/core.hpp
index c9cbec23a1..1dd8a8c271 100644
--- a/boost/move/core.hpp
+++ b/boost/move/core.hpp
@@ -260,8 +260,8 @@
#define BOOST_COPYABLE_AND_MOVABLE(TYPE)\
public:\
- TYPE& operator=(TYPE &t)\
- { this->operator=(const_cast<const TYPE &>(t)); return *this;}\
+ BOOST_MOVE_FORCEINLINE TYPE& operator=(TYPE &t)\
+ { this->operator=(const_cast<const TYPE&>(t)); return *this;}\
public:\
BOOST_MOVE_FORCEINLINE operator ::boost::rv<TYPE>&() \
{ return *BOOST_MOVE_TO_RV_CAST(::boost::rv<TYPE>*, this); }\
diff --git a/boost/move/detail/fwd_macros.hpp b/boost/move/detail/fwd_macros.hpp
index e091890dc5..7132436783 100644
--- a/boost/move/detail/fwd_macros.hpp
+++ b/boost/move/detail/fwd_macros.hpp
@@ -64,16 +64,22 @@ namespace move_detail {
#endif //!defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
//BOOST_MOVE_REPEATN(MACRO)
+#define BOOST_MOVE_REPEAT(x, MACRO) BOOST_MOVE_REPEAT_I(x,MACRO)
+#define BOOST_MOVE_REPEAT_I(x, MACRO) BOOST_MOVE_REPEAT##x(MACRO)
#define BOOST_MOVE_REPEAT0(MACRO)
-#define BOOST_MOVE_REPEAT1(MACRO) MACRO
-#define BOOST_MOVE_REPEAT2(MACRO) BOOST_MOVE_REPEAT1(MACRO), MACRO
-#define BOOST_MOVE_REPEAT3(MACRO) BOOST_MOVE_REPEAT2(MACRO), MACRO
-#define BOOST_MOVE_REPEAT4(MACRO) BOOST_MOVE_REPEAT3(MACRO), MACRO
-#define BOOST_MOVE_REPEAT5(MACRO) BOOST_MOVE_REPEAT4(MACRO), MACRO
-#define BOOST_MOVE_REPEAT6(MACRO) BOOST_MOVE_REPEAT5(MACRO), MACRO
-#define BOOST_MOVE_REPEAT7(MACRO) BOOST_MOVE_REPEAT6(MACRO), MACRO
-#define BOOST_MOVE_REPEAT8(MACRO) BOOST_MOVE_REPEAT7(MACRO), MACRO
-#define BOOST_MOVE_REPEAT9(MACRO) BOOST_MOVE_REPEAT8(MACRO), MACRO
+#define BOOST_MOVE_REPEAT1(MACRO) MACRO
+#define BOOST_MOVE_REPEAT2(MACRO) BOOST_MOVE_REPEAT1(MACRO), MACRO
+#define BOOST_MOVE_REPEAT3(MACRO) BOOST_MOVE_REPEAT2(MACRO), MACRO
+#define BOOST_MOVE_REPEAT4(MACRO) BOOST_MOVE_REPEAT3(MACRO), MACRO
+#define BOOST_MOVE_REPEAT5(MACRO) BOOST_MOVE_REPEAT4(MACRO), MACRO
+#define BOOST_MOVE_REPEAT6(MACRO) BOOST_MOVE_REPEAT5(MACRO), MACRO
+#define BOOST_MOVE_REPEAT7(MACRO) BOOST_MOVE_REPEAT6(MACRO), MACRO
+#define BOOST_MOVE_REPEAT8(MACRO) BOOST_MOVE_REPEAT7(MACRO), MACRO
+#define BOOST_MOVE_REPEAT9(MACRO) BOOST_MOVE_REPEAT8(MACRO), MACRO
+#define BOOST_MOVE_REPEAT10(MACRO) BOOST_MOVE_REPEAT9(MACRO), MACRO
+#define BOOST_MOVE_REPEAT11(MACRO) BOOST_MOVE_REPEAT10(MACRO), MACRO
+#define BOOST_MOVE_REPEAT12(MACRO) BOOST_MOVE_REPEAT11(MACRO), MACRO
+#define BOOST_MOVE_REPEAT13(MACRO) BOOST_MOVE_REPEAT12(MACRO), MACRO
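+//For illustration, BOOST_MOVE_REPEAT(3, MACRO) expands through BOOST_MOVE_REPEAT3
+//to "MACRO, MACRO, MACRO", i.e. a comma-separated repetition of the argument.
+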
//BOOST_MOVE_FWDN
#define BOOST_MOVE_FWD0
@@ -99,6 +105,54 @@ namespace move_detail {
#define BOOST_MOVE_FWDQ8 BOOST_MOVE_FWDQ7, ::boost::forward<Q7>(q7)
#define BOOST_MOVE_FWDQ9 BOOST_MOVE_FWDQ8, ::boost::forward<Q8>(q8)
+//BOOST_MOVE_TMPL_GETN
+#define BOOST_MOVE_TMPL_GET0
+#define BOOST_MOVE_TMPL_GET1 p.template get<0>()
+#define BOOST_MOVE_TMPL_GET2 BOOST_MOVE_TMPL_GET1, p.template get<1>()
+#define BOOST_MOVE_TMPL_GET3 BOOST_MOVE_TMPL_GET2, p.template get<2>()
+#define BOOST_MOVE_TMPL_GET4 BOOST_MOVE_TMPL_GET3, p.template get<3>()
+#define BOOST_MOVE_TMPL_GET5 BOOST_MOVE_TMPL_GET4, p.template get<4>()
+#define BOOST_MOVE_TMPL_GET6 BOOST_MOVE_TMPL_GET5, p.template get<5>()
+#define BOOST_MOVE_TMPL_GET7 BOOST_MOVE_TMPL_GET6, p.template get<6>()
+#define BOOST_MOVE_TMPL_GET8 BOOST_MOVE_TMPL_GET7, p.template get<7>()
+#define BOOST_MOVE_TMPL_GET9 BOOST_MOVE_TMPL_GET8, p.template get<8>()
+
+//BOOST_MOVE_TMPL_GETQN
+#define BOOST_MOVE_TMPL_GETQ0
+#define BOOST_MOVE_TMPL_GETQ1 q.template get<0>()
+#define BOOST_MOVE_TMPL_GETQ2 BOOST_MOVE_TMPL_GETQ1, q.template get<1>()
+#define BOOST_MOVE_TMPL_GETQ3 BOOST_MOVE_TMPL_GETQ2, q.template get<2>()
+#define BOOST_MOVE_TMPL_GETQ4 BOOST_MOVE_TMPL_GETQ3, q.template get<3>()
+#define BOOST_MOVE_TMPL_GETQ5 BOOST_MOVE_TMPL_GETQ4, q.template get<4>()
+#define BOOST_MOVE_TMPL_GETQ6 BOOST_MOVE_TMPL_GETQ5, q.template get<5>()
+#define BOOST_MOVE_TMPL_GETQ7 BOOST_MOVE_TMPL_GETQ6, q.template get<6>()
+#define BOOST_MOVE_TMPL_GETQ8 BOOST_MOVE_TMPL_GETQ7, q.template get<7>()
+#define BOOST_MOVE_TMPL_GETQ9 BOOST_MOVE_TMPL_GETQ8, q.template get<8>()
+
+//BOOST_MOVE_GET_IDXN
+#define BOOST_MOVE_GET_IDX0
+#define BOOST_MOVE_GET_IDX1 get<0>(p)
+#define BOOST_MOVE_GET_IDX2 BOOST_MOVE_GET_IDX1, get<1>(p)
+#define BOOST_MOVE_GET_IDX3 BOOST_MOVE_GET_IDX2, get<2>(p)
+#define BOOST_MOVE_GET_IDX4 BOOST_MOVE_GET_IDX3, get<3>(p)
+#define BOOST_MOVE_GET_IDX5 BOOST_MOVE_GET_IDX4, get<4>(p)
+#define BOOST_MOVE_GET_IDX6 BOOST_MOVE_GET_IDX5, get<5>(p)
+#define BOOST_MOVE_GET_IDX7 BOOST_MOVE_GET_IDX6, get<6>(p)
+#define BOOST_MOVE_GET_IDX8 BOOST_MOVE_GET_IDX7, get<7>(p)
+#define BOOST_MOVE_GET_IDX9 BOOST_MOVE_GET_IDX8, get<8>(p)
+
+//BOOST_MOVE_GET_IDXQN
+#define BOOST_MOVE_GET_IDXQ0
+#define BOOST_MOVE_GET_IDXQ1 get<0>(q)
+#define BOOST_MOVE_GET_IDXQ2 BOOST_MOVE_GET_IDXQ1, get<1>(q)
+#define BOOST_MOVE_GET_IDXQ3 BOOST_MOVE_GET_IDXQ2, get<2>(q)
+#define BOOST_MOVE_GET_IDXQ4 BOOST_MOVE_GET_IDXQ3, get<3>(q)
+#define BOOST_MOVE_GET_IDXQ5 BOOST_MOVE_GET_IDXQ4, get<4>(q)
+#define BOOST_MOVE_GET_IDXQ6 BOOST_MOVE_GET_IDXQ5, get<5>(q)
+#define BOOST_MOVE_GET_IDXQ7 BOOST_MOVE_GET_IDXQ6, get<6>(q)
+#define BOOST_MOVE_GET_IDXQ8 BOOST_MOVE_GET_IDXQ7, get<7>(q)
+#define BOOST_MOVE_GET_IDXQ9 BOOST_MOVE_GET_IDXQ8, get<8>(q)
+
//BOOST_MOVE_ARGN
#define BOOST_MOVE_ARG0
#define BOOST_MOVE_ARG1 p0
@@ -572,6 +626,45 @@ namespace move_detail {
#define BOOST_MOVE_I8 BOOST_MOVE_I1
#define BOOST_MOVE_I9 BOOST_MOVE_I1
+//BOOST_MOVE_BOOL
+# define BOOST_MOVE_BOOL(x) BOOST_MOVE_BOOL_I(x)
+# define BOOST_MOVE_BOOL_I(x) BOOST_MOVE_BOOL##x
+# define BOOST_MOVE_BOOL0 0
+# define BOOST_MOVE_BOOL1 1
+# define BOOST_MOVE_BOOL2 1
+# define BOOST_MOVE_BOOL3 1
+# define BOOST_MOVE_BOOL4 1
+# define BOOST_MOVE_BOOL5 1
+# define BOOST_MOVE_BOOL6 1
+# define BOOST_MOVE_BOOL7 1
+# define BOOST_MOVE_BOOL8 1
+# define BOOST_MOVE_BOOL9 1
+
+//BOOST_MOVE_I_IF
+#define BOOST_MOVE_I_IF(x) BOOST_MOVE_I_IF_I (BOOST_MOVE_BOOL(x))
+#define BOOST_MOVE_I_IF_I(x) BOOST_MOVE_I_IF_I2(x)
+#define BOOST_MOVE_I_IF_I2(x) BOOST_MOVE_IF_I_##x
+#define BOOST_MOVE_IF_I_0
+#define BOOST_MOVE_IF_I_1 ,
+
+//BOOST_MOVE_IF
+#define BOOST_MOVE_IF(cond, t, f) BOOST_MOVE_IF_I(cond, t, f)
+#define BOOST_MOVE_IF_I(cond, t, f) BOOST_MOVE_IIF(BOOST_MOVE_BOOL(cond), t, f)
+
+#define BOOST_MOVE_IIF(bit, t, f) BOOST_MOVE_IIF_I(bit, t, f)
+#define BOOST_MOVE_IIF_I(bit, t, f) BOOST_MOVE_IIF_##bit(t, f)
+#define BOOST_MOVE_IIF_0(t, f) f
+#define BOOST_MOVE_IIF_1(t, f) t
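+
+//For illustration, BOOST_MOVE_IF(3, t, f) normalizes the condition through
+//BOOST_MOVE_BOOL (any non-zero single digit becomes 1) and expands to "t",
+//while BOOST_MOVE_IF(0, t, f) expands to "f".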
+
+/*
+#define BOOST_MOVE_IIF(bit, t, f) BOOST_MOVE_IIF_OO((bit, t, f))
+#define BOOST_MOVE_IIF_OO(par) BOOST_MOVE_IIF_I ## par
+#define BOOST_MOVE_IIF_I(bit, t, f) BOOST_MOVE_IIF_II(BOOST_MOVE_IIF_ ## bit(t, f))
+#define BOOST_MOVE_IIF_II(id) id
+#define BOOST_MOVE_IIF_0(t, f) f
+#define BOOST_MOVE_IIF_1(t, f) t
+*/
+
//BOOST_MOVE_COLON
#define BOOST_MOVE_COLON0
#define BOOST_MOVE_COLON1 :
@@ -584,6 +677,103 @@ namespace move_detail {
#define BOOST_MOVE_COLON8 BOOST_MOVE_COLON1
#define BOOST_MOVE_COLON9 BOOST_MOVE_COLON1
+//BOOST_MOVE_BITOR
+#define BOOST_MOVE_BITOR(x,y) BOOST_MOVE_BITOR_I(x,y)
+#define BOOST_MOVE_BITOR_I(x,y) BOOST_MOVE_BITOR##x##y
+#define BOOST_MOVE_BITOR00 0
+#define BOOST_MOVE_BITOR01 1
+#define BOOST_MOVE_BITOR10 1
+#define BOOST_MOVE_BITOR11 1
+
+//BOOST_MOVE_OR
+#define BOOST_MOVE_OR(x, y) BOOST_MOVE_OR_I(x, y)
+#define BOOST_MOVE_OR_I(x, y) BOOST_MOVE_BITOR(BOOST_MOVE_BOOL(x), BOOST_MOVE_BOOL(y))
+
+//BOOST_MOVE_BITAND
+#define BOOST_MOVE_BITAND(x,y) BOOST_MOVE_BITAND_I(x,y)
+#define BOOST_MOVE_BITAND_I(x,y) BOOST_MOVE_BITAND##x##y
+#define BOOST_MOVE_BITAND00 0
+#define BOOST_MOVE_BITAND01 0
+#define BOOST_MOVE_BITAND10 0
+#define BOOST_MOVE_BITAND11 1
+
+//BOOST_MOVE_AND
+#define BOOST_MOVE_AND(x, y) BOOST_MOVE_AND_I(x, y)
+#define BOOST_MOVE_AND_I(x, y) BOOST_MOVE_BITAND(BOOST_MOVE_BOOL(x), BOOST_MOVE_BOOL(y))
+
+//BOOST_MOVE_DEC
+#define BOOST_MOVE_DEC(x) BOOST_MOVE_DEC_I(x)
+#define BOOST_MOVE_DEC_I(x) BOOST_MOVE_DEC##x
+#define BOOST_MOVE_DEC1 0
+#define BOOST_MOVE_DEC2 1
+#define BOOST_MOVE_DEC3 2
+#define BOOST_MOVE_DEC4 3
+#define BOOST_MOVE_DEC5 4
+#define BOOST_MOVE_DEC6 5
+#define BOOST_MOVE_DEC7 6
+#define BOOST_MOVE_DEC8 7
+#define BOOST_MOVE_DEC9 8
+#define BOOST_MOVE_DEC10 9
+#define BOOST_MOVE_DEC11 10
+#define BOOST_MOVE_DEC12 11
+#define BOOST_MOVE_DEC13 12
+#define BOOST_MOVE_DEC14 13
+
+//BOOST_MOVE_SUB
+#define BOOST_MOVE_SUB(x, y) BOOST_MOVE_SUB_I(x,y)
+#define BOOST_MOVE_SUB_I(x, y) BOOST_MOVE_SUB##y(x)
+#define BOOST_MOVE_SUB0(x) x
+#define BOOST_MOVE_SUB1(x) BOOST_MOVE_DEC(x)
+#define BOOST_MOVE_SUB2(x) BOOST_MOVE_SUB1(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB3(x) BOOST_MOVE_SUB2(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB4(x) BOOST_MOVE_SUB3(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB5(x) BOOST_MOVE_SUB4(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB6(x) BOOST_MOVE_SUB5(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB7(x) BOOST_MOVE_SUB6(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB8(x) BOOST_MOVE_SUB7(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB9(x) BOOST_MOVE_SUB8(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB10(x) BOOST_MOVE_SUB9(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB11(x) BOOST_MOVE_SUB10(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB12(x) BOOST_MOVE_SUB11(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB13(x) BOOST_MOVE_SUB12(BOOST_MOVE_DEC(x))
+#define BOOST_MOVE_SUB14(x) BOOST_MOVE_SUB13(BOOST_MOVE_DEC(x))
+
+//BOOST_MOVE_INC
+#define BOOST_MOVE_INC(x) BOOST_MOVE_INC_I(x)
+#define BOOST_MOVE_INC_I(x) BOOST_MOVE_INC##x
+#define BOOST_MOVE_INC0 1
+#define BOOST_MOVE_INC1 2
+#define BOOST_MOVE_INC2 3
+#define BOOST_MOVE_INC3 4
+#define BOOST_MOVE_INC4 5
+#define BOOST_MOVE_INC5 6
+#define BOOST_MOVE_INC6 7
+#define BOOST_MOVE_INC7 8
+#define BOOST_MOVE_INC8 9
+#define BOOST_MOVE_INC9 10
+#define BOOST_MOVE_INC10 11
+#define BOOST_MOVE_INC11 12
+#define BOOST_MOVE_INC12 13
+#define BOOST_MOVE_INC13 14
+
+//BOOST_MOVE_ADD
+#define BOOST_MOVE_ADD(x, y) BOOST_MOVE_ADD_I(x,y)
+#define BOOST_MOVE_ADD_I(x, y) BOOST_MOVE_ADD##y(x)
+#define BOOST_MOVE_ADD0(x) x
+#define BOOST_MOVE_ADD1(x) BOOST_MOVE_INC(x)
+#define BOOST_MOVE_ADD2(x) BOOST_MOVE_ADD1(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD3(x) BOOST_MOVE_ADD2(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD4(x) BOOST_MOVE_ADD3(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD5(x) BOOST_MOVE_ADD4(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD6(x) BOOST_MOVE_ADD5(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD7(x) BOOST_MOVE_ADD6(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD8(x) BOOST_MOVE_ADD7(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD9(x) BOOST_MOVE_ADD8(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD10(x) BOOST_MOVE_ADD9(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD11(x) BOOST_MOVE_ADD10(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD12(x) BOOST_MOVE_ADD11(BOOST_MOVE_INC(x))
+#define BOOST_MOVE_ADD13(x) BOOST_MOVE_ADD12(BOOST_MOVE_INC(x))
+
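+//For illustration: BOOST_MOVE_ADD(3, 2) expands via BOOST_MOVE_ADD2(3) to two
+//nested BOOST_MOVE_INC applications, yielding 5; BOOST_MOVE_SUB(5, 2) likewise
+//yields 3. The tables above only cover small operands, so values outside the
+//listed range do not expand.
+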
//BOOST_MOVE_ITERATE_2TON
#define BOOST_MOVE_ITERATE_2TO2(MACROFUNC) MACROFUNC(2)
#define BOOST_MOVE_ITERATE_2TO3(MACROFUNC) BOOST_MOVE_ITERATE_2TO2(MACROFUNC) MACROFUNC(3)
@@ -618,7 +808,6 @@ namespace move_detail {
#define BOOST_MOVE_ITERATE_0TO9(MACROFUNC) BOOST_MOVE_ITERATE_0TO8(MACROFUNC) MACROFUNC(9)
//BOOST_MOVE_ITERATE_NTON
-#define BOOST_MOVE_ITERATE_0TO0(MACROFUNC) MACROFUNC(0)
#define BOOST_MOVE_ITERATE_1TO1(MACROFUNC) MACROFUNC(1)
#define BOOST_MOVE_ITERATE_2TO2(MACROFUNC) MACROFUNC(2)
#define BOOST_MOVE_ITERATE_3TO3(MACROFUNC) MACROFUNC(3)
@@ -629,28 +818,34 @@ namespace move_detail {
#define BOOST_MOVE_ITERATE_8TO8(MACROFUNC) MACROFUNC(8)
#define BOOST_MOVE_ITERATE_9TO9(MACROFUNC) MACROFUNC(9)
-//BOOST_MOVE_ITER2D_0TO9
-#define BOOST_MOVE_ITER2DLOW_0TO0(MACROFUNC2D, M) MACROFUNC2D(M, 0)
-#define BOOST_MOVE_ITER2DLOW_0TO1(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO0(MACROFUNC2D, M) MACROFUNC2D(M, 1)
-#define BOOST_MOVE_ITER2DLOW_0TO2(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO1(MACROFUNC2D, M) MACROFUNC2D(M, 2)
-#define BOOST_MOVE_ITER2DLOW_0TO3(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO2(MACROFUNC2D, M) MACROFUNC2D(M, 3)
-#define BOOST_MOVE_ITER2DLOW_0TO4(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO3(MACROFUNC2D, M) MACROFUNC2D(M, 4)
-#define BOOST_MOVE_ITER2DLOW_0TO5(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO4(MACROFUNC2D, M) MACROFUNC2D(M, 5)
-#define BOOST_MOVE_ITER2DLOW_0TO6(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO5(MACROFUNC2D, M) MACROFUNC2D(M, 6)
-#define BOOST_MOVE_ITER2DLOW_0TO7(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO6(MACROFUNC2D, M) MACROFUNC2D(M, 7)
-#define BOOST_MOVE_ITER2DLOW_0TO8(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO7(MACROFUNC2D, M) MACROFUNC2D(M, 8)
-#define BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TO8(MACROFUNC2D, M) MACROFUNC2D(M, 9)
-//
-#define BOOST_MOVE_ITER2D_0TO0(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 0)
-#define BOOST_MOVE_ITER2D_0TO1(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO0(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 1)
-#define BOOST_MOVE_ITER2D_0TO2(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO1(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 2)
-#define BOOST_MOVE_ITER2D_0TO3(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO2(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 3)
-#define BOOST_MOVE_ITER2D_0TO4(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO3(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 4)
-#define BOOST_MOVE_ITER2D_0TO5(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO4(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 5)
-#define BOOST_MOVE_ITER2D_0TO6(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO5(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 6)
-#define BOOST_MOVE_ITER2D_0TO7(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO6(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 7)
-#define BOOST_MOVE_ITER2D_0TO8(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO7(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 8)
-#define BOOST_MOVE_ITER2D_0TO9(MACROFUNC2D) BOOST_MOVE_ITER2D_0TO8(MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TO9(MACROFUNC2D, 9)
+//BOOST_MOVE_ITER2D_0TOMAX
+#define BOOST_MOVE_ITER2DLOW_0TOMAX0(MACROFUNC2D, M) MACROFUNC2D(M, 0)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX1(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX0(MACROFUNC2D, M) MACROFUNC2D(M, 1)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX2(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX1(MACROFUNC2D, M) MACROFUNC2D(M, 2)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX3(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX2(MACROFUNC2D, M) MACROFUNC2D(M, 3)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX4(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX3(MACROFUNC2D, M) MACROFUNC2D(M, 4)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX5(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX4(MACROFUNC2D, M) MACROFUNC2D(M, 5)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX6(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX5(MACROFUNC2D, M) MACROFUNC2D(M, 6)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX7(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX6(MACROFUNC2D, M) MACROFUNC2D(M, 7)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX8(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX7(MACROFUNC2D, M) MACROFUNC2D(M, 8)
+#define BOOST_MOVE_ITER2DLOW_0TOMAX9(MACROFUNC2D, M) BOOST_MOVE_ITER2DLOW_0TOMAX8(MACROFUNC2D, M) MACROFUNC2D(M, 9)
+
+#define BOOST_MOVE_ITER2D_0TOMAX0(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 0)
+#define BOOST_MOVE_ITER2D_0TOMAX1(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX0(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 1)
+#define BOOST_MOVE_ITER2D_0TOMAX2(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX1(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 2)
+#define BOOST_MOVE_ITER2D_0TOMAX3(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX2(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 3)
+#define BOOST_MOVE_ITER2D_0TOMAX4(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX3(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 4)
+#define BOOST_MOVE_ITER2D_0TOMAX5(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX4(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 5)
+#define BOOST_MOVE_ITER2D_0TOMAX6(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX5(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 6)
+#define BOOST_MOVE_ITER2D_0TOMAX7(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX6(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 7)
+#define BOOST_MOVE_ITER2D_0TOMAX8(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX7(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 8)
+#define BOOST_MOVE_ITER2D_0TOMAX9(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX8(MAX, MACROFUNC2D) BOOST_MOVE_ITER2DLOW_0TOMAX##MAX(MACROFUNC2D, 9)
+
+#define BOOST_MOVE_ITER2D_0TOMAX(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX_I (MAX, MACROFUNC2D)
+#define BOOST_MOVE_ITER2D_0TOMAX_I(MAX, MACROFUNC2D) BOOST_MOVE_ITER2D_0TOMAX##MAX(MAX, MACROFUNC2D)
+
+
+
//BOOST_MOVE_CAT
#define BOOST_MOVE_CAT(a, b) BOOST_MOVE_CAT_I(a, b)
diff --git a/boost/move/detail/meta_utils_core.hpp b/boost/move/detail/meta_utils_core.hpp
index 4d715a060a..40dbb6efc3 100644
--- a/boost/move/detail/meta_utils_core.hpp
+++ b/boost/move/detail/meta_utils_core.hpp
@@ -114,6 +114,18 @@ struct is_same<T, T>
static const bool value = true;
};
+//////////////////////////////////////
+// enable_if_same
+//////////////////////////////////////
+template <class T, class U, class R = void>
+struct enable_if_same : enable_if<is_same<T, U>, R> {};
+
+//////////////////////////////////////
+// disable_if_same
+//////////////////////////////////////
+template <class T, class U, class R = void>
+struct disable_if_same : disable_if<is_same<T, U>, R> {};
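+
+//Illustrative usage sketch: these helpers enable or disable an overload only when
+//two types are identical, e.g.
+//
+//   template<class U>
+//   typename enable_if_same<U, int, void>::type only_for_int(U u);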
+
} //namespace move_detail {
} //namespace boost {
diff --git a/boost/move/detail/reverse_iterator.hpp b/boost/move/detail/reverse_iterator.hpp
new file mode 100644
index 0000000000..73f59ce79f
--- /dev/null
+++ b/boost/move/detail/reverse_iterator.hpp
@@ -0,0 +1,171 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2014-2014
+//
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/move for documentation.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+#ifndef BOOST_MOVE_DETAIL_REVERSE_ITERATOR_HPP
+#define BOOST_MOVE_DETAIL_REVERSE_ITERATOR_HPP
+
+#ifndef BOOST_CONFIG_HPP
+# include <boost/config.hpp>
+#endif
+
+#if defined(BOOST_HAS_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <boost/move/detail/config_begin.hpp>
+#include <boost/move/detail/iterator_traits.hpp>
+#include <boost/move/detail/meta_utils.hpp>
+
+namespace boost {
+namespace movelib {
+
+template<class It>
+class reverse_iterator
+{
+ public:
+ typedef typename boost::movelib::iterator_traits<It>::pointer pointer;
+ typedef typename boost::movelib::iterator_traits<It>::reference reference;
+ typedef typename boost::movelib::iterator_traits<It>::difference_type difference_type;
+ typedef typename boost::movelib::iterator_traits<It>::iterator_category iterator_category;
+ typedef typename boost::movelib::iterator_traits<It>::value_type value_type;
+
+
+ typedef It iterator_type;
+
+ reverse_iterator()
+ : m_current() //Value initialization to achieve "null iterators" (N3644)
+ {}
+
+ explicit reverse_iterator(It r)
+ : m_current(r)
+ {}
+
+ reverse_iterator(const reverse_iterator& r)
+ : m_current(r.base())
+ {}
+
+ template<class OtherIt>
+ reverse_iterator( const reverse_iterator<OtherIt>& r
+ , typename boost::move_detail::enable_if_convertible<OtherIt, It>::type* =0
+ )
+ : m_current(r.base())
+ {}
+
+ reverse_iterator & operator=( const reverse_iterator& r)
+ { m_current = r.base(); return *this; }
+
+ template<class OtherIt>
+ typename boost::move_detail::enable_if_convertible<OtherIt, It, reverse_iterator &>::type
+ operator=( const reverse_iterator<OtherIt>& r)
+ { m_current = r.base(); return *this; }
+
+ It base() const
+ { return m_current; }
+
+ reference operator*() const
+ {
+ It temp(m_current);
+ --temp;
+ reference r = *temp;
+ return r;
+ }
+
+ pointer operator->() const
+ {
+ It temp(m_current);
+ --temp;
+ return iterator_arrow_result(temp);
+ }
+
+ reference operator[](difference_type off) const
+ {
+ return this->m_current[-off - 1];
+ }
+
+ reverse_iterator& operator++()
+ {
+ --m_current;
+ return *this;
+ }
+
+ reverse_iterator operator++(int)
+ {
+ reverse_iterator temp((*this));
+ --m_current;
+ return temp;
+ }
+
+ reverse_iterator& operator--()
+ {
+ ++m_current;
+ return *this;
+ }
+
+ reverse_iterator operator--(int)
+ {
+ reverse_iterator temp((*this));
+ ++m_current;
+ return temp;
+ }
+
+ friend bool operator==(const reverse_iterator& l, const reverse_iterator& r)
+ { return l.m_current == r.m_current; }
+
+ friend bool operator!=(const reverse_iterator& l, const reverse_iterator& r)
+ { return l.m_current != r.m_current; }
+
+ friend bool operator<(const reverse_iterator& l, const reverse_iterator& r)
+ { return l.m_current > r.m_current; }
+
+ friend bool operator<=(const reverse_iterator& l, const reverse_iterator& r)
+ { return l.m_current >= r.m_current; }
+
+ friend bool operator>(const reverse_iterator& l, const reverse_iterator& r)
+ { return l.m_current < r.m_current; }
+
+ friend bool operator>=(const reverse_iterator& l, const reverse_iterator& r)
+ { return l.m_current <= r.m_current; }
+
+ reverse_iterator& operator+=(difference_type off)
+ { m_current -= off; return *this; }
+
+ reverse_iterator& operator-=(difference_type off)
+ { m_current += off; return *this; }
+
+ friend reverse_iterator operator+(reverse_iterator l, difference_type off)
+ { return (l += off); }
+
+ friend reverse_iterator operator+(difference_type off, reverse_iterator r)
+ { return (r += off); }
+
+ friend reverse_iterator operator-(reverse_iterator l, difference_type off)
+ { return (l-= off); }
+
+ friend difference_type operator-(const reverse_iterator& l, const reverse_iterator& r)
+ { return r.m_current - l.m_current; }
+
+ private:
+ It m_current; // the wrapped iterator
+};
+
+template< class Iterator >
+reverse_iterator<Iterator> make_reverse_iterator( Iterator i )
+{
+ return reverse_iterator<Iterator>(i);
+}
+
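+//A minimal usage sketch (illustrative): wrapping an iterator walks the sequence
+//backwards, mirroring std::reverse_iterator:
+//
+//   int a[] = {1, 2, 3};
+//   boost::movelib::reverse_iterator<int*> r = boost::movelib::make_reverse_iterator(a + 3);
+//   // *r == 3, *(r + 1) == 2, r.base() == a + 3
+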
+} //namespace movelib {
+} //namespace boost {
+
+#include <boost/move/detail/config_end.hpp>
+
+#endif //BOOST_MOVE_DETAIL_REVERSE_ITERATOR_HPP
diff --git a/boost/move/detail/type_traits.hpp b/boost/move/detail/type_traits.hpp
index 816fdca7b2..1b5d8388e4 100644
--- a/boost/move/detail/type_traits.hpp
+++ b/boost/move/detail/type_traits.hpp
@@ -55,8 +55,10 @@
// BOOST_MOVE_IS_POD(T) should evaluate to true if T is a POD type
// BOOST_MOVE_HAS_TRIVIAL_CONSTRUCTOR(T) should evaluate to true if "T x;" has no effect
// BOOST_MOVE_HAS_TRIVIAL_COPY(T) should evaluate to true if T(t) <==> memcpy
+// (Note: this trait does not guarantee T is copy constructible, the copy constructor could be deleted but still be trivial)
// BOOST_MOVE_HAS_TRIVIAL_MOVE_CONSTRUCTOR(T) should evaluate to true if T(boost::move(t)) <==> memcpy
// BOOST_MOVE_HAS_TRIVIAL_ASSIGN(T) should evaluate to true if t = u <==> memcpy
+// (Note: this trait does not guarantee T is assignable, the copy assignment could be deleted but still be trivial)
// BOOST_MOVE_HAS_TRIVIAL_MOVE_ASSIGN(T) should evaluate to true if t = boost::move(u) <==> memcpy
// BOOST_MOVE_HAS_TRIVIAL_DESTRUCTOR(T) should evaluate to true if ~T() has no effect
// BOOST_MOVE_HAS_NOTHROW_CONSTRUCTOR(T) should evaluate to true if "T x;" can not throw
@@ -117,9 +119,7 @@
# define BOOST_MOVE_HAS_TRIVIAL_CONSTRUCTOR(T) __has_trivial_constructor(T)
# endif
# if __has_feature(has_trivial_copy)
-# //There are problems with deleted copy constructors detected as trivially copyable.
-# //http://stackoverflow.com/questions/12754886/has-trivial-copy-behaves-differently-in-clang-and-gcc-whos-right
-# define BOOST_MOVE_HAS_TRIVIAL_COPY(T) (__has_trivial_copy(T) && ::boost::move_detail::is_copy_constructible<T>::value)
+# define BOOST_MOVE_HAS_TRIVIAL_COPY(T) __has_trivial_copy(T)
# endif
# if __has_feature(has_trivial_assign)
# define BOOST_MOVE_HAS_TRIVIAL_ASSIGN(T) (__has_trivial_assign(T) )
@@ -235,7 +235,9 @@
#endif
#ifdef BOOST_MOVE_HAS_TRIVIAL_COPY
- #define BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) BOOST_MOVE_HAS_TRIVIAL_COPY(T)
+ #define BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) ::boost::move_detail::is_pod<T>::value ||\
+ (::boost::move_detail::is_copy_constructible<T>::value &&\
+ BOOST_MOVE_HAS_TRIVIAL_COPY(T))
#else
#define BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) ::boost::move_detail::is_pod<T>::value
#endif
@@ -246,12 +248,6 @@
#define BOOST_MOVE_IS_TRIVIALLY_DEFAULT_CONSTRUCTIBLE(T) ::boost::move_detail::is_pod<T>::value
#endif
-#ifdef BOOST_MOVE_HAS_TRIVIAL_COPY
- #define BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) BOOST_MOVE_HAS_TRIVIAL_COPY(T)
-#else
- #define BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) ::boost::move_detail::is_pod<T>::value
-#endif
-
#ifdef BOOST_MOVE_HAS_TRIVIAL_MOVE_CONSTRUCTOR
#define BOOST_MOVE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T) BOOST_MOVE_HAS_TRIVIAL_MOVE_CONSTRUCTOR(T)
#else
@@ -259,7 +255,9 @@
#endif
#ifdef BOOST_MOVE_HAS_TRIVIAL_ASSIGN
- #define BOOST_MOVE_IS_TRIVIALLY_COPY_ASSIGNABLE(T) BOOST_MOVE_HAS_TRIVIAL_ASSIGN(T)
+ #define BOOST_MOVE_IS_TRIVIALLY_COPY_ASSIGNABLE(T) ::boost::move_detail::is_pod<T>::value ||\
+ ( ::boost::move_detail::is_copy_assignable<T>::value &&\
+ BOOST_MOVE_HAS_TRIVIAL_ASSIGN(T))
#else
#define BOOST_MOVE_IS_TRIVIALLY_COPY_ASSIGNABLE(T) ::boost::move_detail::is_pod<T>::value
#endif
@@ -821,9 +819,7 @@ struct is_trivially_copy_constructible
{
//In several compilers BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE return true even with
//deleted copy constructors so make sure the type is copy constructible.
- static const bool value = ::boost::move_detail::is_pod<T>::value ||
- ( ::boost::move_detail::is_copy_constructible<T>::value &&
- BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) );
+ static const bool value = BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T);
};
//////////////////////////////////////
@@ -831,7 +827,7 @@ struct is_trivially_copy_constructible
//////////////////////////////////////
template<class T>
struct is_trivially_move_constructible
-{ static const bool value = BOOST_MOVE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T); };
+{ static const bool value = BOOST_MOVE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T); };
//////////////////////////////////////
// is_trivially_copy_assignable
@@ -841,9 +837,7 @@ struct is_trivially_copy_assignable
{
//In several compilers BOOST_MOVE_IS_TRIVIALLY_COPY_CONSTRUCTIBLE return true even with
//deleted copy constructors so make sure the type is copy constructible.
- static const bool value = ::boost::move_detail::is_pod<T>::value ||
- ( ::boost::move_detail::is_copy_assignable<T>::value &&
- BOOST_MOVE_IS_TRIVIALLY_COPY_ASSIGNABLE(T) );
+ static const bool value = BOOST_MOVE_IS_TRIVIALLY_COPY_ASSIGNABLE(T);
};
//////////////////////////////////////
@@ -1005,7 +999,7 @@ BOOST_MOVE_ALIGNED_STORAGE_WITH_BOOST_ALIGNMENT(0x1000)
template<class T, std::size_t Len>
union aligned_union
-{
+{
T aligner;
char dummy[Len];
};
@@ -1023,7 +1017,7 @@ struct aligned_next<Len, Align, T, true>
//End of search defaults to max_align_t
template<std::size_t Len, std::size_t Align>
struct aligned_next<Len, Align, max_align_t, false>
-{ typedef aligned_union<max_align_t, Len> type; };
+{ typedef aligned_union<max_align_t, Len> type; };
//Now define a search list through types
#define BOOST_MOVE_ALIGNED_NEXT_STEP(TYPE, NEXT_TYPE)\