Diffstat (limited to 'boost/mpi')
 boost/mpi/collectives/all_gather.hpp        |   4
 boost/mpi/collectives/gather.hpp            |   4
 boost/mpi/collectives/scatter.hpp           |   4
 boost/mpi/communicator.hpp                  | 483
 boost/mpi/config.hpp                        |  31
 boost/mpi/datatype.hpp                      |  12
 boost/mpi/detail/antiques.hpp               |   4
 boost/mpi/detail/broadcast_sc.hpp           |  17
 boost/mpi/detail/communicator_sc.hpp        |  38
 boost/mpi/detail/mpi_datatype_primitive.hpp |   6
 boost/mpi/detail/offsets.hpp                |   5
 boost/mpi/detail/point_to_point.hpp         |  27
 boost/mpi/detail/request_handlers.hpp       | 631
 boost/mpi/nonblocking.hpp                   |  63
 boost/mpi/operations.hpp                    |   5
 boost/mpi/request.hpp                       | 136
 boost/mpi/skeleton_and_content.hpp          | 330
 boost/mpi/skeleton_and_content_types.hpp    | 363
 boost/mpi/status.hpp                        |   1
 19 files changed, 1259 insertions(+), 905 deletions(-)
diff --git a/boost/mpi/collectives/all_gather.hpp b/boost/mpi/collectives/all_gather.hpp
index 4adaeb9c87..cf75f329fc 100644
--- a/boost/mpi/collectives/all_gather.hpp
+++ b/boost/mpi/collectives/all_gather.hpp
@@ -57,8 +57,8 @@ all_gather_impl(const communicator& comm, const T* in_values, int n,
std::vector<int> oasizes(nproc);
int oasize = oa.size();
BOOST_MPI_CHECK_RESULT(MPI_Allgather,
- (&oasize, 1, MPI_INTEGER,
- c_data(oasizes), 1, MPI_INTEGER,
+ (&oasize, 1, MPI_INT,
+ c_data(oasizes), 1, MPI_INT,
MPI_Comm(comm)));
// Gather the archives, which can be of different sizes, so
// we need to use allgatherv.
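
Note: MPI_INTEGER is the Fortran INTEGER datatype; the matching datatype for a
C++ "int" buffer is MPI_INT, which is what the fix above uses. For reference, a
minimal sketch of such a size exchange in plain MPI (not part of the patch):

    #include <mpi.h>
    #include <vector>

    // Every rank contributes one int (its local archive size) and receives
    // the full table of sizes, one entry per rank.
    void gather_sizes(MPI_Comm comm, int local_size, std::vector<int>& sizes)
    {
      int nproc;
      MPI_Comm_size(comm, &nproc);
      sizes.resize(nproc);
      MPI_Allgather(&local_size, 1, MPI_INT,
                    sizes.data(), 1, MPI_INT, comm);
    }
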
diff --git a/boost/mpi/collectives/gather.hpp b/boost/mpi/collectives/gather.hpp
index 386bfdd1a1..34f37997ba 100644
--- a/boost/mpi/collectives/gather.hpp
+++ b/boost/mpi/collectives/gather.hpp
@@ -69,8 +69,8 @@ gather_impl(const communicator& comm, const T* in_values, int n, T* out_values,
std::vector<int> oasizes(is_root ? nproc : 0);
int oasize = oa.size();
BOOST_MPI_CHECK_RESULT(MPI_Gather,
- (&oasize, 1, MPI_INTEGER,
- c_data(oasizes), 1, MPI_INTEGER,
+ (&oasize, 1, MPI_INT,
+ c_data(oasizes), 1, MPI_INT,
root, MPI_Comm(comm)));
// Gather the archives, which can be of different sizes, so
// we need to use gatherv.
diff --git a/boost/mpi/collectives/scatter.hpp b/boost/mpi/collectives/scatter.hpp
index 0c91b1e6aa..ae3adcbcfa 100644
--- a/boost/mpi/collectives/scatter.hpp
+++ b/boost/mpi/collectives/scatter.hpp
@@ -100,8 +100,8 @@ dispatch_scatter_sendbuf(const communicator& comm,
// Distribute the sizes
int myarchsize;
BOOST_MPI_CHECK_RESULT(MPI_Scatter,
- (non_const_data(archsizes), 1, MPI_INTEGER,
- &myarchsize, 1, MPI_INTEGER, root, comm));
+ (non_const_data(archsizes), 1, MPI_INT,
+ &myarchsize, 1, MPI_INT, root, comm));
std::vector<int> offsets;
if (root == comm.rank()) {
sizes2offsets(archsizes, offsets);
diff --git a/boost/mpi/communicator.hpp b/boost/mpi/communicator.hpp
index 6e55b167f9..ff889d00a3 100644
--- a/boost/mpi/communicator.hpp
+++ b/boost/mpi/communicator.hpp
@@ -34,9 +34,6 @@
// For (de-)serializing skeletons and content
#include <boost/mpi/skeleton_and_content_fwd.hpp>
-// For (de-)serializing arrays
-#include <boost/serialization/array.hpp>
-
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/status.hpp>
#include <boost/mpi/request.hpp>
@@ -802,23 +799,7 @@ class BOOST_MPI_DECL communicator
* Split the communicator into multiple, disjoint communicators
* each of which is based on a particular color. This is a
* collective operation that returns a new communicator that is a
- * subgroup of @p this. This routine is functionally equivalent to
- * @c MPI_Comm_split.
- *
- * @param color The color of this process. All processes with the
- * same @p color value will be placed into the same group.
- *
- * @returns A new communicator containing all of the processes in
- * @p this that have the same @p color.
- */
- communicator split(int color) const;
-
- /**
- * Split the communicator into multiple, disjoint communicators
- * each of which is based on a particular color. This is a
- * collective operation that returns a new communicator that is a
- * subgroup of @p this. This routine is functionally equivalent to
- * @c MPI_Comm_split.
+ * subgroup of @p this.
*
* @param color The color of this process. All processes with the
* same @p color value will be placed into the same group.
@@ -833,6 +814,7 @@ class BOOST_MPI_DECL communicator
* @p this that have the same @p color.
*/
communicator split(int color, int key) const;
+ communicator split(int color) const;
/**
* Determine if the communicator is in fact an intercommunicator
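
Note: both overloads map directly onto MPI_Comm_split; the single-argument form
simply lets MPI pick the ordering within each subgroup. A usage sketch (not part
of the patch):

    boost::mpi::communicator world;
    // Even and odd ranks land in disjoint communicators; within each one,
    // processes are ordered by the key (here, the original world rank).
    boost::mpi::communicator half = world.split(world.rank() % 2, world.rank());
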
@@ -1157,11 +1139,15 @@ inline bool operator!=(const communicator& comm1, const communicator& comm2)
return !(comm1 == comm2);
}
+}} // boost::mpi
/************************************************************************
* Implementation details *
************************************************************************/
+#include <boost/mpi/detail/request_handlers.hpp>
+
+namespace boost { namespace mpi {
/**
 * INTERNAL ONLY (using the same 'end' name might be considered unfortunate)
*/
@@ -1308,6 +1294,7 @@ template<typename T>
void
communicator::send_impl(int dest, int tag, const T& value, mpl::true_) const
{
+ // Matched on the receiving side by recv() or by a trivial request handler.
BOOST_MPI_CHECK_RESULT(MPI_Send,
(const_cast<T*>(&value), 1, get_mpi_datatype<T>(value),
dest, tag, MPI_Comm(*this)));
@@ -1355,26 +1342,37 @@ communicator::array_send_impl(int dest, int tag, const T* values, int n,
mpl::false_) const
{
packed_oarchive oa(*this);
- oa << n << boost::serialization::make_array(values, n);
+ T const* v = values;
+ while (v < values+n) {
+ oa << *v++;
+ }
send(dest, tag, oa);
}
template<typename T, typename A>
void communicator::send_vector(int dest, int tag,
- const std::vector<T,A>& value, mpl::true_ true_type) const
+ const std::vector<T,A>& values, mpl::true_ primitive) const
{
- // send the vector size
- typename std::vector<T,A>::size_type size = value.size();
- send(dest, tag, size);
- // send the data
- this->array_send_impl(dest, tag, value.data(), size, true_type);
+#if defined(BOOST_MPI_USE_IMPROBE)
+ array_send_impl(dest, tag, values.data(), values.size(), primitive);
+#else
+ {
+ // Matched on the receiving side either by the non-blocking
+ // legacy_dynamic_primitive_array_handler or by the blocking
+ // recv_vector(source, tag, values, primitive).
+ // send the vector size
+ typename std::vector<T,A>::size_type size = values.size();
+ send(dest, tag, size);
+ // send the data
+ this->array_send_impl(dest, tag, values.data(), size, primitive);
+ }
+#endif
}
template<typename T, typename A>
void communicator::send_vector(int dest, int tag,
- const std::vector<T,A>& value, mpl::false_ false_type) const
+ const std::vector<T,A>& value, mpl::false_ primitive) const
{
- this->send_impl(dest, tag, value, false_type);
+ this->send_impl(dest, tag, value, primitive);
}
template<typename T, typename A>
@@ -1396,7 +1394,6 @@ template<typename T>
status communicator::recv_impl(int source, int tag, T& value, mpl::true_) const
{
status stat;
-
BOOST_MPI_CHECK_RESULT(MPI_Recv,
(const_cast<T*>(&value), 1,
get_mpi_datatype<T>(value),
@@ -1444,38 +1441,42 @@ status
communicator::array_recv_impl(int source, int tag, T* values, int n,
mpl::false_) const
{
- // Receive the message
packed_iarchive ia(*this);
status stat = recv(source, tag, ia);
-
- // Determine how much data we are going to receive
- int count;
- ia >> count;
-
- // Deserialize the data in the message
- boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
- ia >> arr;
-
- if (count > n) {
- boost::throw_exception(
- std::range_error("communicator::recv: message receive overflow"));
+ T* v = values;
+ while (v != values+n) {
+ ia >> *v++;
}
-
- stat.m_count = count;
+ stat.m_count = n;
return stat;
}
template<typename T, typename A>
status communicator::recv_vector(int source, int tag,
- std::vector<T,A>& value, mpl::true_ true_type) const
+ std::vector<T,A>& values, mpl::true_ primitive) const
{
- // receive the vector size
- typename std::vector<T,A>::size_type size = 0;
- recv(source, tag, size);
- // size the vector
- value.resize(size);
- // receive the data
- return this->array_recv_impl(source, tag, value.data(), size, true_type);
+#if defined(BOOST_MPI_USE_IMPROBE)
+ {
+ MPI_Message msg;
+ status stat;
+ BOOST_MPI_CHECK_RESULT(MPI_Mprobe, (source,tag,*this,&msg,&stat.m_status));
+ int count;
+ BOOST_MPI_CHECK_RESULT(MPI_Get_count, (&stat.m_status,get_mpi_datatype<T>(),&count));
+ values.resize(count);
+ BOOST_MPI_CHECK_RESULT(MPI_Mrecv, (values.data(), count, get_mpi_datatype<T>(), &msg, &stat.m_status));
+ return stat;
+ }
+#else
+ {
+ // receive the vector size
+ typename std::vector<T,A>::size_type size = 0;
+ recv(source, tag, size);
+ // size the vector
+ values.resize(size);
+ // receive the data
+ return this->array_recv_impl(source, tag, values.data(), size, primitive);
+ }
+#endif
}
template<typename T, typename A>
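
Note: the BOOST_MPI_USE_IMPROBE branch above relies on the MPI-3 matched-probe
idiom, which sizes and receives one specific message without a separate count
message. In plain MPI (assuming source, tag and comm are in scope) it reads:

    MPI_Message msg;
    MPI_Status  stat;
    // Block until a matching message arrives and detach it from the queue.
    MPI_Mprobe(source, tag, comm, &msg, &stat);
    int count;
    MPI_Get_count(&stat, MPI_DOUBLE, &count);   // number of elements it carries
    std::vector<double> buffer(count);
    // Receive exactly the probed message, immune to races with other receives.
    MPI_Mrecv(buffer.data(), count, MPI_DOUBLE, &msg, &stat);
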
@@ -1542,12 +1543,7 @@ template<typename T>
request
communicator::isend_impl(int dest, int tag, const T& value, mpl::true_) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (const_cast<T*>(&value), 1,
- get_mpi_datatype<T>(value),
- dest, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_trivial_send(*this, dest, tag, value);
}
// We're sending a type that does not have an associated MPI
@@ -1560,7 +1556,7 @@ communicator::isend_impl(int dest, int tag, const T& value, mpl::false_) const
shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
*archive << value;
request result = isend(dest, tag, *archive);
- result.m_data = archive;
+ result.preserve(archive);
return result;
}
@@ -1581,16 +1577,9 @@ request communicator::isend(int dest, int tag, const std::vector<T,A>& values) c
template<typename T, class A>
request
communicator::isend_vector(int dest, int tag, const std::vector<T,A>& values,
- mpl::true_) const
+ mpl::true_ primitive) const
{
- std::size_t size = values.size();
- request req = this->isend_impl(dest, tag, size, mpl::true_());
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (const_cast<T*>(values.data()), size,
- get_mpi_datatype<T>(),
- dest, tag, MPI_Comm(*this), &req.m_requests[1]));
- return req;
-
+ return request::make_dynamic_primitive_array_send(*this, dest, tag, values);
}
template<typename T, class A>
@@ -1606,12 +1595,7 @@ request
communicator::array_isend_impl(int dest, int tag, const T* values, int n,
mpl::true_) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (const_cast<T*>(values), n,
- get_mpi_datatype<T>(*values),
- dest, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_trivial_send(*this, dest, tag, values, n);
}
template<typename T>
@@ -1620,9 +1604,12 @@ communicator::array_isend_impl(int dest, int tag, const T* values, int n,
mpl::false_) const
{
shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
- *archive << n << boost::serialization::make_array(values, n);
+ T const* v = values;
+ while (v < values+n) {
+ *archive << *v++;
+ }
request result = isend(dest, tag, *archive);
- result.m_data = archive;
+ result.preserve(archive);
return result;
}
@@ -1634,323 +1621,20 @@ request communicator::isend(int dest, int tag, const T* values, int n) const
return array_isend_impl(dest, tag, values, n, is_mpi_datatype<T>());
}
-namespace detail {
- /**
- * Internal data structure that stores everything required to manage
- * the receipt of serialized data via a request object.
- */
- template<typename T>
- struct serialized_irecv_data
- {
- serialized_irecv_data(const communicator& comm, int source, int tag,
- T& value)
- : comm(comm), source(source), tag(tag), ia(comm), value(value)
- {
- }
-
- void deserialize(status& stat)
- {
- ia >> value;
- stat.m_count = 1;
- }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_iarchive ia;
- T& value;
- };
-
- template<>
- struct serialized_irecv_data<packed_iarchive>
- {
- serialized_irecv_data(const communicator& comm, int source, int tag,
- packed_iarchive& ia)
- : comm(comm), source(source), tag(tag), ia(ia) { }
-
- void deserialize(status&) { /* Do nothing. */ }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_iarchive& ia;
- };
-
- /**
- * Internal data structure that stores everything required to manage
- * the receipt of an array of serialized data via a request object.
- */
- template<typename T>
- struct serialized_array_irecv_data
- {
- serialized_array_irecv_data(const communicator& comm, int source, int tag,
- T* values, int n)
- : comm(comm), source(source), tag(tag), ia(comm), values(values), n(n)
- {
- }
-
- void deserialize(status& stat);
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_iarchive ia;
- T* values;
- int n;
- };
-
- template<typename T>
- void serialized_array_irecv_data<T>::deserialize(status& stat)
- {
- // Determine how much data we are going to receive
- int count;
- ia >> count;
-
- // Deserialize the data in the message
- boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
- ia >> arr;
-
- if (count > n) {
- boost::throw_exception(
- std::range_error("communicator::recv: message receive overflow"));
- }
-
- stat.m_count = count;
- }
-
- /**
- * Internal data structure that stores everything required to manage
- * the receipt of an array of primitive data but unknown size.
- * Such an array can have been send with blocking operation and so must
- * be compatible with the (size_t,raw_data[]) format.
- */
- template<typename T, class A>
- struct dynamic_array_irecv_data
- {
- BOOST_STATIC_ASSERT_MSG(is_mpi_datatype<T>::value, "Can only be specialized for MPI datatypes.");
-
- dynamic_array_irecv_data(const communicator& comm, int source, int tag,
- std::vector<T,A>& values)
- : comm(comm), source(source), tag(tag), count(-1), values(values)
- {
- }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- std::vector<T,A>& values;
- };
-
-}
-
-template<typename T>
-optional<status>
-request::handle_serialized_irecv(request* self, request_action action)
-{
- typedef detail::serialized_irecv_data<T> data_t;
- shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
-
- if (action == ra_wait) {
- status stat;
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Wait for the count message to complete
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests, &stat.m_status));
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(), MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- }
-
- // Wait until we have received the entire message
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests + 1, &stat.m_status));
-
- data->deserialize(stat);
- return stat;
- } else if (action == ra_test) {
- status stat;
- int flag = 0;
-
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Check if the count message has completed
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests, &flag, &stat.m_status));
- if (flag) {
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(),MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- } else
- return optional<status>(); // We have not finished yet
- }
-
- // Check if we have received the message data
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests + 1, &flag, &stat.m_status));
- if (flag) {
- data->deserialize(stat);
- return stat;
- } else
- return optional<status>();
- } else {
- return optional<status>();
- }
-}
-
-template<typename T>
-optional<status>
-request::handle_serialized_array_irecv(request* self, request_action action)
-{
- typedef detail::serialized_array_irecv_data<T> data_t;
- shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
-
- if (action == ra_wait) {
- status stat;
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Wait for the count message to complete
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests, &stat.m_status));
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(), MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- }
-
- // Wait until we have received the entire message
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests + 1, &stat.m_status));
-
- data->deserialize(stat);
- return stat;
- } else if (action == ra_test) {
- status stat;
- int flag = 0;
-
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Check if the count message has completed
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests, &flag, &stat.m_status));
- if (flag) {
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(),MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- } else
- return optional<status>(); // We have not finished yet
- }
-
- // Check if we have received the message data
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests + 1, &flag, &stat.m_status));
- if (flag) {
- data->deserialize(stat);
- return stat;
- } else
- return optional<status>();
- } else {
- return optional<status>();
- }
-}
-
-template<typename T, class A>
-optional<status>
-request::handle_dynamic_primitive_array_irecv(request* self, request_action action)
-{
- typedef detail::dynamic_array_irecv_data<T,A> data_t;
- shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
-
- if (action == ra_wait) {
- status stat;
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Wait for the count message to complete
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests, &stat.m_status));
- // Resize our buffer and get ready to receive its data
- data->values.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&(data->values[0]), data->values.size(), get_mpi_datatype<T>(),
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- }
-
- // Wait until we have received the entire message
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests + 1, &stat.m_status));
- return stat;
- } else if (action == ra_test) {
- status stat;
- int flag = 0;
-
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Check if the count message has completed
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests, &flag, &stat.m_status));
- if (flag) {
- // Resize our buffer and get ready to receive its data
- data->values.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&(data->values[0]), data->values.size(),MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- } else
- return optional<status>(); // We have not finished yet
- }
-
- // Check if we have received the message data
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests + 1, &flag, &stat.m_status));
- if (flag) {
- return stat;
- } else
- return optional<status>();
- } else {
- return optional<status>();
- }
-}
-
// We're receiving a type that has an associated MPI datatype, so we
// map directly to that datatype.
template<typename T>
request
communicator::irecv_impl(int source, int tag, T& value, mpl::true_) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (const_cast<T*>(&value), 1,
- get_mpi_datatype<T>(value),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_trivial_recv(*this, source, tag, value);
}
template<typename T>
request
communicator::irecv_impl(int source, int tag, T& value, mpl::false_) const
{
- typedef detail::serialized_irecv_data<T> data_t;
- shared_ptr<data_t> data(new data_t(*this, source, tag, value));
- request req;
- req.m_data = data;
- req.m_handler = request::handle_serialized_irecv<T>;
-
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&data->count, 1,
- get_mpi_datatype<std::size_t>(data->count),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
-
- return req;
+ return request::make_serialized(*this, source, tag, value);
}
template<typename T>
@@ -1965,12 +1649,7 @@ request
communicator::array_irecv_impl(int source, int tag, T* values, int n,
mpl::true_) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (const_cast<T*>(values), n,
- get_mpi_datatype<T>(*values),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_trivial_recv(*this, source, tag, values, n);
}
template<typename T>
@@ -1978,37 +1657,15 @@ request
communicator::array_irecv_impl(int source, int tag, T* values, int n,
mpl::false_) const
{
- typedef detail::serialized_array_irecv_data<T> data_t;
- shared_ptr<data_t> data(new data_t(*this, source, tag, values, n));
- request req;
- req.m_data = data;
- req.m_handler = request::handle_serialized_array_irecv<T>;
-
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&data->count, 1,
- get_mpi_datatype<std::size_t>(data->count),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
-
- return req;
+ return request::make_serialized_array(*this, source, tag, values, n);
}
template<typename T, class A>
request
communicator::irecv_vector(int source, int tag, std::vector<T,A>& values,
- mpl::true_) const
+ mpl::true_ primitive) const
{
- typedef detail::dynamic_array_irecv_data<T,A> data_t;
- shared_ptr<data_t> data(new data_t(*this, source, tag, values));
- request req;
- req.m_data = data;
- req.m_handler = request::handle_dynamic_primitive_array_irecv<T,A>;
-
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&data->count, 1,
- get_mpi_datatype<std::size_t>(data->count),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
-
- return req;
+ return request::make_dynamic_primitive_array_recv(*this, source, tag, values);
}
template<typename T, class A>
diff --git a/boost/mpi/config.hpp b/boost/mpi/config.hpp
index c83277f66b..48c0a9fe29 100644
--- a/boost/mpi/config.hpp
+++ b/boost/mpi/config.hpp
@@ -32,9 +32,33 @@
*/
#define BOOST_MPI_HOMOGENEOUS
+#if defined MPI_VERSION
+/** @brief Major version of the MPI standard supported by the underlying implementation.
+ *
+ * If, for some reason, MPI_VERSION is not defined, set this macro according to
+ * your MPI implementation's documentation.
+ */
+# define BOOST_MPI_VERSION MPI_VERSION
+#else
+// assume a safe default
+# define BOOST_MPI_VERSION 2
+#endif
+
+#if defined MPI_SUBVERSION
+/** @brief Minor version of the MPI standard supported by the underlying implementation.
+ *
+ * If, for some reason, MPI_SUBVERSION is not defined, set this macro according to
+ * your MPI implementation's documentation.
+ */
+# define BOOST_MPI_SUBVERSION MPI_SUBVERSION
+#else
+// assume a safe default
+# define BOOST_MPI_SUBVERSION 2
+#endif
+
// If this is an MPI-2 implementation, define configuration macros for
// the features we are interested in.
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
+#if BOOST_MPI_VERSION >= 2
/** @brief Determine if the MPI implementation has support for memory
* allocation.
*
@@ -96,6 +120,11 @@
// Configuration for MPICH
#endif
+#if BOOST_MPI_VERSION >= 3 && (!defined(I_MPI_NUMVERSION))
+// Assume MPI-3 matched probe (MPI_Improbe/MPI_Mrecv) is usable, except with
+// Intel MPI (identified by I_MPI_NUMVERSION).
+#define BOOST_MPI_USE_IMPROBE 1
+#endif
+
/*****************************************************************************
* *
* DLL import/export options *
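
Note: with these fallbacks in place, feature checks no longer need to guard
against MPI_VERSION being undefined. The intended usage pattern is simply:

    #include <boost/mpi/config.hpp>

    #if BOOST_MPI_VERSION >= 3
    // Safe to rely on MPI-3 features such as matched probe (MPI_Mprobe/MPI_Mrecv).
    #endif
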
diff --git a/boost/mpi/datatype.hpp b/boost/mpi/datatype.hpp
index 1f069977d0..b50662229a 100644
--- a/boost/mpi/datatype.hpp
+++ b/boost/mpi/datatype.hpp
@@ -280,16 +280,16 @@ struct is_mpi_datatype<std::array<T, N> >
// Define wchar_t specialization of is_mpi_datatype, if possible.
#if !defined(BOOST_NO_INTRINSIC_WCHAR_T) && \
- (defined(MPI_WCHAR) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
+ (defined(MPI_WCHAR) || (BOOST_MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(wchar_t, MPI_WCHAR, builtin);
#endif
// Define long long or __int64 specialization of is_mpi_datatype, if possible.
#if defined(BOOST_HAS_LONG_LONG) && \
- (defined(MPI_LONG_LONG_INT) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
+ (defined(MPI_LONG_LONG_INT) || (BOOST_MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(long long, MPI_LONG_LONG_INT, builtin);
#elif defined(BOOST_HAS_MS_INT64) && \
- (defined(MPI_LONG_LONG_INT) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
+ (defined(MPI_LONG_LONG_INT) || (BOOST_MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(__int64, MPI_LONG_LONG_INT, builtin);
#endif
@@ -300,16 +300,16 @@ BOOST_MPI_DATATYPE(__int64, MPI_LONG_LONG_INT, builtin);
// MPI_UNSIGNED_LONG_LONG.
#if defined(BOOST_HAS_LONG_LONG) && \
(defined(MPI_UNSIGNED_LONG_LONG) \
- || (defined(MPI_VERSION) && MPI_VERSION >= 2))
+ || (BOOST_MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG, builtin);
#elif defined(BOOST_HAS_MS_INT64) && \
(defined(MPI_UNSIGNED_LONG_LONG) \
- || (defined(MPI_VERSION) && MPI_VERSION >= 2))
+ || (BOOST_MPI_VERSION >= 2))
BOOST_MPI_DATATYPE(unsigned __int64, MPI_UNSIGNED_LONG_LONG, builtin);
#endif
// Define signed char specialization of is_mpi_datatype, if possible.
-#if defined(MPI_SIGNED_CHAR) || (defined(MPI_VERSION) && MPI_VERSION >= 2)
+#if defined(MPI_SIGNED_CHAR) || (BOOST_MPI_VERSION >= 2)
BOOST_MPI_DATATYPE(signed char, MPI_SIGNED_CHAR, builtin);
#endif
diff --git a/boost/mpi/detail/antiques.hpp b/boost/mpi/detail/antiques.hpp
index 0bd235b2c1..a84760f8f1 100644
--- a/boost/mpi/detail/antiques.hpp
+++ b/boost/mpi/detail/antiques.hpp
@@ -19,10 +19,10 @@ namespace detail {
// serve as an incentive to get rid of this when those compilers
// are dropped.
template <typename T, typename A>
- T* c_data(std::vector<T,A>& v) { return &(v[0]); }
+ T* c_data(std::vector<T,A>& v) { return v.empty() ? static_cast<T*>(0) : &(v[0]); }
template <typename T, typename A>
- T const* c_data(std::vector<T,A> const& v) { return &(v[0]); }
+ T const* c_data(std::vector<T,A> const& v) { return v.empty() ? static_cast<T const*>(0) : &(v[0]); }
// Some old MPI implementations (OpenMPI 1.6, for example) have a
// non-conforming API w.r.t. constness.
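
Note: the guard matters because taking &v[0] on an empty std::vector is
undefined behavior, while a null pointer is harmless here since a zero count
means the buffer is never dereferenced. A sketch:

    std::vector<int> sizes;                      // legitimately empty on non-root ranks
    int* p = boost::mpi::detail::c_data(sizes);  // previously UB when empty; now null
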
diff --git a/boost/mpi/detail/broadcast_sc.hpp b/boost/mpi/detail/broadcast_sc.hpp
index c84da662a2..51a8dff62d 100644
--- a/boost/mpi/detail/broadcast_sc.hpp
+++ b/boost/mpi/detail/broadcast_sc.hpp
@@ -14,14 +14,6 @@
namespace boost { namespace mpi {
template<typename T>
-inline void
-broadcast(const communicator& comm, skeleton_proxy<T>& proxy, int root)
-{
- const skeleton_proxy<T>& const_proxy(proxy);
- broadcast(comm, const_proxy, root);
-}
-
-template<typename T>
void
broadcast(const communicator& comm, const skeleton_proxy<T>& proxy, int root)
{
@@ -36,6 +28,15 @@ broadcast(const communicator& comm, const skeleton_proxy<T>& proxy, int root)
}
}
+template<typename T>
+inline void
+broadcast(const communicator& comm, skeleton_proxy<T>& proxy, int root)
+{
+ const skeleton_proxy<T>& const_proxy(proxy);
+ broadcast(comm, const_proxy, root);
+}
+
+
} } // end namespace boost::mpi
#endif // BOOST_MPI_BROADCAST_SC_HPP
diff --git a/boost/mpi/detail/communicator_sc.hpp b/boost/mpi/detail/communicator_sc.hpp
index 1dfcc3c52d..83fa3a3e39 100644
--- a/boost/mpi/detail/communicator_sc.hpp
+++ b/boost/mpi/detail/communicator_sc.hpp
@@ -50,46 +50,10 @@ communicator::isend(int dest, int tag, const skeleton_proxy<T>& proxy) const
*archive << proxy.object;
request result = isend(dest, tag, *archive);
- result.m_data = archive;
+ result.preserve(archive);
return result;
}
-namespace detail {
- template<typename T>
- struct serialized_irecv_data<const skeleton_proxy<T> >
- {
- serialized_irecv_data(const communicator& comm, int source, int tag,
- skeleton_proxy<T> proxy)
- : comm(comm), source(source), tag(tag), isa(comm),
- ia(isa.get_skeleton()), proxy(proxy) { }
-
- void deserialize(status& stat)
- {
- isa >> proxy.object;
- stat.m_count = 1;
- }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_skeleton_iarchive isa;
- packed_iarchive& ia;
- skeleton_proxy<T> proxy;
- };
-
- template<typename T>
- struct serialized_irecv_data<skeleton_proxy<T> >
- : public serialized_irecv_data<const skeleton_proxy<T> >
- {
- typedef serialized_irecv_data<const skeleton_proxy<T> > inherited;
-
- serialized_irecv_data(const communicator& comm, int source, int tag,
- const skeleton_proxy<T>& proxy)
- : inherited(comm, source, tag, proxy) { }
- };
-}
-
} } // end namespace boost::mpi
#endif // BOOST_MPI_COMMUNICATOR_SC_HPP
diff --git a/boost/mpi/detail/mpi_datatype_primitive.hpp b/boost/mpi/detail/mpi_datatype_primitive.hpp
index 6a82624e43..fc05d78612 100644
--- a/boost/mpi/detail/mpi_datatype_primitive.hpp
+++ b/boost/mpi/detail/mpi_datatype_primitive.hpp
@@ -50,7 +50,7 @@ public:
: is_committed(false),
origin()
{
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
+#if BOOST_MPI_VERSION >= 2
BOOST_MPI_CHECK_RESULT(MPI_Get_address,(const_cast<void*>(orig), &origin));
#else
BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(orig), &origin));
@@ -77,7 +77,7 @@ public:
{
if (!is_committed)
{
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
+#if BOOST_MPI_VERSION >= 2
BOOST_MPI_CHECK_RESULT(MPI_Type_create_struct,
(
addresses.size(),
@@ -120,7 +120,7 @@ private:
// store address, type and length
MPI_Aint a;
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
+#if BOOST_MPI_VERSION >= 2
BOOST_MPI_CHECK_RESULT(MPI_Get_address,(const_cast<void*>(p), &a));
#else
BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(p), &a));
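
Note: MPI_Address was deprecated in MPI-2 and removed in MPI-3, which is why
the guard now routes MPI-2-and-later builds through MPI_Get_address (for some
suitably declared void* ptr):

    MPI_Aint addr;
    MPI_Get_address(ptr, &addr);   // MPI-2 replacement for the removed MPI_Address
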
diff --git a/boost/mpi/detail/offsets.hpp b/boost/mpi/detail/offsets.hpp
index 7e5ab7dff6..0cfc9c9331 100644
--- a/boost/mpi/detail/offsets.hpp
+++ b/boost/mpi/detail/offsets.hpp
@@ -9,6 +9,7 @@
#define BOOST_MPI_OFFSETS_HPP
#include <vector>
+#include <boost/mpi/config.hpp>
#include <boost/mpi/communicator.hpp>
namespace boost { namespace mpi {
@@ -16,10 +17,10 @@ namespace detail {
// Convert a sequence of sizes [S0..Sn] to a sequence displacement
// [O0..On] where O[0] = 0 and O[k+1] = O[k]+S[k].
-void sizes2offsets(int const* sizes, int* offsets, int n);
+void BOOST_MPI_DECL sizes2offsets(int const* sizes, int* offsets, int n);
// Same as size2offset(sizes.data(), offsets.data(), sizes.size())
-void sizes2offsets(std::vector<int> const& sizes, std::vector<int>& offsets);
+void BOOST_MPI_DECL sizes2offsets(std::vector<int> const& sizes, std::vector<int>& offsets);
// Given a sequence of sizes (typically the number of records dispatched
// to each process in a scatter) and a sequence of displacements (typically the
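
Note: for example, sizes {3, 1, 4} yield offsets {0, 3, 4}; each entry is the
running sum of the sizes before it, which is exactly the displacement layout
the *v collectives (gatherv/scatterv) expect:

    int sizes[]   = {3, 1, 4};
    int offsets[3];
    boost::mpi::detail::sizes2offsets(sizes, offsets, 3);  // offsets == {0, 3, 4}
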
diff --git a/boost/mpi/detail/point_to_point.hpp b/boost/mpi/detail/point_to_point.hpp
index 06db34ce94..74116cf8e5 100644
--- a/boost/mpi/detail/point_to_point.hpp
+++ b/boost/mpi/detail/point_to_point.hpp
@@ -13,11 +13,16 @@
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
-namespace boost { namespace mpi { namespace detail {
+namespace boost { namespace mpi {
+
+class request;
+class communicator;
+
+namespace detail {
/** Sends a packed archive using MPI_Send. */
BOOST_MPI_DECL void
-packed_archive_send(MPI_Comm comm, int dest, int tag,
+packed_archive_send(communicator const& comm, int dest, int tag,
const packed_oarchive& ar);
/** Sends a packed archive using MPI_Isend.
@@ -26,25 +31,21 @@ packed_archive_send(MPI_Comm comm, int dest, int tag,
* for each packet will be placed into the out_requests array, up to
* num_out_requests packets. The number of packets sent will be
* returned from the function.
- *
- * @pre num_out_requests >= 2
*/
-BOOST_MPI_DECL int
-packed_archive_isend(MPI_Comm comm, int dest, int tag,
- const packed_oarchive& ar,
- MPI_Request* out_requests, int num_out_requests);
+BOOST_MPI_DECL request
+packed_archive_isend(communicator const& comm, int dest, int tag,
+ const packed_oarchive& ar);
/**
* \overload
*/
-BOOST_MPI_DECL int
-packed_archive_isend(MPI_Comm comm, int dest, int tag,
- const packed_iarchive& ar,
- MPI_Request* out_requests, int num_out_requests);
+BOOST_MPI_DECL request
+packed_archive_isend(communicator const& comm, int dest, int tag,
+ const packed_iarchive& ar);
/** Receives a packed archive using MPI_Recv. */
BOOST_MPI_DECL void
-packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar,
+packed_archive_recv(communicator const& comm, int source, int tag, packed_iarchive& ar,
MPI_Status& status);
} } } // end namespace boost::mpi::detail
diff --git a/boost/mpi/detail/request_handlers.hpp b/boost/mpi/detail/request_handlers.hpp
new file mode 100644
index 0000000000..2ad627639d
--- /dev/null
+++ b/boost/mpi/detail/request_handlers.hpp
@@ -0,0 +1,631 @@
+// Copyright (C) 2018 Alain Miniussi <alain.miniussi@oca.eu>.
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Request implementation details
+
+// This header should be included only after the communicator and request
+// classes have been defined.
+#ifndef BOOST_MPI_REQUEST_HANDLERS_HPP
+#define BOOST_MPI_REQUEST_HANDLERS_HPP
+
+#include <boost/mpi/skeleton_and_content_types.hpp>
+
+namespace boost { namespace mpi {
+
+namespace detail {
+/**
+ * Internal data structure that stores everything required to manage
+ * the receipt of serialized data via a request object.
+ */
+template<typename T>
+struct serialized_irecv_data {
+ serialized_irecv_data(const communicator& comm, T& value)
+ : m_ia(comm), m_value(value) {}
+
+ void deserialize(status& stat)
+ {
+ m_ia >> m_value;
+ stat.m_count = 1;
+ }
+
+ std::size_t m_count;
+ packed_iarchive m_ia;
+ T& m_value;
+};
+
+template<>
+struct serialized_irecv_data<packed_iarchive>
+{
+ serialized_irecv_data(communicator const&, packed_iarchive& ia) : m_ia(ia) { }
+
+ void deserialize(status&) { /* Do nothing. */ }
+
+ std::size_t m_count;
+ packed_iarchive& m_ia;
+};
+
+/**
+ * Internal data structure that stores everything required to manage
+ * the receipt of an array of serialized data via a request object.
+ */
+template<typename T>
+struct serialized_array_irecv_data
+{
+ serialized_array_irecv_data(const communicator& comm, T* values, int n)
+ : m_count(0), m_ia(comm), m_values(values), m_nb(n) {}
+
+ void deserialize(status& stat);
+
+ std::size_t m_count;
+ packed_iarchive m_ia;
+ T* m_values;
+ int m_nb;
+};
+
+template<typename T>
+void serialized_array_irecv_data<T>::deserialize(status& stat)
+{
+ T* v = m_values;
+ T* end = m_values+m_nb;
+ while (v < end) {
+ m_ia >> *v++;
+ }
+ stat.m_count = m_nb;
+}
+
+/**
+ * Internal data structure that stores everything required to manage
+ * the receipt of an array of primitive data of unknown size.
+ * Such an array may have been sent with a blocking operation, so the
+ * receive must be compatible with the (size_t, raw_data[]) wire format.
+ */
+template<typename T, class A>
+struct dynamic_array_irecv_data
+{
+ BOOST_STATIC_ASSERT_MSG(is_mpi_datatype<T>::value, "Can only be specialized for MPI datatypes.");
+
+ dynamic_array_irecv_data(std::vector<T,A>& values)
+ : m_count(-1), m_values(values) {}
+
+ std::size_t m_count;
+ std::vector<T,A>& m_values;
+};
+
+template<typename T>
+struct serialized_irecv_data<const skeleton_proxy<T> >
+{
+ serialized_irecv_data(const communicator& comm, skeleton_proxy<T> proxy)
+ : m_isa(comm), m_ia(m_isa.get_skeleton()), m_proxy(proxy) { }
+
+ void deserialize(status& stat)
+ {
+ m_isa >> m_proxy.object;
+ stat.m_count = 1;
+ }
+
+ std::size_t m_count;
+ packed_skeleton_iarchive m_isa;
+ packed_iarchive& m_ia;
+ skeleton_proxy<T> m_proxy;
+};
+
+template<typename T>
+struct serialized_irecv_data<skeleton_proxy<T> >
+ : public serialized_irecv_data<const skeleton_proxy<T> >
+{
+ typedef serialized_irecv_data<const skeleton_proxy<T> > inherited;
+
+ serialized_irecv_data(const communicator& comm, const skeleton_proxy<T>& proxy)
+ : inherited(comm, proxy) { }
+};
+}
+
+#if BOOST_MPI_VERSION >= 3
+template<class Data>
+class request::probe_handler
+ : public request::handler,
+ protected Data {
+
+protected:
+ template<typename I1>
+ probe_handler(communicator const& comm, int source, int tag, I1& i1)
+ : Data(comm, i1),
+ m_comm(comm),
+ m_source(source),
+ m_tag(tag) {}
+ // no variadic template for now
+ template<typename I1, typename I2>
+ probe_handler(communicator const& comm, int source, int tag, I1& i1, I2& i2)
+ : Data(comm, i1, i2),
+ m_comm(comm),
+ m_source(source),
+ m_tag(tag) {}
+
+public:
+ bool active() const { return m_source != MPI_PROC_NULL; }
+ optional<MPI_Request&> trivial() { return boost::none; }
+ void cancel() { m_source = MPI_PROC_NULL; }
+
+ status wait() {
+ MPI_Message msg;
+ status stat;
+ BOOST_MPI_CHECK_RESULT(MPI_Mprobe, (m_source,m_tag,m_comm,&msg,&stat.m_status));
+ return unpack(msg, stat);
+ }
+
+ optional<status> test() {
+ status stat;
+ int flag = 0;
+ MPI_Message msg;
+ BOOST_MPI_CHECK_RESULT(MPI_Improbe, (m_source,m_tag,m_comm,&flag,&msg,&stat.m_status));
+ if (flag) {
+ return unpack(msg, stat);
+ } else {
+ return optional<status>();
+ }
+ }
+
+protected:
+ friend class request;
+
+ status unpack(MPI_Message& msg, status& stat) {
+ int count;
+ MPI_Datatype datatype = this->Data::datatype();
+ BOOST_MPI_CHECK_RESULT(MPI_Get_count, (&stat.m_status, datatype, &count));
+ this->Data::resize(count);
+ BOOST_MPI_CHECK_RESULT(MPI_Mrecv, (this->Data::buffer(), count, datatype, &msg, &stat.m_status));
+ this->Data::deserialize();
+ m_source = MPI_PROC_NULL;
+ stat.m_count = 1;
+ return stat;
+ }
+
+ communicator const& m_comm;
+ int m_source;
+ int m_tag;
+};
+#endif // BOOST_MPI_VERSION >= 3
+
+namespace detail {
+template<class A>
+struct dynamic_primitive_array_data {
+ dynamic_primitive_array_data(communicator const&, A& arr) : m_buffer(arr) {}
+
+ void* buffer() { return m_buffer.data(); }
+ void resize(std::size_t sz) { m_buffer.resize(sz); }
+ void deserialize() {}
+ MPI_Datatype datatype() { return get_mpi_datatype<typename A::value_type>(); }
+
+ A& m_buffer;
+};
+
+template<typename T>
+struct serialized_data {
+ serialized_data(communicator const& comm, T& value) : m_archive(comm), m_value(value) {}
+
+ void* buffer() { return m_archive.address(); }
+ void resize(std::size_t sz) { m_archive.resize(sz); }
+ void deserialize() { m_archive >> m_value; }
+ MPI_Datatype datatype() { return MPI_PACKED; }
+
+ packed_iarchive m_archive;
+ T& m_value;
+};
+
+template<>
+struct serialized_data<packed_iarchive> {
+ serialized_data(communicator const& comm, packed_iarchive& ar) : m_archive(ar) {}
+
+ void* buffer() { return m_archive.address(); }
+ void resize(std::size_t sz) { m_archive.resize(sz); }
+ void deserialize() {}
+ MPI_Datatype datatype() { return MPI_PACKED; }
+
+ packed_iarchive& m_archive;
+};
+
+template<typename T>
+struct serialized_data<const skeleton_proxy<T> > {
+ serialized_data(communicator const& comm, skeleton_proxy<T> skel)
+ : m_proxy(skel),
+ m_archive(comm) {}
+
+ void* buffer() { return m_archive.get_skeleton().address(); }
+ void resize(std::size_t sz) { m_archive.get_skeleton().resize(sz); }
+ void deserialize() { m_archive >> m_proxy.object; }
+ MPI_Datatype datatype() { return MPI_PACKED; }
+
+ skeleton_proxy<T> m_proxy;
+ packed_skeleton_iarchive m_archive;
+};
+
+template<typename T>
+struct serialized_data<skeleton_proxy<T> >
+ : public serialized_data<const skeleton_proxy<T> > {
+ typedef serialized_data<const skeleton_proxy<T> > super;
+ serialized_data(communicator const& comm, skeleton_proxy<T> skel)
+ : super(comm, skel) {}
+};
+
+template<typename T>
+struct serialized_array_data {
+ serialized_array_data(communicator const& comm, T* values, int nb)
+ : m_archive(comm), m_values(values), m_nb(nb) {}
+
+ void* buffer() { return m_archive.address(); }
+ void resize(std::size_t sz) { m_archive.resize(sz); }
+ void deserialize() {
+ T* end = m_values + m_nb;
+ T* v = m_values;
+ while (v != end) {
+ m_archive >> *v++;
+ }
+ }
+ MPI_Datatype datatype() { return MPI_PACKED; }
+
+ packed_iarchive m_archive;
+ T* m_values;
+ int m_nb;
+};
+
+}
+
+class request::legacy_handler : public request::handler {
+public:
+ legacy_handler(communicator const& comm, int source, int tag);
+
+ void cancel() {
+ for (int i = 0; i < 2; ++i) {
+ if (m_requests[i] != MPI_REQUEST_NULL) {
+ BOOST_MPI_CHECK_RESULT(MPI_Cancel, (m_requests+i));
+ }
+ }
+ }
+
+ bool active() const;
+ optional<MPI_Request&> trivial();
+
+ MPI_Request m_requests[2];
+ communicator m_comm;
+ int m_source;
+ int m_tag;
+};
+
+template<typename T>
+class request::legacy_serialized_handler
+ : public request::legacy_handler,
+ protected detail::serialized_irecv_data<T> {
+public:
+ typedef detail::serialized_irecv_data<T> extra;
+ legacy_serialized_handler(communicator const& comm, int source, int tag, T& value)
+ : legacy_handler(comm, source, tag),
+ extra(comm, value) {
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (&this->extra::m_count, 1,
+ get_mpi_datatype(this->extra::m_count),
+ source, tag, comm, m_requests+0));
+
+ }
+
+ status wait() {
+ status stat;
+ if (m_requests[1] == MPI_REQUEST_NULL) {
+ // Wait for the count message to complete
+ BOOST_MPI_CHECK_RESULT(MPI_Wait,
+ (m_requests, &stat.m_status));
+ // Resize our buffer and get ready to receive its data
+ this->extra::m_ia.resize(this->extra::m_count);
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (this->extra::m_ia.address(), this->extra::m_ia.size(), MPI_PACKED,
+ stat.source(), stat.tag(),
+ MPI_Comm(m_comm), m_requests + 1));
+ }
+
+ // Wait until we have received the entire message
+ BOOST_MPI_CHECK_RESULT(MPI_Wait,
+ (m_requests + 1, &stat.m_status));
+
+ this->deserialize(stat);
+ return stat;
+ }
+
+ optional<status> test() {
+ status stat;
+ int flag = 0;
+
+ if (m_requests[1] == MPI_REQUEST_NULL) {
+ // Check if the count message has completed
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (m_requests, &flag, &stat.m_status));
+ if (flag) {
+ // Resize our buffer and get ready to receive its data
+ this->extra::m_ia.resize(this->extra::m_count);
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (this->extra::m_ia.address(), this->extra::m_ia.size(),MPI_PACKED,
+ stat.source(), stat.tag(),
+ MPI_Comm(m_comm), m_requests + 1));
+ } else
+ return optional<status>(); // We have not finished yet
+ }
+
+ // Check if we have received the message data
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (m_requests + 1, &flag, &stat.m_status));
+ if (flag) {
+ this->deserialize(stat);
+ return stat;
+ } else
+ return optional<status>();
+ }
+};
+
+template<typename T>
+class request::legacy_serialized_array_handler
+ : public request::legacy_handler,
+ protected detail::serialized_array_irecv_data<T> {
+ typedef detail::serialized_array_irecv_data<T> extra;
+
+public:
+ legacy_serialized_array_handler(communicator const& comm, int source, int tag, T* values, int n)
+ : legacy_handler(comm, source, tag),
+ extra(comm, values, n) {
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (&this->extra::m_count, 1,
+ get_mpi_datatype(this->extra::m_count),
+ source, tag, comm, m_requests+0));
+ }
+
+ status wait() {
+ status stat;
+ if (m_requests[1] == MPI_REQUEST_NULL) {
+ // Wait for the count message to complete
+ BOOST_MPI_CHECK_RESULT(MPI_Wait,
+ (m_requests, &stat.m_status));
+ // Resize our buffer and get ready to receive its data
+ this->extra::m_ia.resize(this->extra::m_count);
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (this->extra::m_ia.address(), this->extra::m_ia.size(), MPI_PACKED,
+ stat.source(), stat.tag(),
+ MPI_Comm(m_comm), m_requests + 1));
+ }
+
+ // Wait until we have received the entire message
+ BOOST_MPI_CHECK_RESULT(MPI_Wait,
+ (m_requests + 1, &stat.m_status));
+
+ this->deserialize(stat);
+ return stat;
+ }
+
+ optional<status> test() {
+ status stat;
+ int flag = 0;
+
+ if (m_requests[1] == MPI_REQUEST_NULL) {
+ // Check if the count message has completed
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (m_requests, &flag, &stat.m_status));
+ if (flag) {
+ // Resize our buffer and get ready to receive its data
+ this->extra::m_ia.resize(this->extra::m_count);
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (this->extra::m_ia.address(), this->extra::m_ia.size(),MPI_PACKED,
+ stat.source(), stat.tag(),
+ MPI_Comm(m_comm), m_requests + 1));
+ } else
+ return optional<status>(); // We have not finished yet
+ }
+
+ // Check if we have received the message data
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (m_requests + 1, &flag, &stat.m_status));
+ if (flag) {
+ this->deserialize(stat);
+ return stat;
+ } else
+ return optional<status>();
+ }
+};
+
+template<typename T, class A>
+class request::legacy_dynamic_primitive_array_handler
+ : public request::legacy_handler,
+ protected detail::dynamic_array_irecv_data<T,A>
+{
+ typedef detail::dynamic_array_irecv_data<T,A> extra;
+
+public:
+ legacy_dynamic_primitive_array_handler(communicator const& comm, int source, int tag, std::vector<T,A>& values)
+ : legacy_handler(comm, source, tag),
+ extra(values) {
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (&this->extra::m_count, 1,
+ get_mpi_datatype(this->extra::m_count),
+ source, tag, comm, m_requests+0));
+ }
+
+ status wait() {
+ status stat;
+ if (m_requests[1] == MPI_REQUEST_NULL) {
+ // Wait for the count message to complete
+ BOOST_MPI_CHECK_RESULT(MPI_Wait,
+ (m_requests, &stat.m_status));
+ // Resize our buffer and get ready to receive its data
+ this->extra::m_values.resize(this->extra::m_count);
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (&(this->extra::m_values[0]), this->extra::m_values.size(), get_mpi_datatype<T>(),
+ stat.source(), stat.tag(),
+ MPI_Comm(m_comm), m_requests + 1));
+ }
+ // Wait until we have received the entire message
+ BOOST_MPI_CHECK_RESULT(MPI_Wait,
+ (m_requests + 1, &stat.m_status));
+ return stat;
+ }
+
+ optional<status> test() {
+ status stat;
+ int flag = 0;
+
+ if (m_requests[1] == MPI_REQUEST_NULL) {
+ // Check if the count message has completed
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (m_requests, &flag, &stat.m_status));
+ if (flag) {
+ // Resize our buffer and get ready to receive its data
+ this->extra::m_values.resize(this->extra::m_count);
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (&(this->extra::m_values[0]), this->extra::m_values.size(),MPI_PACKED,
+ stat.source(), stat.tag(),
+ MPI_Comm(m_comm), m_requests + 1));
+ } else
+ return optional<status>(); // We have not finished yet
+ }
+
+ // Check if we have received the message data
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (m_requests + 1, &flag, &stat.m_status));
+ if (flag) {
+ return stat;
+ } else
+ return optional<status>();
+ }
+};
+
+class request::trivial_handler : public request::handler {
+
+public:
+ trivial_handler();
+
+ status wait();
+ optional<status> test();
+ void cancel();
+
+ bool active() const;
+ optional<MPI_Request&> trivial();
+
+private:
+ friend class request;
+ MPI_Request m_request;
+};
+
+class request::dynamic_handler : public request::handler {
+ dynamic_handler();
+
+ status wait();
+ optional<status> test();
+ void cancel();
+
+ bool active() const;
+ optional<MPI_Request&> trivial();
+
+private:
+ friend class request;
+ MPI_Request m_requests[2];
+};
+
+template<typename T>
+request request::make_serialized(communicator const& comm, int source, int tag, T& value) {
+#if defined(BOOST_MPI_USE_IMPROBE)
+ return request(new probe_handler<detail::serialized_data<T> >(comm, source, tag, value));
+#else
+ return request(new legacy_serialized_handler<T>(comm, source, tag, value));
+#endif
+}
+
+template<typename T>
+request request::make_serialized_array(communicator const& comm, int source, int tag, T* values, int n) {
+#if defined(BOOST_MPI_USE_IMPROBE)
+ return request(new probe_handler<detail::serialized_array_data<T> >(comm, source, tag, values, n));
+#else
+ return request(new legacy_serialized_array_handler<T>(comm, source, tag, values, n));
+#endif
+}
+
+template<typename T, class A>
+request request::make_dynamic_primitive_array_recv(communicator const& comm, int source, int tag,
+ std::vector<T,A>& values) {
+#if defined(BOOST_MPI_USE_IMPROBE)
+ return request(new probe_handler<detail::dynamic_primitive_array_data<std::vector<T,A> > >(comm,source,tag,values));
+#else
+ return request(new legacy_dynamic_primitive_array_handler<T,A>(comm, source, tag, values));
+#endif
+}
+
+template<typename T>
+request
+request::make_trivial_send(communicator const& comm, int dest, int tag, T const* values, int n) {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (const_cast<T*>(values), n,
+ get_mpi_datatype<T>(),
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+}
+
+template<typename T>
+request
+request::make_trivial_send(communicator const& comm, int dest, int tag, T const& value) {
+ return make_trivial_send(comm, dest, tag, &value, 1);
+}
+
+template<typename T>
+request
+request::make_trivial_recv(communicator const& comm, int dest, int tag, T* values, int n) {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (values, n,
+ get_mpi_datatype<T>(),
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+}
+
+template<typename T>
+request
+request::make_trivial_recv(communicator const& comm, int dest, int tag, T& value) {
+ return make_trivial_recv(comm, dest, tag, &value, 1);
+}
+
+template<typename T, class A>
+request request::make_dynamic_primitive_array_send(communicator const& comm, int dest, int tag,
+ std::vector<T,A> const& values) {
+#if defined(BOOST_MPI_USE_IMPROBE)
+ return make_trivial_send(comm, dest, tag, values.data(), values.size());
+#else
+ {
+ // Matched on the receiving side either by the non-blocking
+ // legacy_dynamic_primitive_array_handler or by the blocking
+ // recv_vector(source, tag, values, primitive).
+ boost::shared_ptr<std::size_t> size(new std::size_t(values.size()));
+ dynamic_handler* handler = new dynamic_handler;
+ request req(handler);
+ req.preserve(size);
+
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (size.get(), 1,
+ get_mpi_datatype(*size),
+ dest, tag, comm, handler->m_requests+0));
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (const_cast<T*>(values.data()), *size,
+ get_mpi_datatype<T>(),
+ dest, tag, comm, handler->m_requests+1));
+ return req;
+ }
+#endif
+}
+
+inline
+request::legacy_handler::legacy_handler(communicator const& comm, int source, int tag)
+ : m_comm(comm),
+ m_source(source),
+ m_tag(tag)
+{
+ m_requests[0] = MPI_REQUEST_NULL;
+ m_requests[1] = MPI_REQUEST_NULL;
+}
+
+}}
+
+#endif // BOOST_MPI_REQUEST_HANDLERS_HPP
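
Note: none of this changes the public interface; a request returned by irecv()
now owns one of the handler objects defined above. A usage sketch:

    boost::mpi::communicator world;
    std::vector<int> payload;                        // size unknown to the receiver
    boost::mpi::request r = world.irecv(0, 0, payload);
    // Internally this is a probe_handler (when BOOST_MPI_USE_IMPROBE is set) or a
    // legacy_dynamic_primitive_array_handler (two-message size+data protocol).
    r.wait();
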
diff --git a/boost/mpi/nonblocking.hpp b/boost/mpi/nonblocking.hpp
index cf762d9cfb..fe944be8ee 100644
--- a/boost/mpi/nonblocking.hpp
+++ b/boost/mpi/nonblocking.hpp
@@ -59,10 +59,7 @@ wait_any(ForwardIterator first, ForwardIterator last)
ForwardIterator current = first;
while (true) {
// Check if we have found a completed request. If so, return it.
- bool current_is_active =
- ( current->m_requests[0] != MPI_REQUEST_NULL ||
- current->m_requests[1] != MPI_REQUEST_NULL) ;
- if (current_is_active) {
+ if (current->active()) {
optional<status> result = current->test();
if (bool(result)) {
return std::make_pair(*result, current);
@@ -75,10 +72,7 @@ wait_any(ForwardIterator first, ForwardIterator last)
// We could probably ignore non trivial request that are inactive,
// but we can assume that a mix of trivial and non trivial requests
// is unlikely enough not to care.
- bool current_trivial_request =
- ( !bool(current->m_handler) &&
- current->m_requests[1] == MPI_REQUEST_NULL);
- all_trivial_requests = all_trivial_requests && current_trivial_request;
+ all_trivial_requests = all_trivial_requests && current->trivial();
// Move to the next request.
++n;
@@ -89,8 +83,9 @@ wait_any(ForwardIterator first, ForwardIterator last)
if (all_trivial_requests) {
std::vector<MPI_Request> requests;
requests.reserve(n);
- for (current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
+ for (current = first; current != last; ++current) {
+ requests.push_back(*current->trivial());
+ }
// Let MPI wait until one of these operations completes.
int index;
@@ -106,7 +101,7 @@ wait_any(ForwardIterator first, ForwardIterator last)
// Find the iterator corresponding to the completed request.
current = first;
advance(current, index);
- current->m_requests[0] = requests[index];
+ *current->trivial() = requests[index];
return std::make_pair(stat, current);
}
@@ -209,10 +204,7 @@ wait_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
// Check if this request (and all others before it) are "trivial"
// requests, e.g., they can be represented with a single
// MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
+ all_trivial_requests = all_trivial_requests && current->trivial();
}
}
}
@@ -225,7 +217,7 @@ wait_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
std::vector<MPI_Request> requests;
requests.reserve(num_outstanding_requests);
for (ForwardIterator current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
+ requests.push_back(*current->trivial());
// Let MPI wait until all of these operations completes.
std::vector<MPI_Status> stats(num_outstanding_requests);
@@ -263,7 +255,7 @@ wait_all(ForwardIterator first, ForwardIterator last)
difference_type num_outstanding_requests = distance(first, last);
- std::vector<bool> completed(num_outstanding_requests);
+ std::vector<bool> completed(num_outstanding_requests, false);
while (num_outstanding_requests > 0) {
bool all_trivial_requests = true;
@@ -280,10 +272,7 @@ wait_all(ForwardIterator first, ForwardIterator last)
// Check if this request (and all others before it) are "trivial"
// requests, e.g., they can be represented with a single
// MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
+ all_trivial_requests = all_trivial_requests && current->trivial();
}
}
}
@@ -296,7 +285,7 @@ wait_all(ForwardIterator first, ForwardIterator last)
std::vector<MPI_Request> requests;
requests.reserve(num_outstanding_requests);
for (ForwardIterator current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
+ requests.push_back(*current->trivial());
// Let MPI wait until all of these operations completes.
BOOST_MPI_CHECK_RESULT(MPI_Waitall,
@@ -348,10 +337,10 @@ test_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
for (; first != last; ++first) {
// If we have a non-trivial request, then no requests can be
// completed.
- if (first->m_handler || first->m_requests[1] != MPI_REQUEST_NULL)
+ if (!first->trivial()) {
return optional<OutputIterator>();
-
- requests.push_back(first->m_requests[0]);
+ }
+ requests.push_back(*first->trivial());
}
int flag = 0;
@@ -381,10 +370,10 @@ test_all(ForwardIterator first, ForwardIterator last)
for (; first != last; ++first) {
// If we have a non-trivial request, then no requests can be
// completed.
- if (first->m_handler || first->m_requests[1] != MPI_REQUEST_NULL)
+ if (!first->trivial()) {
return false;
-
- requests.push_back(first->m_requests[0]);
+ }
+ requests.push_back(*first->trivial());
}
int flag = 0;
@@ -467,10 +456,7 @@ wait_some(BidirectionalIterator first, BidirectionalIterator last,
// Check if this request (and all others before it) are "trivial"
// requests, e.g., they can be represented with a single
// MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
+ all_trivial_requests = all_trivial_requests && current->trivial();
// Move to the next request.
++n;
@@ -492,7 +478,7 @@ wait_some(BidirectionalIterator first, BidirectionalIterator last,
std::vector<MPI_Status> stats(n);
requests.reserve(n);
for (current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
+ requests.push_back(*current->trivial());
// Let MPI wait until some of these operations complete.
int num_completed;
@@ -518,7 +504,7 @@ wait_some(BidirectionalIterator first, BidirectionalIterator last,
// Finish up the request and swap it into the "completed
// requests" partition.
- current->m_requests[0] = requests[indices[index]];
+ *current->trivial() = requests[indices[index]];
--start_of_completed;
iter_swap(current, start_of_completed);
}
@@ -583,10 +569,7 @@ wait_some(BidirectionalIterator first, BidirectionalIterator last)
// Check if this request (and all others before it) are "trivial"
// requests, e.g., they can be represented with a single
// MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
+ all_trivial_requests = all_trivial_requests && current->trivial();
// Move to the next request.
++n;
@@ -603,7 +586,7 @@ wait_some(BidirectionalIterator first, BidirectionalIterator last)
std::vector<int> indices(n);
requests.reserve(n);
for (current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
+ requests.push_back(*current->trivial());
// Let MPI wait until some of these operations complete.
int num_completed;
@@ -624,7 +607,7 @@ wait_some(BidirectionalIterator first, BidirectionalIterator last)
// Finish up the request and swap it into the "completed
// requests" partition.
- current->m_requests[0] = requests[indices[index]];
+ *current->trivial() = requests[indices[index]];
--start_of_completed;
iter_swap(current, start_of_completed);
}
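
For context, the wait/test helpers above no longer reach into m_requests directly; they ask each request for its optional trivial MPI_Request and fall back to per-request handling otherwise. A minimal caller-side sketch, assuming a ring exchange of plain ints (ranks and tags are illustrative), shows the fast path these hunks optimize:

// Both requests below are "trivial" (a single raw MPI_Request each),
// so the refactored wait_all can batch them into one MPI_Waitall.
#include <boost/mpi.hpp>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  int out = world.rank(), in = -1;
  int right = (world.rank() + 1) % world.size();
  int left  = (world.rank() + world.size() - 1) % world.size();

  std::vector<boost::mpi::request> reqs;
  reqs.push_back(world.isend(right, 0, out));
  reqs.push_back(world.irecv(left, 0, in));

  boost::mpi::wait_all(reqs.begin(), reqs.end());
  return 0;
}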
diff --git a/boost/mpi/operations.hpp b/boost/mpi/operations.hpp
index 5af8c8fafa..8988491c99 100644
--- a/boost/mpi/operations.hpp
+++ b/boost/mpi/operations.hpp
@@ -23,7 +23,8 @@
#include <boost/mpl/if.hpp>
#include <boost/mpl/and.hpp>
#include <boost/mpi/datatype.hpp>
-#include <boost/utility/enable_if.hpp>
+#include <boost/core/enable_if.hpp>
+#include <boost/core/uncaught_exceptions.hpp>
#include <functional>
namespace boost { namespace mpi {
@@ -302,7 +303,7 @@ namespace detail {
~user_op()
{
- if (std::uncaught_exception()) {
+ if (boost::core::uncaught_exceptions() > 0) {
// Ignore failure cases: there are obviously other problems
// already, and we don't want to cause program termination if
// MPI_Op_free fails.
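
This hunk replaces std::uncaught_exception(), deprecated in C++17 and removed in C++20, with boost::core::uncaught_exceptions(), which returns the number of exceptions currently in flight. A hedged sketch of the destructor idiom being preserved; op_guard and its two release helpers are hypothetical stand-ins for user_op and MPI_Op_free:

#include <boost/core/uncaught_exceptions.hpp>

struct op_guard {
  ~op_guard() {
    if (boost::core::uncaught_exceptions() > 0) {
      // Hypothetical: an exception is unwinding the stack, so free the
      // resource quietly rather than risk reporting a second failure.
      release_quietly();
    } else {
      // Hypothetical: normal destruction, so check and report errors.
      release_checked();
    }
  }
  void release_quietly() noexcept {}
  void release_checked() {}
};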
diff --git a/boost/mpi/request.hpp b/boost/mpi/request.hpp
index 16a2fb18f9..8732cd2b56 100644
--- a/boost/mpi/request.hpp
+++ b/boost/mpi/request.hpp
@@ -13,6 +13,7 @@
#define BOOST_MPI_REQUEST_HPP
#include <boost/mpi/config.hpp>
+#include <boost/mpi/status.hpp>
#include <boost/optional.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/mpi/packed_iarchive.hpp>
@@ -38,11 +39,60 @@ class BOOST_MPI_DECL request
request();
/**
+ * Send a known number of primitive objects in one MPI request.
+ */
+ template<typename T>
+ static request make_trivial_send(communicator const& comm, int dest, int tag, T const& value);
+ template<typename T>
+ static request make_trivial_send(communicator const& comm, int dest, int tag, T const* values, int n);
+ static request make_packed_send(communicator const& comm, int dest, int tag, void const* values, std::size_t n);
+
+ static request make_bottom_send(communicator const& comm, int dest, int tag, MPI_Datatype tp);
+ static request make_empty_send(communicator const& comm, int dest, int tag);
+ /**
+ * Receive a known number of primitive objects in one MPI request.
+ */
+ template<typename T>
+ static request make_trivial_recv(communicator const& comm, int source, int tag, T& value);
+ template<typename T>
+ static request make_trivial_recv(communicator const& comm, int source, int tag, T* values, int n);
+
+ static request make_bottom_recv(communicator const& comm, int source, int tag, MPI_Datatype tp);
+ static request make_empty_recv(communicator const& comm, int source, int tag);
+ /**
+ * Construct a request for simple data of unknown size.
+ */
+ static request make_dynamic();
+ /**
+ * Construct a request for serialized data.
+ */
+ template<typename T>
+ static request make_serialized(communicator const& comm, int source, int tag, T& value);
+ /**
+ * Construct a request for an array of complex data.
+ */
+ template<typename T>
+ static request make_serialized_array(communicator const& comm, int source, int tag, T* values, int n);
+ /**
+ * Construct a request to receive an array of primitive data.
+ */
+ template<typename T, class A>
+ static request
+ make_dynamic_primitive_array_recv(communicator const& comm, int source, int tag,
+ std::vector<T,A>& values);
+ /**
+ * Construct a request to send an array of primitive data.
+ */
+ template<typename T, class A>
+ static request
+ make_dynamic_primitive_array_send(communicator const& comm, int dest, int tag,
+ std::vector<T,A> const& values);
+ /**
* Wait until the communication associated with this request has
* completed, then return a @c status object describing the
* communication.
*/
- status wait();
+ status wait() { return m_handler ? m_handler->wait() : status(); }
/**
* Determine whether the communication associated with this request
@@ -52,58 +102,58 @@ class BOOST_MPI_DECL request
* yet. Note that once @c test() returns a @c status object, the
* request has completed and @c wait() should not be called.
*/
- optional<status> test();
+ optional<status> test() { return active() ? m_handler->test() : optional<status>(); }
/**
* Cancel a pending communication, assuming it has not already been
* completed.
*/
- void cancel();
-
- private:
- enum request_action { ra_wait, ra_test, ra_cancel };
- typedef optional<status> (*handler_type)(request* self,
- request_action action);
-
+ void cancel() { if (m_handler) { m_handler->cancel(); } m_preserved.reset(); }
+
/**
- * INTERNAL ONLY
- *
- * Handles the non-blocking receive of a serialized value.
+ * The trivial MPI request implementing this request, provided it is trivial.
+ * Probably irrelevant to most users.
*/
- template<typename T>
- static optional<status>
- handle_serialized_irecv(request* self, request_action action);
+ optional<MPI_Request&> trivial() { return (m_handler
+ ? m_handler->trivial()
+ : optional<MPI_Request&>()); }
/**
- * INTERNAL ONLY
- *
- * Handles the non-blocking receive of an array of serialized values.
+ * Is this request potentially pending?
*/
- template<typename T>
- static optional<status>
- handle_serialized_array_irecv(request* self, request_action action);
-
- /**
- * INTERNAL ONLY
- *
- * Handles the non-blocking receive of a dynamic array of primitive values.
- */
- template<typename T, class A>
- static optional<status>
- handle_dynamic_primitive_array_irecv(request* self, request_action action);
-
- public: // template friends are not portable
-
- /// INTERNAL ONLY
- MPI_Request m_requests[2];
-
- /// INTERNAL ONLY
- handler_type m_handler;
-
- /// INTERNAL ONLY
- shared_ptr<void> m_data;
-
- friend class communicator;
+ bool active() const { return bool(m_handler) && m_handler->active(); }
+
+ // Some data might need protection while the request is processed.
+ void preserve(boost::shared_ptr<void> d);
+
+ class handler {
+ public:
+ virtual ~handler() = 0;
+ virtual status wait() = 0;
+ virtual optional<status> test() = 0;
+ virtual void cancel() = 0;
+
+ virtual bool active() const = 0;
+ virtual optional<MPI_Request&> trivial() = 0;
+ };
+
+ private:
+
+ request(handler *h) : m_handler(h) {}
+
+ // specific implementations
+ class legacy_handler;
+ class trivial_handler;
+ class dynamic_handler;
+ template<typename T> class legacy_serialized_handler;
+ template<typename T> class legacy_serialized_array_handler;
+ template<typename T, class A> class legacy_dynamic_primitive_array_handler;
+#if BOOST_MPI_VERSION >= 3
+ template<class Data> class probe_handler;
+#endif
+ private:
+ shared_ptr<handler> m_handler;
+ shared_ptr<void> m_preserved;
};
} } // end namespace boost::mpi
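
The rewritten request is now a thin facade: the static make_* factories pick a handler implementation, wait()/test()/cancel() delegate to it, and trivial() exposes the single underlying MPI_Request when one exists. A hedged sketch of how the declarations above compose, with an arbitrary destination rank and tag:

// Build a trivial send request, peek at its raw MPI_Request, wait.
#include <boost/mpi.hpp>

void send_one(boost::mpi::communicator& comm, int value)
{
  boost::mpi::request r =
    boost::mpi::request::make_trivial_send(comm, /*dest=*/0, /*tag=*/0, value);

  // Non-empty only for single-MPI_Request operations; handler-backed
  // (e.g. serialized) requests return an empty optional, which is the
  // exact test the wait_all/test_all loops above now perform.
  if (boost::optional<MPI_Request&> raw = r.trivial()) {
    (void)*raw;  // could be batched into a raw MPI_Waitall
  }
  r.wait();
}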
diff --git a/boost/mpi/skeleton_and_content.hpp b/boost/mpi/skeleton_and_content.hpp
index dcd13bfe57..20fc135e23 100644
--- a/boost/mpi/skeleton_and_content.hpp
+++ b/boost/mpi/skeleton_and_content.hpp
@@ -25,338 +25,10 @@
#include <boost/mpi/config.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/detail/forward_skeleton_iarchive.hpp>
-#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
-#include <boost/mpi/detail/ignore_iprimitive.hpp>
-#include <boost/mpi/detail/ignore_oprimitive.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/archive/detail/register_archive.hpp>
+#include <boost/mpi/skeleton_and_content_types.hpp>
namespace boost { namespace mpi {
-/**
- * @brief A proxy that requests that the skeleton of an object be
- * transmitted.
- *
- * The @c skeleton_proxy is a lightweight proxy object used to
- * indicate that the skeleton of an object, not the object itself,
- * should be transmitted. It can be used with the @c send and @c recv
- * operations of communicators or the @c broadcast collective. When a
- * @c skeleton_proxy is sent, Boost.MPI generates a description
- * containing the structure of the stored object. When that skeleton
- * is received, the receiving object is reshaped to match the
- * structure. Once the skeleton of an object as been transmitted, its
- * @c content can be transmitted separately (often several times)
- * without changing the structure of the object.
- */
-template <class T>
-struct BOOST_MPI_DECL skeleton_proxy
-{
- /**
- * Constructs a @c skeleton_proxy that references object @p x.
- *
- * @param x the object whose structure will be transmitted or
- * altered.
- */
- skeleton_proxy(T& x)
- : object(x)
- {}
-
- T& object;
-};
-
-/**
- * @brief Create a skeleton proxy object.
- *
- * This routine creates an instance of the skeleton_proxy class. It
- * will typically be used when calling @c send, @c recv, or @c
- * broadcast, to indicate that only the skeleton (structure) of an
- * object should be transmitted and not its contents.
- *
- * @param x the object whose structure will be transmitted.
- *
- * @returns a skeleton_proxy object referencing @p x
- */
-template <class T>
-inline const skeleton_proxy<T> skeleton(T& x)
-{
- return skeleton_proxy<T>(x);
-}
-
-namespace detail {
- /// @brief a class holding an MPI datatype
- /// INTERNAL ONLY
- /// the type is freed upon destruction
- class BOOST_MPI_DECL mpi_datatype_holder : public boost::noncopyable
- {
- public:
- mpi_datatype_holder()
- : is_committed(false)
- {}
-
- mpi_datatype_holder(MPI_Datatype t, bool committed = true)
- : d(t)
- , is_committed(committed)
- {}
-
- void commit()
- {
- BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&d));
- is_committed=true;
- }
-
- MPI_Datatype get_mpi_datatype() const
- {
- return d;
- }
-
- ~mpi_datatype_holder()
- {
- int finalized=0;
- BOOST_MPI_CHECK_RESULT(MPI_Finalized,(&finalized));
- if (!finalized && is_committed)
- BOOST_MPI_CHECK_RESULT(MPI_Type_free,(&d));
- }
-
- private:
- MPI_Datatype d;
- bool is_committed;
- };
-} // end namespace detail
-
-/** @brief A proxy object that transfers the content of an object
- * without its structure.
- *
- * The @c content class indicates that Boost.MPI should transmit or
- * receive the content of an object, but without any information
- * about the structure of the object. It is only meaningful to
- * transmit the content of an object after the receiver has already
- * received the skeleton for the same object.
- *
- * Most users will not use @c content objects directly. Rather, they
- * will invoke @c send, @c recv, or @c broadcast operations using @c
- * get_content().
- */
-class BOOST_MPI_DECL content
-{
-public:
- /**
- * Constructs an empty @c content object. This object will not be
- * useful for any Boost.MPI operations until it is reassigned.
- */
- content() {}
-
- /**
- * This routine initializes the @c content object with an MPI data
- * type that refers to the content of an object without its structure.
- *
- * @param d the MPI data type referring to the content of the object.
- *
- * @param committed @c true indicates that @c MPI_Type_commit has
- * already been excuted for the data type @p d.
- */
- content(MPI_Datatype d, bool committed=true)
- : holder(new detail::mpi_datatype_holder(d,committed))
- {}
-
- /**
- * Replace the MPI data type referencing the content of an object.
- *
- * @param d the new MPI data type referring to the content of the
- * object.
- *
- * @returns *this
- */
- const content& operator=(MPI_Datatype d)
- {
- holder.reset(new detail::mpi_datatype_holder(d));
- return *this;
- }
-
- /**
- * Retrieve the MPI data type that refers to the content of the
- * object.
- *
- * @returns the MPI data type, which should only be transmitted or
- * received using @c MPI_BOTTOM as the address.
- */
- MPI_Datatype get_mpi_datatype() const
- {
- return holder->get_mpi_datatype();
- }
-
- /**
- * Commit the MPI data type referring to the content of the
- * object.
- */
- void commit()
- {
- holder->commit();
- }
-
-private:
- boost::shared_ptr<detail::mpi_datatype_holder> holder;
-};
-
-/** @brief Returns the content of an object, suitable for transmission
- * via Boost.MPI.
- *
- * The function creates an absolute MPI datatype for the object,
- * where all offsets are counted from the address 0 (a.k.a. @c
- * MPI_BOTTOM) instead of the address @c &x of the object. This
- * allows the creation of MPI data types for complex data structures
- * containing pointers, such as linked lists or trees.
- *
- * The disadvantage, compared to relative MPI data types is that for
- * each object a new MPI data type has to be created.
- *
- * The contents of an object can only be transmitted when the
- * receiver already has an object with the same structure or shape as
- * the sender. To accomplish this, first transmit the skeleton of the
- * object using, e.g., @c skeleton() or @c skeleton_proxy.
- *
- * The type @c T has to allow creation of an absolute MPI data type
- * (content).
- *
- * @param x the object for which the content will be transmitted.
- *
- * @returns the content of the object @p x, which can be used for
- * transmission via @c send, @c recv, or @c broadcast.
- */
-template <class T> const content get_content(const T& x);
-
-/** @brief An archiver that reconstructs a data structure based on the
- * binary skeleton stored in a buffer.
- *
- * The @c packed_skeleton_iarchive class is an Archiver (as in the
- * Boost.Serialization library) that can construct the the shape of a
- * data structure based on a binary skeleton stored in a buffer. The
- * @c packed_skeleton_iarchive is typically used by the receiver of a
- * skeleton, to prepare a data structure that will eventually receive
- * content separately.
- *
- * Users will not generally need to use @c packed_skeleton_iarchive
- * directly. Instead, use @c skeleton or @c get_skeleton.
- */
-class BOOST_MPI_DECL packed_skeleton_iarchive
- : public detail::ignore_iprimitive,
- public detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>
-{
-public:
- /**
- * Construct a @c packed_skeleton_iarchive for the given
- * communicator.
- *
- * @param comm The communicator over which this archive will be
- * transmitted.
- *
- * @param flags Control the serialization of the skeleton. Refer to
- * the Boost.Serialization documentation before changing the
- * default flags.
- */
- packed_skeleton_iarchive(MPI_Comm const & comm,
- unsigned int flags = boost::archive::no_header)
- : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(skeleton_archive_)
- , skeleton_archive_(comm,flags)
- {}
-
- /**
- * Construct a @c packed_skeleton_iarchive that unpacks a skeleton
- * from the given @p archive.
- *
- * @param archive the archive from which the skeleton will be
- * unpacked.
- *
- */
- explicit packed_skeleton_iarchive(packed_iarchive & archive)
- : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(archive)
- , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
- {}
-
- /**
- * Retrieve the archive corresponding to this skeleton.
- */
- const packed_iarchive& get_skeleton() const
- {
- return this->implementation_archive;
- }
-
- /**
- * Retrieve the archive corresponding to this skeleton.
- */
- packed_iarchive& get_skeleton()
- {
- return this->implementation_archive;
- }
-
-private:
- /// Store the actual archive that holds the structure, unless the
- /// user overrides this with their own archive.
- packed_iarchive skeleton_archive_;
-};
-
-/** @brief An archiver that records the binary skeleton of a data
- * structure into a buffer.
- *
- * The @c packed_skeleton_oarchive class is an Archiver (as in the
- * Boost.Serialization library) that can record the shape of a data
- * structure (called the "skeleton") into a binary representation
- * stored in a buffer. The @c packed_skeleton_oarchive is typically
- * used by the send of a skeleton, to pack the skeleton of a data
- * structure for transmission separately from the content.
- *
- * Users will not generally need to use @c packed_skeleton_oarchive
- * directly. Instead, use @c skeleton or @c get_skeleton.
- */
-class BOOST_MPI_DECL packed_skeleton_oarchive
- : public detail::ignore_oprimitive,
- public detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>
-{
-public:
- /**
- * Construct a @c packed_skeleton_oarchive for the given
- * communicator.
- *
- * @param comm The communicator over which this archive will be
- * transmitted.
- *
- * @param flags Control the serialization of the skeleton. Refer to
- * the Boost.Serialization documentation before changing the
- * default flags.
- */
- packed_skeleton_oarchive(MPI_Comm const & comm,
- unsigned int flags = boost::archive::no_header)
- : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(skeleton_archive_)
- , skeleton_archive_(comm,flags)
- {}
-
- /**
- * Construct a @c packed_skeleton_oarchive that packs a skeleton
- * into the given @p archive.
- *
- * @param archive the archive to which the skeleton will be packed.
- *
- */
- explicit packed_skeleton_oarchive(packed_oarchive & archive)
- : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(archive)
- , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
- {}
-
- /**
- * Retrieve the archive corresponding to this skeleton.
- */
- const packed_oarchive& get_skeleton() const
- {
- return this->implementation_archive;
- }
-
-private:
- /// Store the actual archive that holds the structure.
- packed_oarchive skeleton_archive_;
-};
-
namespace detail {
typedef boost::mpi::detail::forward_skeleton_oarchive<boost::mpi::packed_skeleton_oarchive,boost::mpi::packed_oarchive> type1;
typedef boost::mpi::detail::forward_skeleton_iarchive<boost::mpi::packed_skeleton_iarchive,boost::mpi::packed_iarchive> type2;
diff --git a/boost/mpi/skeleton_and_content_types.hpp b/boost/mpi/skeleton_and_content_types.hpp
new file mode 100644
index 0000000000..825f2dfaee
--- /dev/null
+++ b/boost/mpi/skeleton_and_content_types.hpp
@@ -0,0 +1,363 @@
+// (C) Copyright 2005 Matthias Troyer
+// (C) Copyright 2006 Douglas Gregor <doug.gregor -at gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors: Matthias Troyer
+// Douglas Gregor
+
+/** @file skeleton_and_content_types.hpp
+ *
+ * This header provides facilities that allow the structure of data
+ * types (called the "skeleton") to be transmitted and received
+ * separately from the content stored in those data types. These
+ * facilities are useful when the data in a stable data structure
+ * (e.g., a mesh or a graph) will need to be transmitted
+ * repeatedly. In this case, transmitting the skeleton only once
+ * saves both communication effort (it need not be sent again) and
+ * local computation (serialization need only be performed once for
+ * the content).
+ */
+#ifndef BOOST_MPI_SKELETON_AND_CONTENT_TYPES_HPP
+#define BOOST_MPI_SKELETON_AND_CONTENT_TYPES_HPP
+
+#include <boost/mpi/config.hpp>
+#include <boost/archive/detail/auto_link_archive.hpp>
+#include <boost/mpi/packed_iarchive.hpp>
+#include <boost/mpi/packed_oarchive.hpp>
+#include <boost/mpi/detail/forward_skeleton_iarchive.hpp>
+#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
+#include <boost/mpi/detail/ignore_iprimitive.hpp>
+#include <boost/mpi/detail/ignore_oprimitive.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/archive/detail/register_archive.hpp>
+
+namespace boost { namespace mpi {
+
+/**
+ * @brief A proxy that requests that the skeleton of an object be
+ * transmitted.
+ *
+ * The @c skeleton_proxy is a lightweight proxy object used to
+ * indicate that the skeleton of an object, not the object itself,
+ * should be transmitted. It can be used with the @c send and @c recv
+ * operations of communicators or the @c broadcast collective. When a
+ * @c skeleton_proxy is sent, Boost.MPI generates a description
+ * containing the structure of the stored object. When that skeleton
+ * is received, the receiving object is reshaped to match the
+ * structure. Once the skeleton of an object has been transmitted, its
+ * @c content can be transmitted separately (often several times)
+ * without changing the structure of the object.
+ */
+template <class T>
+struct BOOST_MPI_DECL skeleton_proxy
+{
+ /**
+ * Constructs a @c skeleton_proxy that references object @p x.
+ *
+ * @param x the object whose structure will be transmitted or
+ * altered.
+ */
+ skeleton_proxy(T& x)
+ : object(x)
+ {}
+
+ T& object;
+};
+
+/**
+ * @brief Create a skeleton proxy object.
+ *
+ * This routine creates an instance of the skeleton_proxy class. It
+ * will typically be used when calling @c send, @c recv, or @c
+ * broadcast, to indicate that only the skeleton (structure) of an
+ * object should be transmitted and not its contents.
+ *
+ * @param x the object whose structure will be transmitted.
+ *
+ * @returns a skeleton_proxy object referencing @p x
+ */
+template <class T>
+inline const skeleton_proxy<T> skeleton(T& x)
+{
+ return skeleton_proxy<T>(x);
+}
+
+namespace detail {
+ /// @brief a class holding an MPI datatype
+ /// INTERNAL ONLY
+ /// the type is freed upon destruction
+ class BOOST_MPI_DECL mpi_datatype_holder : public boost::noncopyable
+ {
+ public:
+ mpi_datatype_holder()
+ : is_committed(false)
+ {}
+
+ mpi_datatype_holder(MPI_Datatype t, bool committed = true)
+ : d(t)
+ , is_committed(committed)
+ {}
+
+ void commit()
+ {
+ BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&d));
+ is_committed=true;
+ }
+
+ MPI_Datatype get_mpi_datatype() const
+ {
+ return d;
+ }
+
+ ~mpi_datatype_holder()
+ {
+ int finalized=0;
+ BOOST_MPI_CHECK_RESULT(MPI_Finalized,(&finalized));
+ if (!finalized && is_committed)
+ BOOST_MPI_CHECK_RESULT(MPI_Type_free,(&d));
+ }
+
+ private:
+ MPI_Datatype d;
+ bool is_committed;
+ };
+} // end namespace detail
+
+/** @brief A proxy object that transfers the content of an object
+ * without its structure.
+ *
+ * The @c content class indicates that Boost.MPI should transmit or
+ * receive the content of an object, but without any information
+ * about the structure of the object. It is only meaningful to
+ * transmit the content of an object after the receiver has already
+ * received the skeleton for the same object.
+ *
+ * Most users will not use @c content objects directly. Rather, they
+ * will invoke @c send, @c recv, or @c broadcast operations using @c
+ * get_content().
+ */
+class BOOST_MPI_DECL content
+{
+public:
+ /**
+ * Constructs an empty @c content object. This object will not be
+ * useful for any Boost.MPI operations until it is reassigned.
+ */
+ content() {}
+
+ /**
+ * This routine initializes the @c content object with an MPI data
+ * type that refers to the content of an object without its structure.
+ *
+ * @param d the MPI data type referring to the content of the object.
+ *
+ * @param committed @c true indicates that @c MPI_Type_commit has
+ * already been executed for the data type @p d.
+ */
+ content(MPI_Datatype d, bool committed=true)
+ : holder(new detail::mpi_datatype_holder(d,committed))
+ {}
+
+ /**
+ * Replace the MPI data type referencing the content of an object.
+ *
+ * @param d the new MPI data type referring to the content of the
+ * object.
+ *
+ * @returns *this
+ */
+ const content& operator=(MPI_Datatype d)
+ {
+ holder.reset(new detail::mpi_datatype_holder(d));
+ return *this;
+ }
+
+ /**
+ * Retrieve the MPI data type that refers to the content of the
+ * object.
+ *
+ * @returns the MPI data type, which should only be transmitted or
+ * received using @c MPI_BOTTOM as the address.
+ */
+ MPI_Datatype get_mpi_datatype() const
+ {
+ return holder->get_mpi_datatype();
+ }
+
+ /**
+ * Commit the MPI data type referring to the content of the
+ * object.
+ */
+ void commit()
+ {
+ holder->commit();
+ }
+
+private:
+ boost::shared_ptr<detail::mpi_datatype_holder> holder;
+};
+
+/** @brief Returns the content of an object, suitable for transmission
+ * via Boost.MPI.
+ *
+ * The function creates an absolute MPI datatype for the object,
+ * where all offsets are counted from the address 0 (a.k.a. @c
+ * MPI_BOTTOM) instead of the address @c &x of the object. This
+ * allows the creation of MPI data types for complex data structures
+ * containing pointers, such as linked lists or trees.
+ *
+ * The disadvantage, compared to relative MPI data types, is that for
+ * each object a new MPI data type has to be created.
+ *
+ * The contents of an object can only be transmitted when the
+ * receiver already has an object with the same structure or shape as
+ * the sender. To accomplish this, first transmit the skeleton of the
+ * object using, e.g., @c skeleton() or @c skeleton_proxy.
+ *
+ * The type @c T has to allow creation of an absolute MPI data type
+ * (content).
+ *
+ * @param x the object for which the content will be transmitted.
+ *
+ * @returns the content of the object @p x, which can be used for
+ * transmission via @c send, @c recv, or @c broadcast.
+ */
+template <class T> const content get_content(const T& x);
+
+/** @brief An archiver that reconstructs a data structure based on the
+ * binary skeleton stored in a buffer.
+ *
+ * The @c packed_skeleton_iarchive class is an Archiver (as in the
+ * Boost.Serialization library) that can construct the shape of a
+ * data structure based on a binary skeleton stored in a buffer. The
+ * @c packed_skeleton_iarchive is typically used by the receiver of a
+ * skeleton, to prepare a data structure that will eventually receive
+ * content separately.
+ *
+ * Users will not generally need to use @c packed_skeleton_iarchive
+ * directly. Instead, use @c skeleton or @c get_skeleton.
+ */
+class BOOST_MPI_DECL packed_skeleton_iarchive
+ : public detail::ignore_iprimitive,
+ public detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>
+{
+public:
+ /**
+ * Construct a @c packed_skeleton_iarchive for the given
+ * communicator.
+ *
+ * @param comm The communicator over which this archive will be
+ * transmitted.
+ *
+ * @param flags Control the serialization of the skeleton. Refer to
+ * the Boost.Serialization documentation before changing the
+ * default flags.
+ */
+ packed_skeleton_iarchive(MPI_Comm const & comm,
+ unsigned int flags = boost::archive::no_header)
+ : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(skeleton_archive_)
+ , skeleton_archive_(comm,flags)
+ {}
+
+ /**
+ * Construct a @c packed_skeleton_iarchive that unpacks a skeleton
+ * from the given @p archive.
+ *
+ * @param archive the archive from which the skeleton will be
+ * unpacked.
+ *
+ */
+ explicit packed_skeleton_iarchive(packed_iarchive & archive)
+ : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(archive)
+ , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
+ {}
+
+ /**
+ * Retrieve the archive corresponding to this skeleton.
+ */
+ const packed_iarchive& get_skeleton() const
+ {
+ return this->implementation_archive;
+ }
+
+ /**
+ * Retrieve the archive corresponding to this skeleton.
+ */
+ packed_iarchive& get_skeleton()
+ {
+ return this->implementation_archive;
+ }
+
+private:
+ /// Store the actual archive that holds the structure, unless the
+ /// user overrides this with their own archive.
+ packed_iarchive skeleton_archive_;
+};
+
+/** @brief An archiver that records the binary skeleton of a data
+ * structure into a buffer.
+ *
+ * The @c packed_skeleton_oarchive class is an Archiver (as in the
+ * Boost.Serialization library) that can record the shape of a data
+ * structure (called the "skeleton") into a binary representation
+ * stored in a buffer. The @c packed_skeleton_oarchive is typically
+ * used by the sender of a skeleton, to pack the skeleton of a data
+ * structure for transmission separately from the content.
+ *
+ * Users will not generally need to use @c packed_skeleton_oarchive
+ * directly. Instead, use @c skeleton or @c get_skeleton.
+ */
+class BOOST_MPI_DECL packed_skeleton_oarchive
+ : public detail::ignore_oprimitive,
+ public detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>
+{
+public:
+ /**
+ * Construct a @c packed_skeleton_oarchive for the given
+ * communicator.
+ *
+ * @param comm The communicator over which this archive will be
+ * transmitted.
+ *
+ * @param flags Control the serialization of the skeleton. Refer to
+ * the Boost.Serialization documentation before changing the
+ * default flags.
+ */
+ packed_skeleton_oarchive(MPI_Comm const & comm,
+ unsigned int flags = boost::archive::no_header)
+ : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(skeleton_archive_)
+ , skeleton_archive_(comm,flags)
+ {}
+
+ /**
+ * Construct a @c packed_skeleton_oarchive that packs a skeleton
+ * into the given @p archive.
+ *
+ * @param archive the archive to which the skeleton will be packed.
+ *
+ */
+ explicit packed_skeleton_oarchive(packed_oarchive & archive)
+ : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(archive)
+ , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
+ {}
+
+ /**
+ * Retrieve the archive corresponding to this skeleton.
+ */
+ const packed_oarchive& get_skeleton() const
+ {
+ return this->implementation_archive;
+ }
+
+private:
+ /// Store the actual archive that holds the structure.
+ packed_oarchive skeleton_archive_;
+};
+
+
+} } // end namespace boost::mpi
+
+#endif // BOOST_MPI_SKELETON_AND_CONTENT_TYPES_HPP
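
Taken together, the types collected in this header implement a two-phase protocol: transmit an object's skeleton once, then re-send its content whenever the values change. A minimal sketch, assuming a communicator with at least two ranks and Boost.Serialization support for std::list:

// Rank 0 ships the list's structure, then its values; rank 1 first
// reshapes its own list to match, then receives the data in place.
#include <boost/mpi.hpp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/serialization/list.hpp>
#include <list>

void exchange(boost::mpi::communicator& comm)
{
  std::list<int> values;
  if (comm.rank() == 0) {
    values.assign(10, 42);
    comm.send(1, 0, boost::mpi::skeleton(values));     // structure only
    comm.send(1, 1, boost::mpi::get_content(values));  // data only
  } else if (comm.rank() == 1) {
    comm.recv(0, 0, boost::mpi::skeleton(values));     // allocate shape
    comm.recv(0, 1, boost::mpi::get_content(values));  // fill in place
  }
}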
diff --git a/boost/mpi/status.hpp b/boost/mpi/status.hpp
index d444faa41d..326c0a8c08 100644
--- a/boost/mpi/status.hpp
+++ b/boost/mpi/status.hpp
@@ -14,6 +14,7 @@
#include <boost/mpi/config.hpp>
#include <boost/optional.hpp>
+#include <boost/mpl/bool.hpp>
namespace boost { namespace mpi {