path: root/boost/mpi/collectives
author     DongHun Kwak <dh0128.kwak@samsung.com>    2019-12-05 15:12:59 +0900
committer  DongHun Kwak <dh0128.kwak@samsung.com>    2019-12-05 15:12:59 +0900
commit     b8cf34c691623e4ec329053cbbf68522a855882d (patch)
tree       34da08632a99677f6b79ecb65e5b655a5b69a67f /boost/mpi/collectives
parent     3fdc3e5ee96dca5b11d1694975a65200787eab86 (diff)
Imported Upstream version 1.67.0  (upstream/1.67.0)
Diffstat (limited to 'boost/mpi/collectives')
-rw-r--r--  boost/mpi/collectives/all_gather.hpp   | 131
-rw-r--r--  boost/mpi/collectives/all_gatherv.hpp  | 140
-rw-r--r--  boost/mpi/collectives/all_to_all.hpp   |   6
-rw-r--r--  boost/mpi/collectives/broadcast.hpp    |  27
-rw-r--r--  boost/mpi/collectives/gather.hpp       | 170
-rw-r--r--  boost/mpi/collectives/gatherv.hpp      |  37
-rw-r--r--  boost/mpi/collectives/scatter.hpp      | 212
-rw-r--r--  boost/mpi/collectives/scatterv.hpp     | 203
8 files changed, 591 insertions(+), 335 deletions(-)
diff --git a/boost/mpi/collectives/all_gather.hpp b/boost/mpi/collectives/all_gather.hpp
index da73186c64..4adaeb9c87 100644
--- a/boost/mpi/collectives/all_gather.hpp
+++ b/boost/mpi/collectives/all_gather.hpp
@@ -1,52 +1,107 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
+// Copyright (C) 2005, 2006 Douglas Gregor.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
-// Message Passing Interface 1.1 -- Section 4.7. Gather-to-all
-#ifndef BOOST_MPI_ALL_GATHER_HPP
-#define BOOST_MPI_ALL_GATHER_HPP
+// Message Passing Interface 1.1 -- Section 4.5. Gather
+#ifndef BOOST_MPI_ALLGATHER_HPP
+#define BOOST_MPI_ALLGATHER_HPP
+#include <cassert>
+#include <cstddef>
+#include <numeric>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
-#include <boost/serialization/vector.hpp>
-
-// all_gather falls back to gather+broadcast in some cases
-#include <boost/mpi/collectives/broadcast.hpp>
-#include <boost/mpi/collectives/gather.hpp>
+#include <boost/mpi/packed_oarchive.hpp>
+#include <boost/mpi/packed_iarchive.hpp>
+#include <boost/mpi/detail/point_to_point.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/detail/offsets.hpp>
+#include <boost/mpi/detail/antiques.hpp>
+#include <boost/assert.hpp>
namespace boost { namespace mpi {
namespace detail {
- // We're all-gathering for a type that has an associated MPI
- // datatype, so we'll use MPI_Gather to do all of the work.
- template<typename T>
- void
- all_gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, mpl::true_)
- {
- MPI_Datatype type = boost::mpi::get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Allgather,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, comm));
- }
+// We're all-gathering for a type that has an associated MPI
+// datatype, so we'll use MPI_Gather to do all of the work.
+template<typename T>
+void
+all_gather_impl(const communicator& comm, const T* in_values, int n,
+ T* out_values, mpl::true_)
+{
+ MPI_Datatype type = get_mpi_datatype<T>(*in_values);
+ BOOST_MPI_CHECK_RESULT(MPI_Allgather,
+ (const_cast<T*>(in_values), n, type,
+ out_values, n, type, comm));
+}
- // We're all-gathering for a type that has no associated MPI
- // type. So, we'll do a manual gather followed by a broadcast.
- template<typename T>
- void
- all_gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, mpl::false_)
- {
- gather(comm, in_values, n, out_values, 0);
- broadcast(comm, out_values, comm.size() * n, 0);
+// We're all-gathering for a type that does not have an
+// associated MPI datatype, so we'll need to serialize
+// it.
+template<typename T>
+void
+all_gather_impl(const communicator& comm, const T* in_values, int n,
+ T* out_values, int const* sizes, int const* skips, mpl::false_)
+{
+ int nproc = comm.size();
+ // first, gather all the sizes; they can differ from one
+ // process to another
+ packed_oarchive oa(comm);
+ for (int i = 0; i < n; ++i) {
+ oa << in_values[i];
}
+ std::vector<int> oasizes(nproc);
+ int oasize = oa.size();
+ BOOST_MPI_CHECK_RESULT(MPI_Allgather,
+ (&oasize, 1, MPI_INTEGER,
+ c_data(oasizes), 1, MPI_INTEGER,
+ MPI_Comm(comm)));
+ // Gather the archives, which can be of different sizes, so
+ // we need to use allgatherv.
+ // Everything is contiguous, so the offsets can be
+ // deduced from the collected sizes.
+ std::vector<int> offsets(nproc);
+ sizes2offsets(oasizes, offsets);
+ packed_iarchive::buffer_type recv_buffer(std::accumulate(oasizes.begin(), oasizes.end(), 0));
+ BOOST_MPI_CHECK_RESULT(MPI_Allgatherv,
+ (const_cast<void*>(oa.address()), int(oa.size()), MPI_BYTE,
+ c_data(recv_buffer), c_data(oasizes), c_data(offsets), MPI_BYTE,
+ MPI_Comm(comm)));
+ for (int src = 0; src < nproc; ++src) {
+ int nb = sizes ? sizes[src] : n;
+ int skip = skips ? skips[src] : 0;
+ std::advance(out_values, skip);
+ if (src == comm.rank()) { // this is our local data
+ for (int i = 0; i < nb; ++i) {
+ *out_values++ = *in_values++;
+ }
+ } else {
+ packed_iarchive ia(comm, recv_buffer, boost::archive::no_header, offsets[src]);
+ for (int i = 0; i < nb; ++i) {
+ ia >> *out_values++;
+ }
+ }
+ }
+}
+
+// We're all-gathering for a type that does not have an
+// associated MPI datatype, so we'll need to serialize
+// it.
+template<typename T>
+void
+all_gather_impl(const communicator& comm, const T* in_values, int n,
+ T* out_values, mpl::false_ isnt_mpi_type)
+{
+ all_gather_impl(comm, in_values, n, out_values, (int const*)0, (int const*)0, isnt_mpi_type);
+}
} // end namespace detail
template<typename T>
-inline void
+void
all_gather(const communicator& comm, const T& in_value, T* out_values)
{
detail::all_gather_impl(comm, &in_value, 1, out_values, is_mpi_datatype<T>());
@@ -54,15 +109,15 @@ all_gather(const communicator& comm, const T& in_value, T* out_values)
template<typename T>
void
-all_gather(const communicator& comm, const T& in_value,
- std::vector<T>& out_values)
+all_gather(const communicator& comm, const T& in_value, std::vector<T>& out_values)
{
+ using detail::c_data;
out_values.resize(comm.size());
- ::boost::mpi::all_gather(comm, &in_value, 1, &out_values[0]);
+ ::boost::mpi::all_gather(comm, in_value, c_data(out_values));
}
template<typename T>
-inline void
+void
all_gather(const communicator& comm, const T* in_values, int n, T* out_values)
{
detail::all_gather_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
@@ -70,11 +125,11 @@ all_gather(const communicator& comm, const T* in_values, int n, T* out_values)
template<typename T>
void
-all_gather(const communicator& comm, const T* in_values, int n,
- std::vector<T>& out_values)
+all_gather(const communicator& comm, const T* in_values, int n, std::vector<T>& out_values)
{
+ using detail::c_data;
out_values.resize(comm.size() * n);
- ::boost::mpi::all_gather(comm, in_values, n, &out_values[0]);
+ ::boost::mpi::all_gather(comm, in_values, n, c_data(out_values));
}
} } // end namespace boost::mpi
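
The new serialized path above runs in two rounds: each rank packs its values into a packed_oarchive, the archive sizes are exchanged with MPI_Allgather, and the variable-length archives themselves are exchanged with MPI_Allgatherv at offsets derived from those sizes. A minimal sketch of that size-then-bytes pattern, written against plain MPI with a string standing in for the packed archive (an illustration only, not Boost.MPI's own code):

#include <mpi.h>
#include <numeric>
#include <string>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;
  int rank = 0, nproc = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &nproc);

  // Stand-in for a serialized archive: a buffer whose length varies per rank.
  std::string payload(static_cast<std::size_t>(rank) + 1, 'x');
  std::vector<char> send(payload.begin(), payload.end());
  int mysize = static_cast<int>(send.size());

  // Round 1: every rank learns every archive size.
  std::vector<int> sizes(nproc);
  MPI_Allgather(&mysize, 1, MPI_INT, sizes.data(), 1, MPI_INT, comm);

  // Offsets are the exclusive prefix sum of the sizes (cf. sizes2offsets).
  std::vector<int> offsets(nproc, 0);
  std::partial_sum(sizes.begin(), sizes.end() - 1, offsets.begin() + 1);

  // Round 2: gather the variable-length byte buffers themselves.
  std::vector<char> recv(std::accumulate(sizes.begin(), sizes.end(), 0));
  MPI_Allgatherv(send.data(), mysize, MPI_BYTE,
                 recv.data(), sizes.data(), offsets.data(), MPI_BYTE, comm);

  if (rank == 0)
    std::cout << "gathered " << recv.size() << " bytes in total\n";
  MPI_Finalize();
  return 0;
}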
diff --git a/boost/mpi/collectives/all_gatherv.hpp b/boost/mpi/collectives/all_gatherv.hpp
new file mode 100644
index 0000000000..064412f5d7
--- /dev/null
+++ b/boost/mpi/collectives/all_gatherv.hpp
@@ -0,0 +1,140 @@
+// Copyright (C) 2005, 2006 Douglas Gregor.
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Message Passing Interface 1.1 -- Section 4.5. Gatherv
+#ifndef BOOST_MPI_ALLGATHERV_HPP
+#define BOOST_MPI_ALLGATHERV_HPP
+
+#include <cassert>
+#include <cstddef>
+#include <numeric>
+#include <vector>
+
+#include <boost/mpi/exception.hpp>
+#include <boost/mpi/datatype.hpp>
+#include <boost/mpi/packed_oarchive.hpp>
+#include <boost/mpi/packed_iarchive.hpp>
+#include <boost/mpi/detail/point_to_point.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/detail/offsets.hpp>
+#include <boost/mpi/detail/antiques.hpp>
+#include <boost/assert.hpp>
+#include <boost/scoped_array.hpp>
+
+namespace boost { namespace mpi {
+
+namespace detail {
+// We're all-gathering for a type that has an associated MPI
+// datatype, so we'll use MPI_Gather to do all of the work.
+template<typename T>
+void
+all_gatherv_impl(const communicator& comm, const T* in_values,
+ T* out_values, int const* sizes, int const* displs, mpl::true_)
+{
+ // Make displacements if not provided
+ scoped_array<int> new_offsets_mem(make_offsets(comm, sizes, displs, -1));
+ if (new_offsets_mem) displs = new_offsets_mem.get();
+ MPI_Datatype type = get_mpi_datatype<T>(*in_values);
+ BOOST_MPI_CHECK_RESULT(MPI_Allgatherv,
+ (const_cast<T*>(in_values), sizes[comm.rank()], type,
+ out_values,
+ const_cast<int*>(sizes),
+ const_cast<int*>(displs),
+ type,
+ comm));
+}
+
+// We're all-gathering for a type that does not have an
+// associated MPI datatype, so we'll need to serialize
+// it.
+template<typename T>
+void
+all_gatherv_impl(const communicator& comm, const T* in_values,
+ T* out_values, int const* sizes, int const* displs,
+ mpl::false_ isnt_mpi_type)
+{
+ // convert displacement to offsets to skip
+ scoped_array<int> skipped(make_skipped_slots(comm, sizes, displs));
+ all_gather_impl(comm, in_values, sizes[comm.rank()], out_values,
+ sizes, skipped.get(), isnt_mpi_type);
+}
+} // end namespace detail
+
+template<typename T>
+void
+all_gatherv(const communicator& comm, const T& in_value, T* out_values,
+ const std::vector<int>& sizes)
+{
+ using detail::c_data;
+ assert(sizes.size() == comm.size());
+ assert(sizes[comm.rank()] == 1);
+ detail::all_gatherv_impl(comm, &in_value, out_values, c_data(sizes), 0, is_mpi_datatype<T>());
+}
+
+template<typename T>
+void
+all_gatherv(const communicator& comm, const T* in_values, T* out_values,
+ const std::vector<int>& sizes)
+{
+ using detail::c_data;
+ assert(int(sizes.size()) == comm.size());
+ detail::all_gatherv_impl(comm, in_values, out_values, c_data(sizes), 0, is_mpi_datatype<T>());
+}
+
+template<typename T>
+void
+all_gatherv(const communicator& comm, std::vector<T> const& in_values, std::vector<T>& out_values,
+ const std::vector<int>& sizes)
+{
+ using detail::c_data;
+ assert(int(sizes.size()) == comm.size());
+ assert(int(in_values.size()) == sizes[comm.rank()]);
+ out_values.resize(std::accumulate(sizes.begin(), sizes.end(), 0));
+ ::boost::mpi::all_gatherv(comm, c_data(in_values), c_data(out_values), sizes);
+}
+
+
+template<typename T>
+void
+all_gatherv(const communicator& comm, const T& in_value, T* out_values,
+ const std::vector<int>& sizes, const std::vector<int>& displs)
+{
+ using detail::c_data;
+ assert(sizes.size() == comm.size());
+ assert(displs.size() == comm.size());
+ detail::all_gatherv_impl(comm, &in_value, 1, out_values,
+ c_data(sizes), c_data(displs), is_mpi_datatype<T>());
+}
+
+template<typename T>
+void
+all_gatherv(const communicator& comm, const T* in_values, T* out_values,
+ const std::vector<int>& sizes, const std::vector<int>& displs)
+{
+ using detail::c_data;
+ assert(sizes.size() == comm.size());
+ assert(displs.size() == comm.size());
+ detail::all_gatherv_impl(comm, in_values, out_values,
+ c_data(sizes), c_data(displs), is_mpi_datatype<T>());
+}
+
+template<typename T>
+void
+all_gatherv(const communicator& comm, std::vector<T> const& in_values, std::vector<T>& out_values,
+ const std::vector<int>& sizes, const std::vector<int>& displs)
+{
+ using detail::c_data;
+ assert(sizes.size() == comm.size());
+ assert(displs.size() == comm.size());
+ assert(in_values.size() == sizes[comm.rank()]);
+ out_values.resize(std::accumulate(sizes.begin(), sizes.end(), 0));
+ ::boost::mpi::all_gatherv(comm, c_data(in_values), c_data(out_values), sizes, displs);
+}
+
+} } // end namespace boost::mpi
+
+#endif // BOOST_MPI_ALLGATHERV_HPP
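
The new all_gatherv.hpp is most conveniently used through its vector overload, where each rank contributes sizes[rank] elements and every rank receives the concatenation. A hedged usage sketch assuming the overloads introduced above; the explicit include of all_gatherv.hpp is a precaution in case boost/mpi.hpp does not pull it in:

#include <boost/mpi.hpp>
#include <boost/mpi/collectives/all_gatherv.hpp>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;

  // Rank r contributes r + 1 elements.
  std::vector<int> sizes(comm.size());
  for (int r = 0; r < comm.size(); ++r) sizes[r] = r + 1;

  std::vector<int> mine(sizes[comm.rank()], comm.rank());
  std::vector<int> all;                       // resized by all_gatherv
  boost::mpi::all_gatherv(comm, mine, all, sizes);

  if (comm.rank() == 0)
    std::cout << "total elements gathered: " << all.size() << '\n';
  return 0;
}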
diff --git a/boost/mpi/collectives/all_to_all.hpp b/boost/mpi/collectives/all_to_all.hpp
index 8c33c2a167..4f20be73d5 100644
--- a/boost/mpi/collectives/all_to_all.hpp
+++ b/boost/mpi/collectives/all_to_all.hpp
@@ -22,7 +22,7 @@
namespace boost { namespace mpi {
namespace detail {
- // We're performaing an all-to-all with a type that has an
+ // We're performing an all-to-all with a type that has an
// associated MPI datatype, so we'll use MPI_Alltoall to do all of
// the work.
template<typename T>
@@ -38,9 +38,7 @@ namespace detail {
// We're performing an all-to-all with a type that does not have an
// associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Alltoall, so
- // we'll just have to send individual messages to the other
- // processes.
+ // it.
template<typename T>
void
all_to_all_impl(const communicator& comm, const T* in_values, int n,
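
For the serialized case described in the comment above, the public all_to_all interface is unchanged. A hedged usage sketch with std::string, a type without an associated MPI datatype (the serialization include is an assumption about what the caller still needs to provide):

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>  // std::string is serialized, not an MPI datatype
#include <string>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;

  // One outgoing string per destination rank.
  std::vector<std::string> out(comm.size());
  for (int dest = 0; dest < comm.size(); ++dest)
    out[dest] = "from " + std::to_string(comm.rank()) + " to " + std::to_string(dest);

  std::vector<std::string> in;
  boost::mpi::all_to_all(comm, out, in);

  std::cout << "rank " << comm.rank() << " received: " << in[0] << '\n';
  return 0;
}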
diff --git a/boost/mpi/collectives/broadcast.hpp b/boost/mpi/collectives/broadcast.hpp
index d5160cff7f..f8b27f0b4f 100644
--- a/boost/mpi/collectives/broadcast.hpp
+++ b/boost/mpi/collectives/broadcast.hpp
@@ -100,22 +100,35 @@ namespace detail {
}
// We're sending a type that does not have an associated MPI
- // datatype, so we'll need to serialize it. Unfortunately, this
- // means that we cannot use MPI_Bcast, so we'll just send from the
- // root to everyone else.
+ // datatype, so we'll need to serialize it.
template<typename T>
void
broadcast_impl(const communicator& comm, T* values, int n, int root,
- mpl::false_)
+ mpl::false_ non_mpi_datatype)
{
+ // Implementation proposed by Lorenz Hübschle-Schneider
if (comm.rank() == root) {
packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
+ for (int i = 0; i < n; ++i) {
oa << values[i];
- broadcast(comm, oa, root);
+ }
+ std::size_t asize = oa.size();
+ broadcast(comm, asize, root);
+ void const* aptr = oa.address();
+ BOOST_MPI_CHECK_RESULT(MPI_Bcast,
+ (const_cast<void*>(aptr), asize,
+ MPI_BYTE,
+ root, MPI_Comm(comm)));
} else {
packed_iarchive ia(comm);
- broadcast(comm, ia, root);
+ std::size_t asize;
+ broadcast(comm, asize, root);
+ ia.resize(asize);
+ void* aptr = ia.address();
+ BOOST_MPI_CHECK_RESULT(MPI_Bcast,
+ (aptr, asize,
+ MPI_BYTE,
+ root, MPI_Comm(comm)));
for (int i = 0; i < n; ++i)
ia >> values[i];
}
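
The reworked serialized branch above replaces the point-to-point fallback with two MPI_Bcast calls: one for the archive size, one for the archive bytes. A minimal plain-MPI sketch of that two-phase pattern, with a string standing in for the packed archive (illustration only):

#include <mpi.h>
#include <string>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;
  int rank;
  MPI_Comm_rank(comm, &rank);
  const int root = 0;

  std::string archive = (rank == root) ? "serialized payload" : std::string();

  // Phase 1: broadcast the size so non-root ranks can allocate.
  unsigned long size = archive.size();
  MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG, root, comm);

  // Phase 2: broadcast the raw bytes.
  std::vector<char> buf(archive.begin(), archive.end());
  buf.resize(size);
  MPI_Bcast(buf.data(), static_cast<int>(size), MPI_BYTE, root, comm);

  if (rank != root)
    std::cout << "rank " << rank << " received " << size << " bytes\n";
  MPI_Finalize();
  return 0;
}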
diff --git a/boost/mpi/collectives/gather.hpp b/boost/mpi/collectives/gather.hpp
index 70dfd65313..386bfdd1a1 100644
--- a/boost/mpi/collectives/gather.hpp
+++ b/boost/mpi/collectives/gather.hpp
@@ -8,6 +8,9 @@
#ifndef BOOST_MPI_GATHER_HPP
#define BOOST_MPI_GATHER_HPP
+#include <cassert>
+#include <cstddef>
+#include <numeric>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
#include <vector>
@@ -16,89 +19,116 @@
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
+#include <boost/mpi/detail/offsets.hpp>
+#include <boost/mpi/detail/antiques.hpp>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
namespace detail {
- // We're gathering at the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Gather to do all of the work.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Gather,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, root, comm));
- }
+// We're gathering at the root for a type that has an associated MPI
+// datatype, so we'll use MPI_Gather to do all of the work.
+template<typename T>
+void
+gather_impl(const communicator& comm, const T* in_values, int n,
+ T* out_values, int root, mpl::true_)
+{
+ MPI_Datatype type = get_mpi_datatype<T>(*in_values);
+ BOOST_MPI_CHECK_RESULT(MPI_Gather,
+ (const_cast<T*>(in_values), n, type,
+ out_values, n, type, root, comm));
+}
- // We're gathering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Gather to do all of the work.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Gather,
- (const_cast<T*>(in_values), n, type,
- 0, n, type, root, comm));
- }
+// We're gathering from a non-root for a type that has an associated MPI
+// datatype, so we'll use MPI_Gather to do all of the work.
+template<typename T>
+void
+gather_impl(const communicator& comm, const T* in_values, int n, int root,
+ mpl::true_ is_mpi_type)
+{
+ assert(comm.rank() != root);
+ gather_impl(comm, in_values, n, (T*)0, root, is_mpi_type);
+}
- // We're gathering at the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gather, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int size = comm.size();
-
- for (int src = 0; src < size; ++src) {
- if (src == root)
- std::copy(in_values, in_values + n, out_values + n * src);
- else
- comm.recv(src, tag, out_values + n * src, n);
+// We're gathering at the root for a type that does not have an
+// associated MPI datatype, so we'll need to serialize
+// it.
+template<typename T>
+void
+gather_impl(const communicator& comm, const T* in_values, int n, T* out_values,
+ int const* nslot, int const* nskip, int root, mpl::false_)
+{
+ int nproc = comm.size();
+ // first, gather all the sizes; they can differ from one
+ // process to another
+ packed_oarchive oa(comm);
+ for (int i = 0; i < n; ++i) {
+ oa << in_values[i];
+ }
+ bool is_root = comm.rank() == root;
+ std::vector<int> oasizes(is_root ? nproc : 0);
+ int oasize = oa.size();
+ BOOST_MPI_CHECK_RESULT(MPI_Gather,
+ (&oasize, 1, MPI_INTEGER,
+ c_data(oasizes), 1, MPI_INTEGER,
+ root, MPI_Comm(comm)));
+ // Gather the archives, which can be of different sizes, so
+ // we need to use gatherv.
+ // Everything is contiguous (in the transmitted archive), so
+ // the offsets can be deduced from the collected sizes.
+ std::vector<int> offsets;
+ if (is_root) sizes2offsets(oasizes, offsets);
+ packed_iarchive::buffer_type recv_buffer(is_root ? std::accumulate(oasizes.begin(), oasizes.end(), 0) : 0);
+ BOOST_MPI_CHECK_RESULT(MPI_Gatherv,
+ (const_cast<void*>(oa.address()), int(oa.size()), MPI_BYTE,
+ c_data(recv_buffer), c_data(oasizes), c_data(offsets), MPI_BYTE,
+ root, MPI_Comm(comm)));
+ if (is_root) {
+ for (int src = 0; src < nproc; ++src) {
+ // handle variadic case
+ int nb = nslot ? nslot[src] : n;
+ int skip = nskip ? nskip[src] : 0;
+ std::advance(out_values, skip);
+ if (src == root) {
+ BOOST_ASSERT(nb == n);
+ for (int i = 0; i < nb; ++i) {
+ *out_values++ = *in_values++;
+ }
+ } else {
+ packed_iarchive ia(comm, recv_buffer, boost::archive::no_header, offsets[src]);
+ for (int i = 0; i < nb; ++i) {
+ ia >> *out_values++;
+ }
+ }
}
}
+}
- // We're gathering at a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gather, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
- comm.send(root, tag, in_values, n);
- }
+// We're gathering at a non-root for a type that does not have an
+// associated MPI datatype, so we'll need to serialize
+// it.
+template<typename T>
+void
+gather_impl(const communicator& comm, const T* in_values, int n, T* out_values, int root,
+ mpl::false_ is_mpi_type)
+{
+ gather_impl(comm, in_values, n, out_values, (int const*)0, (int const*)0, root, is_mpi_type);
+}
} // end namespace detail
template<typename T>
void
gather(const communicator& comm, const T& in_value, T* out_values, int root)
{
- if (comm.rank() == root)
- detail::gather_impl(comm, &in_value, 1, out_values, root,
- is_mpi_datatype<T>());
- else
- detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
+ BOOST_ASSERT(out_values || (comm.rank() != root));
+ detail::gather_impl(comm, &in_value, 1, out_values, root, is_mpi_datatype<T>());
}
template<typename T>
void gather(const communicator& comm, const T& in_value, int root)
{
BOOST_ASSERT(comm.rank() != root);
- detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
+ detail::gather_impl(comm, &in_value, 1, (T*)0, root, is_mpi_datatype<T>());
}
template<typename T>
@@ -106,12 +136,11 @@ void
gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
int root)
{
+ using detail::c_data;
if (comm.rank() == root) {
out_values.resize(comm.size());
- ::boost::mpi::gather(comm, in_value, &out_values[0], root);
- } else {
- ::boost::mpi::gather(comm, in_value, root);
}
+ ::boost::mpi::gather(comm, in_value, c_data(out_values), root);
}
template<typename T>
@@ -119,11 +148,8 @@ void
gather(const communicator& comm, const T* in_values, int n, T* out_values,
int root)
{
- if (comm.rank() == root)
- detail::gather_impl(comm, in_values, n, out_values, root,
- is_mpi_datatype<T>());
- else
- detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
+ detail::gather_impl(comm, in_values, n, out_values, root,
+ is_mpi_datatype<T>());
}
template<typename T>
@@ -133,10 +159,8 @@ gather(const communicator& comm, const T* in_values, int n,
{
if (comm.rank() == root) {
out_values.resize(comm.size() * n);
- ::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
- }
- else
- ::boost::mpi::gather(comm, in_values, n, root);
+ }
+ ::boost::mpi::gather(comm, in_values, n, out_values.data(), root);
}
template<typename T>
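
At the user level, gathering a serialized type now rides on the MPI_Gather + MPI_Gatherv path above rather than per-rank point-to-point messages. A hedged usage sketch with std::string (the serialization include is assumed to be the caller's responsibility):

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <string>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;
  const int root = 0;

  std::string mine = "rank " + std::to_string(comm.rank());
  std::vector<std::string> all;               // filled only at the root
  boost::mpi::gather(comm, mine, all, root);

  if (comm.rank() == root)
    std::cout << "gathered " << all.size() << " strings\n";
  return 0;
}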
diff --git a/boost/mpi/collectives/gatherv.hpp b/boost/mpi/collectives/gatherv.hpp
index eb5f9c16dc..6b8d706fc6 100644
--- a/boost/mpi/collectives/gatherv.hpp
+++ b/boost/mpi/collectives/gatherv.hpp
@@ -8,15 +8,18 @@
#ifndef BOOST_MPI_GATHERV_HPP
#define BOOST_MPI_GATHERV_HPP
+#include <vector>
+
#include <boost/mpi/exception.hpp>
#include <boost/mpi/datatype.hpp>
-#include <vector>
#include <boost/mpi/packed_oarchive.hpp>
#include <boost/mpi/packed_iarchive.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
+#include <boost/mpi/detail/offsets.hpp>
#include <boost/assert.hpp>
+#include <boost/scoped_array.hpp>
namespace boost { namespace mpi {
@@ -58,41 +61,21 @@ namespace detail {
gatherv_impl(const communicator& comm, const T* in_values, int in_size,
T* out_values, const int* sizes, const int* displs, int root, mpl::false_)
{
- int tag = environment::collectives_tag();
- int nprocs = comm.size();
-
- for (int src = 0; src < nprocs; ++src) {
- if (src == root)
- // Our own values will never be transmitted: just copy them.
- std::copy(in_values, in_values + in_size, out_values + displs[src]);
- else {
-// comm.recv(src, tag, out_values + displs[src], sizes[src]);
- // Receive archive
- packed_iarchive ia(comm);
- MPI_Status status;
- detail::packed_archive_recv(comm, src, tag, ia, status);
- for (int i = 0; i < sizes[src]; ++i)
- ia >> out_values[ displs[src] + i ];
- }
- }
+ // convert displacement to offsets to skip
+ scoped_array<int> skipped(make_skipped_slots(comm, sizes, displs, root));
+ gather_impl(comm, in_values, in_size, out_values, sizes, skipped.get(), root, mpl::false_());
}
// We're gathering at a non-root for a type that does not have an
// associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gatherv, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
+ // it.
template<typename T>
void
gatherv_impl(const communicator& comm, const T* in_values, int in_size, int root,
mpl::false_)
{
- int tag = environment::collectives_tag();
-// comm.send(root, tag, in_values, in_size);
- packed_oarchive oa(comm);
- for (int i = 0; i < in_size; ++i)
- oa << in_values[i];
- detail::packed_archive_send(comm, root, tag, oa);
+ gather_impl(comm, in_values, in_size, (T*)0,(int const*)0,(int const*)0, root,
+ mpl::false_());
}
} // end namespace detail
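
The serialized gatherv above now delegates to gather_impl after turning the caller's displacements into per-rank skips via make_skipped_slots, whose body is not part of this diff. The following is only a plausible, self-contained reconstruction of that displacement-to-skip conversion (a sketch under that assumption, not Boost's helper):

#include <iostream>
#include <vector>

std::vector<int> displs_to_skips(const std::vector<int>& sizes,
                                 const std::vector<int>& displs) {
  std::vector<int> skips(sizes.size());
  int end_of_previous = 0;                   // one past the last slot already written
  for (std::size_t r = 0; r < sizes.size(); ++r) {
    skips[r] = displs[r] - end_of_previous;  // slots to jump over before rank r's block
    end_of_previous = displs[r] + sizes[r];
  }
  return skips;
}

int main() {
  std::vector<int> sizes  = {2, 3, 1};
  std::vector<int> displs = {0, 4, 8};       // gaps after ranks 0 and 1
  for (int s : displs_to_skips(sizes, displs)) std::cout << s << ' ';
  std::cout << '\n';                          // prints: 0 2 1
  return 0;
}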
diff --git a/boost/mpi/collectives/scatter.hpp b/boost/mpi/collectives/scatter.hpp
index 196682dd59..0c91b1e6aa 100644
--- a/boost/mpi/collectives/scatter.hpp
+++ b/boost/mpi/collectives/scatter.hpp
@@ -16,94 +16,147 @@
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
+#include <boost/mpi/detail/offsets.hpp>
+#include <boost/mpi/detail/antiques.hpp>
#include <boost/assert.hpp>
namespace boost { namespace mpi {
namespace detail {
- // We're scattering from the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatter to do all of the work.
- template<typename T>
- void
- scatter_impl(const communicator& comm, const T* in_values, T* out_values,
- int n, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatter,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, root, comm));
- }
+// We're scattering from the root for a type that has an associated MPI
+// datatype, so we'll use MPI_Scatter to do all of the work.
+template<typename T>
+void
+scatter_impl(const communicator& comm, const T* in_values, T* out_values,
+ int n, int root, mpl::true_)
+{
+ MPI_Datatype type = get_mpi_datatype<T>(*in_values);
+ BOOST_MPI_CHECK_RESULT(MPI_Scatter,
+ (const_cast<T*>(in_values), n, type,
+ out_values, n, type, root, comm));
+}
- // We're scattering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatter to do all of the work.
- template<typename T>
- void
- scatter_impl(const communicator& comm, T* out_values, int n, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*out_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatter,
- (0, n, type,
- out_values, n, type,
- root, comm));
+// We're scattering from a non-root for a type that has an associated MPI
+// datatype, so we'll use MPI_Scatter to do all of the work.
+template<typename T>
+void
+scatter_impl(const communicator& comm, T* out_values, int n, int root,
+ mpl::true_)
+{
+ MPI_Datatype type = get_mpi_datatype<T>(*out_values);
+ BOOST_MPI_CHECK_RESULT(MPI_Scatter,
+ (0, n, type,
+ out_values, n, type,
+ root, comm));
+}
+
+// Fill the send buffer while keeping track of each slot's footprint.
+// Used in the first step of both scatter and scatterv.
+// nslots contains the number of slots being sent
+// to each process (identical values for scatter).
+// skipped_slots, if present, is deduced from the
+// displacement array that the MPI API allows.
+template<typename T>
+void
+fill_scatter_sendbuf(const communicator& comm, T const* values,
+ int const* nslots, int const* skipped_slots,
+ packed_oarchive::buffer_type& sendbuf, std::vector<int>& archsizes) {
+ int nproc = comm.size();
+ archsizes.resize(nproc);
+
+ for (int dest = 0; dest < nproc; ++dest) {
+ if (skipped_slots) { // we need to keep this for backward compatibility
+ for(int k= 0; k < skipped_slots[dest]; ++k) ++values;
+ }
+ packed_oarchive procarchive(comm);
+ for (int i = 0; i < nslots[dest]; ++i) {
+ procarchive << *values++;
+ }
+ int archsize = procarchive.size();
+ sendbuf.resize(sendbuf.size() + archsize);
+ archsizes[dest] = archsize;
+ char const* aptr = static_cast<char const*>(procarchive.address());
+ std::copy(aptr, aptr+archsize, sendbuf.end()-archsize);
}
+}
- // We're scattering from the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Scatter, so
- // we'll just have the root send individual messages to the other
- // processes.
- template<typename T>
- void
- scatter_impl(const communicator& comm, const T* in_values, T* out_values,
- int n, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int size = comm.size();
-
- for (int dest = 0; dest < size; ++dest) {
- if (dest == root) {
- // Our own values will never be transmitted: just copy them.
- std::copy(in_values + dest * n, in_values + (dest + 1) * n, out_values);
- } else {
- // Send archive
- packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
- oa << in_values[dest * n + i];
- detail::packed_archive_send(comm, dest, tag, oa);
- }
+template<typename T, class A>
+T*
+non_const_data(std::vector<T,A> const& v) {
+ using detail::c_data;
+ return const_cast<T*>(c_data(v));
+}
+
+// Dispatch the send buffer among the processes.
+// Used in the second step of both scatter and scatterv.
+// in_values is only provided in the non-variadic case.
+template<typename T>
+void
+dispatch_scatter_sendbuf(const communicator& comm,
+ packed_oarchive::buffer_type const& sendbuf, std::vector<int> const& archsizes,
+ T const* in_values,
+ T* out_values, int n, int root) {
+ // Distribute the sizes
+ int myarchsize;
+ BOOST_MPI_CHECK_RESULT(MPI_Scatter,
+ (non_const_data(archsizes), 1, MPI_INTEGER,
+ &myarchsize, 1, MPI_INTEGER, root, comm));
+ std::vector<int> offsets;
+ if (root == comm.rank()) {
+ sizes2offsets(archsizes, offsets);
+ }
+ // Get my proc archive
+ packed_iarchive::buffer_type recvbuf;
+ recvbuf.resize(myarchsize);
+ BOOST_MPI_CHECK_RESULT(MPI_Scatterv,
+ (non_const_data(sendbuf), non_const_data(archsizes), c_data(offsets), MPI_BYTE,
+ c_data(recvbuf), recvbuf.size(), MPI_BYTE,
+ root, MPI_Comm(comm)));
+ // Unserialize
+ if ( in_values != 0 && root == comm.rank()) {
+ // Our own local values are already here: just copy them.
+ std::copy(in_values + root * n, in_values + (root + 1) * n, out_values);
+ } else {
+ // Otherwise deserialize:
+ packed_iarchive iarchv(comm, recvbuf);
+ for (int i = 0; i < n; ++i) {
+ iarchv >> out_values[i];
}
}
+}
- // We're scattering to a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to de-serialize
- // it. Unfortunately, this means that we cannot use MPI_Scatter, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- scatter_impl(const communicator& comm, T* out_values, int n, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
-
- packed_iarchive ia(comm);
- MPI_Status status;
- detail::packed_archive_recv(comm, root, tag, ia, status);
- for (int i = 0; i < n; ++i)
- ia >> out_values[i];
+// We're scattering from the root for a type that does not have an
+// associated MPI datatype, so we'll need to serialize it.
+template<typename T>
+void
+scatter_impl(const communicator& comm, const T* in_values, T* out_values,
+ int n, int root, mpl::false_)
+{
+ packed_oarchive::buffer_type sendbuf;
+ std::vector<int> archsizes;
+
+ if (root == comm.rank()) {
+ std::vector<int> nslots(comm.size(), n);
+ fill_scatter_sendbuf(comm, in_values, c_data(nslots), (int const*)0, sendbuf, archsizes);
}
+ dispatch_scatter_sendbuf(comm, sendbuf, archsizes, in_values, out_values, n, root);
+}
+
+template<typename T>
+void
+scatter_impl(const communicator& comm, T* out_values, int n, int root,
+ mpl::false_ is_mpi_type)
+{
+ scatter_impl(comm, (T const*)0, out_values, n, root, is_mpi_type);
+}
} // end namespace detail
template<typename T>
void
scatter(const communicator& comm, const T* in_values, T& out_value, int root)
{
- if (comm.rank() == root)
- detail::scatter_impl(comm, in_values, &out_value, 1, root,
- is_mpi_datatype<T>());
- else
- detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
+ detail::scatter_impl(comm, in_values, &out_value, 1, root, is_mpi_datatype<T>());
}
template<typename T>
@@ -111,11 +164,8 @@ void
scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
int root)
{
- if (comm.rank() == root)
- ::boost::mpi::scatter<T>(comm, &in_values[0], out_value, root);
- else
- ::boost::mpi::scatter<T>(comm, static_cast<const T*>(0), out_value,
- root);
+ using detail::c_data;
+ ::boost::mpi::scatter<T>(comm, c_data(in_values), out_value, root);
}
template<typename T>
@@ -130,11 +180,7 @@ void
scatter(const communicator& comm, const T* in_values, T* out_values, int n,
int root)
{
- if (comm.rank() == root)
- detail::scatter_impl(comm, in_values, out_values, n, root,
- is_mpi_datatype<T>());
- else
- detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
+ detail::scatter_impl(comm, in_values, out_values, n, root, is_mpi_datatype<T>());
}
template<typename T>
@@ -142,11 +188,7 @@ void
scatter(const communicator& comm, const std::vector<T>& in_values,
T* out_values, int n, int root)
{
- if (comm.rank() == root)
- ::boost::mpi::scatter(comm, &in_values[0], out_values, n, root);
- else
- ::boost::mpi::scatter(comm, static_cast<const T*>(0), out_values,
- n, root);
+ ::boost::mpi::scatter(comm, &in_values[0], out_values, n, root);
}
template<typename T>
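
With fill_scatter_sendbuf and dispatch_scatter_sendbuf in place, scattering a serialized type goes through MPI_Scatter (archive sizes) followed by MPI_Scatterv (archive bytes). A hedged usage sketch of the unchanged public interface with std::string (the serialization include is assumed on the caller's side):

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <string>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;
  const int root = 0;

  std::vector<std::string> all;
  if (comm.rank() == root)
    for (int r = 0; r < comm.size(); ++r)
      all.push_back("payload for rank " + std::to_string(r));

  std::string mine;
  boost::mpi::scatter(comm, all, mine, root);
  std::cout << "rank " << comm.rank() << ": " << mine << '\n';
  return 0;
}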
diff --git a/boost/mpi/collectives/scatterv.hpp b/boost/mpi/collectives/scatterv.hpp
index 6e6f27002b..57e073c81f 100644
--- a/boost/mpi/collectives/scatterv.hpp
+++ b/boost/mpi/collectives/scatterv.hpp
@@ -8,93 +8,90 @@
#ifndef BOOST_MPI_SCATTERV_HPP
#define BOOST_MPI_SCATTERV_HPP
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/detail/point_to_point.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/assert.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/mpi/collectives/scatter.hpp>
+#include <boost/mpi/detail/offsets.hpp>
+#include <boost/mpi/detail/antiques.hpp>
namespace boost { namespace mpi {
namespace detail {
- // We're scattering from the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatterv to do all of the work.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, const T* in_values, const int* sizes,
- const int* displs, T* out_values, int out_size, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatterv,
- (const_cast<T*>(in_values), const_cast<int*>(sizes),
- const_cast<int*>(displs), type,
- out_values, out_size, type, root, comm));
- }
- // We're scattering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatterv to do all of the work.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, T* out_values, int out_size, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*out_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatterv,
- (0, 0, 0, type,
- out_values, out_size, type,
- root, comm));
- }
+//////////////////////////////////////////////
+/// Implementation for MPI primitive types ///
+//////////////////////////////////////////////
+
+// We're scattering from the root for a type that has an associated MPI
+// datatype, so we'll use MPI_Scatterv to do all of the work.
+template<typename T>
+void
+scatterv_impl(const communicator& comm, const T* in_values, T* out_values, int out_size,
+ const int* sizes, const int* displs, int root, mpl::true_)
+{
+ assert(!sizes || out_size == sizes[comm.rank()]);
+ assert(bool(sizes) == bool(in_values));
+
+ scoped_array<int> new_offsets_mem(make_offsets(comm, sizes, displs, root));
+ if (new_offsets_mem) displs = new_offsets_mem.get();
+ MPI_Datatype type = get_mpi_datatype<T>(*in_values);
+ BOOST_MPI_CHECK_RESULT(MPI_Scatterv,
+ (const_cast<T*>(in_values), const_cast<int*>(sizes),
+ const_cast<int*>(displs), type,
+ out_values, out_size, type, root, comm));
+}
- // We're scattering from the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Scatterv, so
- // we'll just have the root send individual messages to the other
- // processes.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, const T* in_values, const int* sizes,
- const int* displs, T* out_values, int out_size, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int nprocs = comm.size();
-
- for (int dest = 0; dest < nprocs; ++dest) {
- if (dest == root) {
- // Our own values will never be transmitted: just copy them.
- std::copy(in_values + displs[dest],
- in_values + displs[dest] + out_size, out_values);
- } else {
- // Send archive
- packed_oarchive oa(comm);
- for (int i = 0; i < sizes[dest]; ++i)
- oa << in_values[ displs[dest] + i ];
- detail::packed_archive_send(comm, dest, tag, oa);
- }
+// We're scattering from a non-root for a type that has an associated MPI
+// datatype, so we'll use MPI_Scatterv to do all of the work.
+template<typename T>
+void
+scatterv_impl(const communicator& comm, T* out_values, int out_size, int root,
+ mpl::true_ is_mpi_type)
+{
+ scatterv_impl(comm, (T const*)0, out_values, out_size,
+ (const int*)0, (const int*)0, root, is_mpi_type);
+}
+
+//////////////////////////////////////////////////
+/// Implementation for non MPI primitive types ///
+//////////////////////////////////////////////////
+
+// We're scattering from the root for a type that does not have an
+// associated MPI datatype, so we'll need to serialize it.
+template<typename T>
+void
+scatterv_impl(const communicator& comm, const T* in_values, T* out_values, int out_size,
+ int const* sizes, int const* displs, int root, mpl::false_)
+{
+ packed_oarchive::buffer_type sendbuf;
+ bool is_root = comm.rank() == root;
+ int nproc = comm.size();
+ std::vector<int> archsizes;
+ if (is_root) {
+ assert(out_size == sizes[comm.rank()]);
+ archsizes.resize(nproc);
+ std::vector<int> skipped;
+ if (displs) {
+ skipped.resize(nproc);
+ offsets2skipped(sizes, displs, c_data(skipped), nproc);
+ displs = c_data(skipped);
}
+ fill_scatter_sendbuf(comm, in_values, sizes, (int const*)0, sendbuf, archsizes);
}
+ dispatch_scatter_sendbuf(comm, sendbuf, archsizes, (T const*)0, out_values, out_size, root);
+}
+
+// We're scattering to a non-root for a type that does not have an
+// associated MPI datatype; the input data is not needed here.
+template<typename T>
+void
+scatterv_impl(const communicator& comm, T* out_values, int n, int root,
+ mpl::false_ isnt_mpi_type)
+{
+ assert(root != comm.rank());
+ scatterv_impl(comm, (T const*)0, out_values, n, (int const*)0, (int const*)0, root, isnt_mpi_type);
+}
- // We're scattering to a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to de-serialize
- // it. Unfortunately, this means that we cannot use MPI_Scatterv, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, T* out_values, int out_size, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
-
- packed_iarchive ia(comm);
- MPI_Status status;
- detail::packed_archive_recv(comm, root, tag, ia, status);
- for (int i = 0; i < out_size; ++i)
- ia >> out_values[i];
- }
} // end namespace detail
template<typename T>
@@ -103,13 +100,9 @@ scatterv(const communicator& comm, const T* in_values,
const std::vector<int>& sizes, const std::vector<int>& displs,
T* out_values, int out_size, int root)
{
- int rank = comm.rank();
- if (rank == root)
- detail::scatterv_impl(comm, in_values, &sizes[0], &displs[0],
- out_values, out_size, root, is_mpi_datatype<T>());
- else
- detail::scatterv_impl(comm, out_values, out_size, root,
- is_mpi_datatype<T>());
+ using detail::c_data;
+ scatterv_impl(comm, in_values, out_values, out_size, c_data(sizes), c_data(displs),
+ root, is_mpi_datatype<T>());
}
template<typename T>
@@ -118,12 +111,9 @@ scatterv(const communicator& comm, const std::vector<T>& in_values,
const std::vector<int>& sizes, const std::vector<int>& displs,
T* out_values, int out_size, int root)
{
- if (comm.rank() == root)
- ::boost::mpi::scatterv(comm, &in_values[0], sizes, displs,
- out_values, out_size, root);
- else
- ::boost::mpi::scatterv(comm, static_cast<const T*>(0), sizes, displs,
- out_values, out_size, root);
+ using detail::c_data;
+ ::boost::mpi::scatterv(comm, c_data(in_values), sizes, displs,
+ out_values, out_size, root);
}
template<typename T>
@@ -141,16 +131,10 @@ void
scatterv(const communicator& comm, const T* in_values,
const std::vector<int>& sizes, T* out_values, int root)
{
- int nprocs = comm.size();
- int myrank = comm.rank();
-
- std::vector<int> displs(nprocs);
- for (int rank = 0, aux = 0; rank < nprocs; ++rank) {
- displs[rank] = aux;
- aux += sizes[rank];
- }
- ::boost::mpi::scatterv(comm, in_values, sizes, displs, out_values,
- sizes[myrank], root);
+ using detail::c_data;
+ detail::scatterv_impl(comm, in_values, out_values, sizes[comm.rank()],
+ c_data(sizes), (int const*)0,
+ root, is_mpi_datatype<T>());
}
template<typename T>
@@ -161,6 +145,23 @@ scatterv(const communicator& comm, const std::vector<T>& in_values,
::boost::mpi::scatterv(comm, &in_values[0], sizes, out_values, root);
}
+template<typename T>
+void
+scatterv(const communicator& comm, const T* in_values,
+ T* out_values, int n, int root)
+{
+ detail::scatterv_impl(comm, in_values, out_values, n, (int const*)0, (int const*)0,
+ root, is_mpi_datatype<T>());
+}
+
+template<typename T>
+void
+scatterv(const communicator& comm, const std::vector<T>& in_values,
+ T* out_values, int out_size, int root)
+{
+ ::boost::mpi::scatterv(comm, &in_values[0], out_values, out_size, root);
+}
+
} } // end namespace boost::mpi
#endif // BOOST_MPI_SCATTERV_HPP
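
A hedged usage sketch for the sizes-only scatterv overload, whose displacements are now derived inside scatterv_impl rather than precomputed by the caller. Every rank builds in_values here only to keep the example short; MPI reads it at the root alone:

#include <boost/mpi.hpp>
#include <numeric>
#include <vector>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;
  const int root = 0;

  // Rank r is to receive r + 1 elements.
  std::vector<int> sizes(comm.size());
  for (int r = 0; r < comm.size(); ++r) sizes[r] = r + 1;

  std::vector<int> in_values(std::accumulate(sizes.begin(), sizes.end(), 0));
  std::iota(in_values.begin(), in_values.end(), 0);   // only the root's copy is sent

  std::vector<int> mine(sizes[comm.rank()]);
  boost::mpi::scatterv(comm, in_values, sizes, mine.data(), root);

  std::cout << "rank " << comm.rank() << " got " << mine.size() << " values\n";
  return 0;
}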