path: root/libs/mpi
author    DongHun Kwak <dh0128.kwak@samsung.com>  2019-12-05 15:24:45 +0900
committer DongHun Kwak <dh0128.kwak@samsung.com>  2019-12-05 15:24:45 +0900
commit    5ce1cfc2525b06c0a9e38531813781de0281c96d (patch)
tree      19cc66c6cf6396db288813b2558cc350f1deede2 /libs/mpi
parent    3c1df2168531ad5580076ae08d529054689aeedd (diff)
download  boost-5ce1cfc2525b06c0a9e38531813781de0281c96d.tar.gz
          boost-5ce1cfc2525b06c0a9e38531813781de0281c96d.tar.bz2
          boost-5ce1cfc2525b06c0a9e38531813781de0281c96d.zip

Imported Upstream version 1.71.0 (upstream/1.71.0)
Diffstat (limited to 'libs/mpi')
-rw-r--r--  libs/mpi/doc/c_mapping.qbk                     |  626
-rw-r--r--  libs/mpi/doc/collective.qbk                    |  367
-rw-r--r--  libs/mpi/doc/communicator.qbk                  |  122
-rw-r--r--  libs/mpi/doc/getting_started.qbk               |  252
-rw-r--r--  libs/mpi/doc/introduction.qbk                  |   53
-rw-r--r--  libs/mpi/doc/mpi.introduction.qbk              |   53
-rw-r--r--  libs/mpi/doc/mpi.qbk                           | 2216
-rw-r--r--  libs/mpi/doc/mpi_autodoc.xml                   |  135
-rw-r--r--  libs/mpi/doc/point_to_point.qbk                |  176
-rw-r--r--  libs/mpi/doc/python.qbk                        |  222
-rw-r--r--  libs/mpi/doc/skeleton_and_content.qbk          |  101
-rw-r--r--  libs/mpi/doc/threading.qbk                     |   35
-rw-r--r--  libs/mpi/doc/tutorial.qbk                      |  132
-rw-r--r--  libs/mpi/doc/user_data_types.qbk               |  103
-rw-r--r--  libs/mpi/example/cartesian_communicator.cpp    |    4
-rw-r--r--  libs/mpi/src/broadcast.cpp                     |   27
-rw-r--r--  libs/mpi/src/communicator.cpp                  |   35
-rw-r--r--  libs/mpi/src/environment.cpp                   |    3
-rw-r--r--  libs/mpi/src/point_to_point.cpp                |  113
-rw-r--r--  libs/mpi/src/request.cpp                       |  299
-rw-r--r--  libs/mpi/test/Jamfile.v2                       |   11
-rw-r--r--  libs/mpi/test/all_gather_test.cpp              |   11
-rw-r--r--  libs/mpi/test/all_reduce_test.cpp              |   31
-rw-r--r--  libs/mpi/test/all_to_all_test.cpp              |   12
-rw-r--r--  libs/mpi/test/block_nonblock_test.cpp          |   57
-rw-r--r--  libs/mpi/test/broadcast_stl_test.cpp           |   23
-rw-r--r--  libs/mpi/test/broadcast_test.cpp               |   17
-rw-r--r--  libs/mpi/test/cartesian_topology_init_test.cpp |    6
-rw-r--r--  libs/mpi/test/cartesian_topology_test.cpp      |   10
-rw-r--r--  libs/mpi/test/debugger.cpp                     |   22
-rw-r--r--  libs/mpi/test/debugger.hpp                     |    2
-rw-r--r--  libs/mpi/test/gather_test.cpp                  |   11
-rw-r--r--  libs/mpi/test/graph_topology_test.cpp          |   11
-rw-r--r--  libs/mpi/test/groups_test.cpp                  |   10
-rw-r--r--  libs/mpi/test/is_mpi_op_test.cpp               |   38
-rw-r--r--  libs/mpi/test/mt_init_test.cpp                 |   31
-rw-r--r--  libs/mpi/test/mt_level_test.cpp                |    9
-rw-r--r--  libs/mpi/test/non_blocking_any_source.cpp      |   60
-rw-r--r--  libs/mpi/test/nonblocking_test.cpp             |   33
-rw-r--r--  libs/mpi/test/pointer_test.cpp                 |   16
-rw-r--r--  libs/mpi/test/reduce_test.cpp                  |   10
-rw-r--r--  libs/mpi/test/ring_test.cpp                    |   19
-rw-r--r--  libs/mpi/test/scan_test.cpp                    |   12
-rw-r--r--  libs/mpi/test/scatter_test.cpp                 |   17
-rw-r--r--  libs/mpi/test/sendrecv_test.cpp                |    9
-rw-r--r--  libs/mpi/test/sendrecv_vector.cpp              |    3
-rw-r--r--  libs/mpi/test/skeleton_content_test.cpp        |   18
-rw-r--r--  libs/mpi/test/version_test.cpp                 |   46
-rw-r--r--  libs/mpi/test/wait_any_test.cpp                |   10
49 files changed, 2936 insertions, 2703 deletions
diff --git a/libs/mpi/doc/c_mapping.qbk b/libs/mpi/doc/c_mapping.qbk
new file mode 100644
index 0000000000..05864c4ef8
--- /dev/null
+++ b/libs/mpi/doc/c_mapping.qbk
@@ -0,0 +1,626 @@
+[section:c_mapping Mapping from C MPI to Boost.MPI]
+
+This section provides tables that map from the functions and constants
+of the standard C MPI to their Boost.MPI equivalents. It will be most
+useful for users who are already familiar with the C or Fortran
+interfaces to MPI, or for porting existing parallel programs to Boost.MPI.
+
+[table Point-to-point communication
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_ANY_SOURCE`] [`any_source`]]
+
+ [[`MPI_ANY_TAG`] [`any_tag`]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node40.html#Node40
+`MPI_Bsend`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Bsend_init`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node42.html#Node42
+`MPI_Buffer_attach`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node42.html#Node42
+`MPI_Buffer_detach`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
+`MPI_Cancel`]]
+ [[memberref boost::mpi::request::cancel
+`request::cancel`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node35.html#Node35
+`MPI_Get_count`]]
+ [[memberref boost::mpi::status::count `status::count`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
+`MPI_Ibsend`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
+`MPI_Iprobe`]]
+ [[memberref boost::mpi::communicator::iprobe `communicator::iprobe`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
+`MPI_Irsend`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
+`MPI_Isend`]]
+ [[memberref boost::mpi::communicator::isend
+`communicator::isend`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
+`MPI_Issend`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
+`MPI_Irecv`]]
+ [[memberref boost::mpi::communicator::irecv
+`communicator::irecv`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
+`MPI_Probe`]]
+ [[memberref boost::mpi::communicator::probe `communicator::probe`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node53.html#Node53
+`MPI_PROC_NULL`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node34.html#Node34 `MPI_Recv`]]
+ [[memberref boost::mpi::communicator::recv
+`communicator::recv`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Recv_init`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Request_free`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node40.html#Node40
+`MPI_Rsend`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Rsend_init`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node31.html#Node31
+`MPI_Send`]]
+ [[memberref boost::mpi::communicator::send
+`communicator::send`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node52.html#Node52
+`MPI_Sendrecv`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node52.html#Node52
+`MPI_Sendrecv_replace`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Send_init`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node40.html#Node40
+`MPI_Ssend`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Ssend_init`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Start`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
+`MPI_Startall`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Test`]] [[memberref boost::mpi::request::test `request::test`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Testall`]] [[funcref boost::mpi::test_all `test_all`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Testany`]] [[funcref boost::mpi::test_any `test_any`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Testsome`]] [[funcref boost::mpi::test_some `test_some`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
+`MPI_Test_cancelled`]]
+ [[memberref boost::mpi::status::cancelled
+`status::cancelled`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Wait`]] [[memberref boost::mpi::request::wait
+`request::wait`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Waitall`]] [[funcref boost::mpi::wait_all `wait_all`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Waitany`]] [[funcref boost::mpi::wait_any `wait_any`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
+`MPI_Waitsome`]] [[funcref boost::mpi::wait_some `wait_some`]]]
+]
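+
+For example, a C program that sends an integer from rank 0 to rank 1
+with `MPI_Send`/`MPI_Recv` becomes a pair of member function calls in
+Boost.MPI. The following minimal sketch (the ranks, tag, and value are
+illustrative only) shows both sides of the mapping:
+
+  #include <boost/mpi.hpp>
+  namespace mpi = boost::mpi;
+
+  int main()
+  {
+    mpi::environment env;
+    mpi::communicator world;
+
+    int value = 17;
+    if (world.rank() == 0) {
+      // C equivalent: MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+      world.send(1, 0, value);
+    } else if (world.rank() == 1) {
+      // C equivalent: MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
+      //                        MPI_STATUS_IGNORE);
+      world.recv(0, 0, value);
+    }
+    return 0;
+  }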
+
+Boost.MPI automatically maps C and C++ data types to their MPI
+equivalents. The following table illustrates the mappings between C++
+types and MPI datatype constants.
+
+[table Datatypes
+ [[C Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_CHAR`] [`signed char`]]
+ [[`MPI_SHORT`] [`signed short int`]]
+ [[`MPI_INT`] [`signed int`]]
+ [[`MPI_LONG`] [`signed long int`]]
+ [[`MPI_UNSIGNED_CHAR`] [`unsigned char`]]
+ [[`MPI_UNSIGNED_SHORT`] [`unsigned short int`]]
+ [[`MPI_UNSIGNED_INT`] [`unsigned int`]]
+ [[`MPI_UNSIGNED_LONG`] [`unsigned long int`]]
+ [[`MPI_FLOAT`] [`float`]]
+ [[`MPI_DOUBLE`] [`double`]]
+ [[`MPI_LONG_DOUBLE`] [`long double`]]
+ [[`MPI_BYTE`] [unused]]
+ [[`MPI_PACKED`] [used internally for [link
+mpi.tutorial.user_data_types serialized data types]]]
+ [[`MPI_LONG_LONG_INT`] [`long long int`, if supported by compiler]]
+ [[`MPI_UNSIGNED_LONG_LONG_INT`] [`unsigned long long int`, if
+supported by compiler]]
+ [[`MPI_FLOAT_INT`] [`std::pair<float, int>`]]
+ [[`MPI_DOUBLE_INT`] [`std::pair<double, int>`]]
+ [[`MPI_LONG_INT`] [`std::pair<long, int>`]]
+ [[`MPI_2INT`] [`std::pair<int, int>`]]
+ [[`MPI_SHORT_INT`] [`std::pair<short, int>`]]
+ [[`MPI_LONG_DOUBLE_INT`] [`std::pair<long double, int>`]]
+]
+
+Boost.MPI does not provide direct wrappers to the MPI derived
+datatypes functionality. Instead, Boost.MPI relies on the
+_Serialization_ library to construct MPI datatypes for user-defined
+classes. The section on [link mpi.tutorial.user_data_types user-defined
+data types] describes this mechanism, which is used for types that are
+marked as "MPI datatypes" using [classref
+boost::mpi::is_mpi_datatype `is_mpi_datatype`].
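+
+As a brief illustration, the following sketch (the `point` struct is a
+made-up example) gives a simple aggregate a `serialize` function and
+then marks it as an MPI datatype, so that Boost.MPI can build a
+matching MPI derived datatype for it:
+
+  #include <boost/mpi/datatype.hpp>
+
+  struct point {
+    double x, y, z;
+
+    template<typename Archive>
+    void serialize(Archive& ar, const unsigned int /*version*/) {
+      ar & x & y & z;
+    }
+  };
+
+  // Declare that point has a fixed layout suitable for an MPI datatype.
+  BOOST_IS_MPI_DATATYPE(point)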
+
+The derived datatypes table that follows describes which C++ types
+correspond to the functionality of the C MPI's datatype
+constructor. Boost.MPI may not actually use the C MPI function listed
+when building datatypes of a certain form. Since the actual datatypes
+built by Boost.MPI are typically hidden from the user, many of these
+operations are called internally by Boost.MPI.
+
+[table Derived datatypes
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node56.html#Node56
+`MPI_Address`]] [used automatically in Boost.MPI for MPI version 1.x]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-20-html/node76.htm#Node76
+`MPI_Get_address`]] [used automatically in Boost.MPI for MPI version 2.0 and higher]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node58.html#Node58
+`MPI_Type_commit`]] [used automatically in Boost.MPI]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
+`MPI_Type_contiguous`]] [arrays]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node56.html#Node56
+`MPI_Type_extent`]] [used automatically in Boost.MPI]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node58.html#Node58
+`MPI_Type_free`]] [used automatically in Boost.MPI]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
+`MPI_Type_hindexed`]] [any type used as a subobject]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
+`MPI_Type_hvector`]] [unused]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
+`MPI_Type_indexed`]] [any type used as a subobject]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node57.html#Node57
+`MPI_Type_lb`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node56.html#Node56
+`MPI_Type_size`]] [used automatically in Boost.MPI]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
+`MPI_Type_struct`]] [user-defined classes and structs with MPI 1.x]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-20-html/node76.htm#Node76
+`MPI_Type_create_struct`]] [user-defined classes and structs with MPI 2.0 and higher]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node57.html#Node57
+`MPI_Type_ub`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
+`MPI_Type_vector`]] [used automatically in Boost.MPI]]
+]
+
+MPI's packing facilities store values into a contiguous buffer, which
+can later be transmitted via MPI and unpacked into separate values via
+MPI's unpacking facilities. As with datatypes, Boost.MPI provides an
+abstract interface to MPI's packing and unpacking facilities. In
+particular, the two archive classes [classref
+boost::mpi::packed_oarchive `packed_oarchive`] and [classref
+boost::mpi::packed_iarchive `packed_iarchive`] can be used
+to pack or unpack a contiguous buffer using MPI's facilities.
+
+[table Packing and unpacking
+ [[C Function] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node62.html#Node62
+`MPI_Pack`]] [[classref
+boost::mpi::packed_oarchive `packed_oarchive`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node62.html#Node62
+`MPI_Pack_size`]] [used internally by Boost.MPI]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node62.html#Node62
+`MPI_Unpack`]] [[classref
+boost::mpi::packed_iarchive `packed_iarchive`]]]
+]
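+
+As a brief sketch (the ranks and tag value are illustrative), the
+following packs two values into a single buffer on one rank and
+unpacks them on another, mirroring an `MPI_Pack`/`MPI_Unpack`
+exchange:
+
+  #include <boost/mpi.hpp>
+  #include <string>
+  #include <boost/serialization/string.hpp>
+  namespace mpi = boost::mpi;
+
+  void exchange(mpi::communicator& world)
+  {
+    if (world.rank() == 0) {
+      mpi::packed_oarchive oa(world);
+      int n = 42;
+      std::string s("packed");
+      oa << n << s;   // analogous to successive MPI_Pack calls
+      world.send(1, 0, oa);
+    } else if (world.rank() == 1) {
+      mpi::packed_iarchive ia(world);
+      world.recv(0, 0, ia);
+      int n;
+      std::string s;
+      ia >> n >> s;   // analogous to successive MPI_Unpack calls
+    }
+  }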
+
+Boost.MPI supports a one-to-one mapping for most of the MPI
+collectives. For each collective provided by Boost.MPI, the underlying
+C MPI collective will be invoked when it is possible (and efficient)
+to do so.
+
+[table Collectives
+ [[C Function] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node73.html#Node73
+`MPI_Allgather`]] [[funcref boost::mpi::all_gather `all_gather`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node73.html#Node73
+`MPI_Allgatherv`]] [most uses supported by [funcref boost::mpi::all_gather `all_gather`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node82.html#Node82
+`MPI_Allreduce`]] [[funcref boost::mpi::all_reduce `all_reduce`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node75.html#Node75
+`MPI_Alltoall`]] [[funcref boost::mpi::all_to_all `all_to_all`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node75.html#Node75
+`MPI_Alltoallv`]] [most uses supported by [funcref boost::mpi::all_to_all `all_to_all`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node66.html#Node66
+`MPI_Barrier`]] [[memberref
+boost::mpi::communicator::barrier `communicator::barrier`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node67.html#Node67
+`MPI_Bcast`]] [[funcref boost::mpi::broadcast `broadcast`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node69.html#Node69
+`MPI_Gather`]] [[funcref boost::mpi::gather `gather`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node69.html#Node69
+`MPI_Gatherv`]] [most uses supported by [funcref boost::mpi::gather `gather`],
+other usages supported by [funcref boost::mpi::gatherv `gatherv`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node77.html#Node77
+`MPI_Reduce`]] [[funcref boost::mpi::reduce `reduce`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node83.html#Node83
+`MPI_Reduce_scatter`]] [unsupported]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node84.html#Node84
+`MPI_Scan`]] [[funcref boost::mpi::scan `scan`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node71.html#Node71
+`MPI_Scatter`]] [[funcref boost::mpi::scatter `scatter`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node71.html#Node71
+`MPI_Scatterv`]] [most uses supported by [funcref boost::mpi::scatter `scatter`],
+other uses supported by [funcref boost::mpi::scatterv `scatterv`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-20-html/node145.htm#Node145
+`MPI_IN_PLACE`]] [supported implicitly by [funcref boost::mpi::all_reduce
+`all_reduce`] by omitting the output value]]
+]
+
+Boost.MPI uses function objects to specify how reductions should occur
+in its equivalents to `MPI_Allreduce`, `MPI_Reduce`, and
+`MPI_Scan`. The following table illustrates how
+[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node78.html#Node78
+predefined] and
+[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node80.html#Node80
+user-defined] reduction operations can be mapped between the C MPI and
+Boost.MPI.
+
+[table Reduction operations
+ [[C Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_BAND`] [[classref boost::mpi::bitwise_and `bitwise_and`]]]
+ [[`MPI_BOR`] [[classref boost::mpi::bitwise_or `bitwise_or`]]]
+ [[`MPI_BXOR`] [[classref boost::mpi::bitwise_xor `bitwise_xor`]]]
+ [[`MPI_LAND`] [`std::logical_and`]]
+ [[`MPI_LOR`] [`std::logical_or`]]
+ [[`MPI_LXOR`] [[classref boost::mpi::logical_xor `logical_xor`]]]
+ [[`MPI_MAX`] [[classref boost::mpi::maximum `maximum`]]]
+ [[`MPI_MAXLOC`] [unsupported]]
+ [[`MPI_MIN`] [[classref boost::mpi::minimum `minimum`]]]
+ [[`MPI_MINLOC`] [unsupported]]
+ [[`MPI_Op_create`] [used internally by Boost.MPI]]
+ [[`MPI_Op_free`] [used internally by Boost.MPI]]
+ [[`MPI_PROD`] [`std::multiplies`]]
+ [[`MPI_SUM`] [`std::plus`]]
+]
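+
+For example, where a C program would pass `MPI_SUM` to `MPI_Reduce`,
+a Boost.MPI program passes `std::plus<int>()` to [funcref
+boost::mpi::reduce `reduce`]. A minimal sketch:
+
+  #include <boost/mpi.hpp>
+  #include <functional>
+  namespace mpi = boost::mpi;
+
+  void sum_ranks(mpi::communicator& world)
+  {
+    int sum = 0;
+    // C equivalent: MPI_Reduce(&rank, &sum, 1, MPI_INT, MPI_SUM, 0, comm);
+    mpi::reduce(world, world.rank(), sum, std::plus<int>(), 0);
+    // On rank 0, sum now holds 0 + 1 + ... + (world.size() - 1).
+  }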
+
+MPI defines several special communicators, including `MPI_COMM_WORLD`
+(containing all processes that the local process can communicate with),
+`MPI_COMM_SELF` (containing only the local process), and
+`MPI_COMM_EMPTY` (containing no processes). These special communicators
+are all instances of the [classref boost::mpi::communicator
+`communicator`] class in Boost.MPI.
+
+[table Predefined communicators
+ [[C Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_COMM_WORLD`] [a default-constructed [classref boost::mpi::communicator `communicator`]]]
+ [[`MPI_COMM_SELF`] [a [classref boost::mpi::communicator `communicator`] that contains only the current process]]
+ [[`MPI_COMM_EMPTY`] [a [classref boost::mpi::communicator `communicator`] that evaluates to false]]
+]
+
+Boost.MPI supports groups of processes through its [classref
+boost::mpi::group `group`] class.
+
+[table Group operations and constants
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_GROUP_EMPTY`] [a default-constructed [classref
+ boost::mpi::group `group`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
+ `MPI_Group_size`]] [[memberref boost::mpi::group::size `group::size`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
+ `MPI_Group_rank`]] [[memberref boost::mpi::group::rank `group::rank`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
+ `MPI_Group_translate_ranks`]] [[memberref boost::mpi::group::translate_ranks `group::translate_ranks`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
+ `MPI_Group_compare`]] [operators `==` and `!=`]]
+ [[`MPI_IDENT`] [operators `==` and `!=`]]
+ [[`MPI_SIMILAR`] [operators `==` and `!=`]]
+ [[`MPI_UNEQUAL`] [operators `==` and `!=`]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Comm_group`]] [[memberref
+ boost::mpi::communicator::group `communicator::group`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_union`]] [operator `|` for groups]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_intersection`]] [operator `&` for groups]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_difference`]] [operator `-` for groups]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_incl`]] [[memberref boost::mpi::group::include `group::include`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_excl`]] [[memberref boost::mpi::group::exclude `group::exclude`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_range_incl`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
+ `MPI_Group_range_excl`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node99.html#Node99
+ `MPI_Group_free`]] [used automatically in Boost.MPI]]
+]
+
+Boost.MPI provides manipulation of communicators through the [classref
+boost::mpi::communicator `communicator`] class.
+
+[table Communicator operations
+ [[C Function] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node101.html#Node101
+ `MPI_Comm_size`]] [[memberref boost::mpi::communicator::size `communicator::size`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node101.html#Node101
+ `MPI_Comm_rank`]] [[memberref boost::mpi::communicator::rank
+ `communicator::rank`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node101.html#Node101
+ `MPI_Comm_compare`]] [operators `==` and `!=`]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node102.html#Node102
+ `MPI_Comm_dup`]] [[classref boost::mpi::communicator `communicator`]
+ class constructor using `comm_duplicate`]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node102.html#Node102
+ `MPI_Comm_create`]] [[classref boost::mpi::communicator
+ `communicator`] constructor]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node102.html#Node102
+ `MPI_Comm_split`]] [[memberref boost::mpi::communicator::split
+ `communicator::split`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node103.html#Node103
+ `MPI_Comm_free`]] [used automatically in Boost.MPI]]
+]
+
+Boost.MPI currently provides support for inter-communicators via the
+[classref boost::mpi::intercommunicator `intercommunicator`] class.
+
+[table Inter-communicator operations
+ [[C Function] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node112.html#Node112
+ `MPI_Comm_test_inter`]] [use [memberref boost::mpi::communicator::as_intercommunicator `communicator::as_intercommunicator`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node112.html#Node112
+ `MPI_Comm_remote_size`]] [[memberref boost::mpi::intercommunicator::remote_size `intercommunicator::remote_size`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node112.html#Node112
+ `MPI_Comm_remote_group`]] [[memberref boost::mpi::intercommunicator::remote_group `intercommunicator::remote_group`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node113.html#Node113
+ `MPI_Intercomm_create`]] [[classref boost::mpi::intercommunicator `intercommunicator`] constructor]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node113.html#Node113
+ `MPI_Intercomm_merge`]] [[memberref boost::mpi::intercommunicator::merge `intercommunicator::merge`]]]
+]
+
+Boost.MPI currently provides no support for attribute caching.
+
+[table Attributes and caching
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_NULL_COPY_FN`] [unsupported]]
+ [[`MPI_NULL_DELETE_FN`] [unsupported]]
+ [[`MPI_KEYVAL_INVALID`] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Keyval_create`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Copy_function`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Delete_function`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Keyval_free`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Attr_put`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Attr_get`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
+ `MPI_Attr_delete`]] [unsupported]]
+]
+
+Boost.MPI provides complete support for creating communicators
+with different topologies and later querying those topologies. Support
+for graph topologies is provided via an interface to the
+[@http://www.boost.org/libs/graph/doc/index.html Boost Graph Library
+(BGL)], where a communicator can be created which matches the
+structure of any BGL graph, and the graph topology of a communicator
+can be viewed as a BGL graph for use in existing, generic graph
+algorithms.
+
+[table Process topologies
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_GRAPH`] [unnecessary; use [memberref boost::mpi::communicator::as_graph_communicator `communicator::as_graph_communicator`]]]
+ [[`MPI_CART`] [unnecessary; use [memberref boost::mpi::communicator::has_cartesian_topology `communicator::has_cartesian_topology`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node133.html#Node133
+ `MPI_Cart_create`]] [[classref boost::mpi::cartesian_communicator `cartesian_communicator`]
+ constructor]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node134.html#Node134
+ `MPI_Dims_create`]] [[funcref boost::mpi::cartesian_dimensions `cartesian_dimensions`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node135.html#Node135
+ `MPI_Graph_create`]] [[classref
+ boost::mpi::graph_communicator
+ `graph_communicator`] constructors]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Topo_test`]] [[memberref
+ boost::mpi::communicator::as_graph_communicator
+ `communicator::as_graph_communicator`], [memberref
+ boost::mpi::communicator::has_cartesian_topology
+ `communicator::has_cartesian_topology`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Graphdims_get`]] [[funcref boost::mpi::num_vertices
+ `num_vertices`], [funcref boost::mpi::num_edges `num_edges`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Graph_get`]] [[funcref boost::mpi::vertices
+ `vertices`], [funcref boost::mpi::edges `edges`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Cartdim_get`]] [[memberref boost::mpi::cartesian_communicator::ndims `cartesian_communicator::ndims` ]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Cart_get`]] [[memberref boost::mpi::cartesian_communicator::topology `cartesian_communicator::topology` ]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Cart_rank`]] [[memberref boost::mpi::cartesian_communicator::rank `cartesian_communicator::rank` ]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Cart_coords`]] [[memberref boost::mpi::cartesian_communicator::coordinates `cartesian_communicator::coordinates` ]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Graph_neighbors_count`]] [[funcref boost::mpi::out_degree
+ `out_degree`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
+ `MPI_Graph_neighbors`]] [[funcref boost::mpi::out_edges
+ `out_edges`], [funcref boost::mpi::adjacent_vertices `adjacent_vertices`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node137.html#Node137
+ `MPI_Cart_shift`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node138.html#Node138
+ `MPI_Cart_sub`]] [[classref boost::mpi::cartesian_communicator `cartesian_communicator`]
+ constructor]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node139.html#Node139
+ `MPI_Cart_map`]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node139.html#Node139
+ `MPI_Graph_map`]] [unsupported]]
+]
+
+Boost.MPI supports environmental inquiries through the [classref
+boost::mpi::environment `environment`] class.
+
+[table Environmental inquiries
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_TAG_UB`] [unnecessary; use [memberref
+ boost::mpi::environment::max_tag `environment::max_tag`]]]
+ [[`MPI_HOST`] [unnecessary; use [memberref
+ boost::mpi::environment::host_rank `environment::host_rank`]]]
+ [[`MPI_IO`] [unnecessary; use [memberref
+ boost::mpi::environment::io_rank `environment::io_rank`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node143.html#Node147
+ `MPI_Get_processor_name`]]
+ [[memberref boost::mpi::environment::processor_name
+ `environment::processor_name`]]]
+]
+
+Boost.MPI translates MPI errors into exceptions, reported via the
+[classref boost::mpi::exception `exception`] class.
+
+[table Error handling
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_ERRORS_ARE_FATAL`] [unused; errors are translated into
+ Boost.MPI exceptions]]
+ [[`MPI_ERRORS_RETURN`] [unused; errors are translated into
+ Boost.MPI exceptions]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
+ `MPI_errhandler_create`]] [unused; errors are translated into
+ Boost.MPI exceptions]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
+ `MPI_errhandler_set`]] [unused; errors are translated into
+ Boost.MPI exceptions]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
+ `MPI_errhandler_get`]] [unused; errors are translated into
+ Boost.MPI exceptions]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
+ `MPI_errhandler_free`]] [unused; errors are translated into
+ Boost.MPI exceptions]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
+ `MPI_Error_string`]] [used internally by Boost.MPI]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node149.html#Node149
+ `MPI_Error_class`]] [[memberref boost::mpi::exception::error_class `exception::error_class`]]]
+]
+
+The MPI timing facilities are exposed via the Boost.MPI [classref
+boost::mpi::timer `timer`] class, which provides an interface
+compatible with the [@http://www.boost.org/libs/timer/index.html Boost
+Timer library].
+
+[table Timing facilities
+ [[C Function/Constant] [Boost.MPI Equivalent]]
+
+ [[`MPI_WTIME_IS_GLOBAL`] [unnecessary; use [memberref
+ boost::mpi::timer::time_is_global `timer::time_is_global`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node150.html#Node150
+ `MPI_Wtime`]] [use [memberref boost::mpi::timer::elapsed
+ `timer::elapsed`] to determine the time elapsed from some specific
+ starting point]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node150.html#Node150
+ `MPI_Wtick`]] [[memberref boost::mpi::timer::elapsed_min `timer::elapsed_min`]]]
+]
+
+MPI startup and shutdown are managed by the construction and
+destruction of the Boost.MPI [classref boost::mpi::environment
+`environment`] class.
+
+[table Startup/shutdown facilities
+ [[C Function] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
+ `MPI_Init`]] [[classref boost::mpi::environment `environment`]
+ constructor]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
+ `MPI_Finalize`]] [[classref boost::mpi::environment `environment`]
+ destructor]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
+ `MPI_Initialized`]] [[memberref boost::mpi::environment::initialized
+ `environment::initialized`]]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
+ `MPI_Abort`]] [[memberref boost::mpi::environment::abort
+ `environment::abort`]]]
+]
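+
+In practice, this means that the explicit init/finalize pair of a C
+MPI program disappears; a minimal sketch:
+
+  #include <boost/mpi.hpp>
+
+  int main(int argc, char* argv[])
+  {
+    boost::mpi::environment env(argc, argv);  // calls MPI_Init
+    boost::mpi::communicator world;           // the MPI_COMM_WORLD equivalent
+    // ... parallel work ...
+    return 0;
+  }  // env's destructor calls MPI_Finalize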
+
+Boost.MPI does not provide any support for the profiling facilities in
+MPI 1.1.
+
+[table Profiling interface
+ [[C Function] [Boost.MPI Equivalent]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node153.html#Node153
+ `PMPI_*` routines]] [unsupported]]
+ [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node156.html#Node156
+ `MPI_Pcontrol`]] [unsupported]]
+]
+
+[endsect:c_mapping]
diff --git a/libs/mpi/doc/collective.qbk b/libs/mpi/doc/collective.qbk
new file mode 100644
index 0000000000..101c7f5c70
--- /dev/null
+++ b/libs/mpi/doc/collective.qbk
@@ -0,0 +1,367 @@
+[section:collectives Collective operations]
+
+[link mpi.tutorial.point_to_point Point-to-point operations] are the
+core message passing primitives in Boost.MPI. However, many
+message-passing applications also require higher-level communication
+algorithms that combine or summarize the data stored on many different
+processes. These algorithms support many common tasks such as
+"broadcast this value to all processes", "compute the sum of the
+values on all processors" or "find the global minimum."
+
+[section:broadcast Broadcast]
+The [funcref boost::mpi::broadcast `broadcast`] algorithm is
+by far the simplest collective operation. It broadcasts a value from a
+single process to all other processes within a [classref
+boost::mpi::communicator communicator]. For instance, the
+following program broadcasts "Hello, World!" from process 0 to every
+other process. (`hello_world_broadcast.cpp`)
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <string>
+ #include <boost/serialization/string.hpp>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ std::string value;
+ if (world.rank() == 0) {
+ value = "Hello, World!";
+ }
+
+ broadcast(world, value, 0);
+
+ std::cout << "Process #" << world.rank() << " says " << value
+ << std::endl;
+ return 0;
+ }
+
+Running this program with seven processes will produce a result such
+as:
+
+[pre
+Process #0 says Hello, World!
+Process #2 says Hello, World!
+Process #1 says Hello, World!
+Process #4 says Hello, World!
+Process #3 says Hello, World!
+Process #5 says Hello, World!
+Process #6 says Hello, World!
+]
+[endsect:broadcast]
+
+[section:gather Gather]
+The [funcref boost::mpi::gather `gather`] collective gathers
+the values produced by every process in a communicator into a vector
+of values on the "root" process (specified by an argument to
+`gather`). The /i/th element in the vector will correspond to the
+value gathered from the /i/th process. For instance, in the following
+program each process computes its own random number. All of these
+random numbers are gathered at process 0 (the "root" in this case),
+which prints out the values that correspond to each processor.
+(`random_gather.cpp`)
+
+  #include <boost/mpi.hpp>
+  #include <iostream>
+  #include <vector>
+  #include <cstdlib>
+  #include <ctime>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ std::srand(time(0) + world.rank());
+ int my_number = std::rand();
+ if (world.rank() == 0) {
+ std::vector<int> all_numbers;
+ gather(world, my_number, all_numbers, 0);
+ for (int proc = 0; proc < world.size(); ++proc)
+ std::cout << "Process #" << proc << " thought of "
+ << all_numbers[proc] << std::endl;
+ } else {
+ gather(world, my_number, 0);
+ }
+
+ return 0;
+ }
+
+Executing this program with seven processes will result in output such
+as the following. Although the random values will change from one run
+to the next, the order of the processes in the output will remain the
+same because only process 0 writes to `std::cout`.
+
+[pre
+Process #0 thought of 332199874
+Process #1 thought of 20145617
+Process #2 thought of 1862420122
+Process #3 thought of 480422940
+Process #4 thought of 1253380219
+Process #5 thought of 949458815
+Process #6 thought of 650073868
+]
+
+The `gather` operation collects values from every process into a
+vector at one process. If instead the values from every process need
+to be collected into identical vectors on every process, use the
+[funcref boost::mpi::all_gather `all_gather`] algorithm,
+which is semantically equivalent to calling `gather` followed by a
+`broadcast` of the resulting vector.
+
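+For instance, the gather in the previous example could be replaced by
+a single call executed by every process (a sketch of just the changed
+portion):
+
+  std::vector<int> all_numbers;
+  all_gather(world, my_number, all_numbers);
+  // Every process now holds every process's random number.
+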
+[endsect:gather]
+
+[section:scatter Scatter]
+The [funcref boost::mpi::scatter `scatter`] collective scatters the
+values from a vector on the "root" process of a communicator into
+values on all the processes of the communicator. The /i/th element of
+the vector will correspond to the value received by the /i/th
+process. For instance, in the following program, the root process
+produces a vector of random numbers and sends one value to each
+process, which then prints it. (`random_scatter.cpp`)
+
+  #include <boost/mpi.hpp>
+  #include <boost/mpi/collectives.hpp>
+  #include <algorithm>
+  #include <ctime>
+  #include <iostream>
+  #include <cstdlib>
+  #include <vector>
+
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(time(0) + world.rank());
+ std::vector<int> all;
+ int mine = -1;
+ if (world.rank() == 0) {
+ all.resize(world.size());
+ std::generate(all.begin(), all.end(), std::rand);
+ }
+ mpi::scatter(world, all, mine, 0);
+ for (int r = 0; r < world.size(); ++r) {
+ world.barrier();
+ if (r == world.rank()) {
+ std::cout << "Rank " << r << " got " << mine << '\n';
+ }
+ }
+ return 0;
+ }
+
+Executing this program with seven processes will result in output such
+as the following. Although the random values will change from one run
+to the next, the order of the processes in the output will remain the
+same because of the barrier.
+
+[pre
+Rank 0 got 1409381269
+Rank 1 got 17045268
+Rank 2 got 440120016
+Rank 3 got 936998224
+Rank 4 got 1827129182
+Rank 5 got 1951746047
+Rank 6 got 2117359639
+]
+
+[endsect:scatter]
+
+[section:reduce Reduce]
+
+The [funcref boost::mpi::reduce `reduce`] collective
+summarizes the values from each process into a single value at the
+user-specified "root" process. The Boost.MPI `reduce` operation is
+similar in spirit to the STL _accumulate_ operation, because it takes
+a sequence of values (one per process) and combines them via a
+function object. For instance, we can randomly generate values in each
+process and then compute the minimum value over all processes via a
+call to [funcref boost::mpi::reduce `reduce`]
+(`random_min.cpp`):
+
+  #include <boost/mpi.hpp>
+  #include <iostream>
+  #include <cstdlib>
+  #include <ctime>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ std::srand(time(0) + world.rank());
+ int my_number = std::rand();
+
+ if (world.rank() == 0) {
+ int minimum;
+ reduce(world, my_number, minimum, mpi::minimum<int>(), 0);
+ std::cout << "The minimum value is " << minimum << std::endl;
+ } else {
+ reduce(world, my_number, mpi::minimum<int>(), 0);
+ }
+
+ return 0;
+ }
+
+The use of `mpi::minimum<int>` indicates that the minimum value
+should be computed. `mpi::minimum<int>` is a binary function object
+that compares its two parameters via `<` and returns the smaller
+value. Any associative binary function or function object will
+work provided it's stateless. For instance, to concatenate strings with `reduce` one could use
+the function object `std::plus<std::string>` (`string_cat.cpp`):
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <string>
+ #include <functional>
+ #include <boost/serialization/string.hpp>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ std::string names[10] = { "zero ", "one ", "two ", "three ",
+ "four ", "five ", "six ", "seven ",
+ "eight ", "nine " };
+
+ std::string result;
+ reduce(world,
+ world.rank() < 10? names[world.rank()]
+ : std::string("many "),
+ result, std::plus<std::string>(), 0);
+
+ if (world.rank() == 0)
+ std::cout << "The result is " << result << std::endl;
+
+ return 0;
+ }
+
+In this example, we compute a string for each process and then perform
+a reduction that concatenates all of the strings together into one
+long string. Executing this program with seven processes yields the
+following output:
+
+[pre
+The result is zero one two three four five six
+]
+
+[h4 Binary operations for reduce]
+Any kind of binary function object can be used with `reduce`: there
+are many such function objects in the C++ standard `<functional>`
+header and in the Boost.MPI header
+`<boost/mpi/operations.hpp>`, or you can create your own
+function object. Function objects used with `reduce` must be
+associative, i.e. `f(x, f(y, z))` must be equivalent to `f(f(x, y),
+z)`. If they are also commutative (i.e., `f(x, y) == f(y, x)`),
+Boost.MPI can use a more efficient implementation of `reduce`. To
+state that a function object is commutative, you will need to
+specialize the class [classref boost::mpi::is_commutative
+`is_commutative`]. For instance, we could modify the previous example
+by telling Boost.MPI that string concatenation is commutative:
+
+ namespace boost { namespace mpi {
+
+ template<>
+ struct is_commutative<std::plus<std::string>, std::string>
+ : mpl::true_ { };
+
+ } } // end namespace boost::mpi
+
+By adding this code prior to `main()`, Boost.MPI will assume that
+string concatenation is commutative and employ a different parallel
+algorithm for the `reduce` operation. Using this algorithm, the
+program outputs the following when run with seven processes:
+
+[pre
+The result is zero one four five six two three
+]
+
+Note how the numbers in the resulting string are in a different order:
+this is a direct result of Boost.MPI reordering operations. The result
+in this case differed from the non-commutative result because string
+concatenation is not commutative: `f("x", "y")` is not the same as
+`f("y", "x")`, because argument order matters. For truly commutative
+operations (e.g., integer addition), the more efficient commutative
+algorithm will produce the same result as the non-commutative
+algorithm. Boost.MPI also performs direct mappings from function
+objects in `<functional>` to `MPI_Op` values predefined by MPI (e.g.,
+`MPI_SUM`, `MPI_MAX`); if you have your own function objects that can
+take advantage of this mapping, see the class template [classref
+boost::mpi::is_mpi_op `is_mpi_op`].
+
+[warning Due to the underlying MPI limitations, it is important to note that the operation must be stateless.]
+
+[h4 All process variant]
+
+Like [link mpi.tutorial.collectives.gather `gather`], `reduce` has an "all"
+variant called [funcref boost::mpi::all_reduce `all_reduce`]
+that performs the reduction operation and broadcasts the result to all
+processes. This variant is useful, for instance, in establishing
+global minimum or maximum values.
+
+The following code (`global_min.cpp`) shows a broadcasting version of
+the `random_min.cpp` example:
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <cstdlib>
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+ int minimum;
+
+ mpi::all_reduce(world, my_number, minimum, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << minimum << std::endl;
+ }
+
+ return 0;
+ }
+
+In that example we provide both input and output values, requiring
+twice as much space, which can be a problem depending on the size
+of the transmitted data.
+If there is no need to preserve the input value, the output value
+can be omitted. In that case the input value will be overwritten with
+the output value, and Boost.MPI is able, in some situations, to implement
+the operation with a more space-efficient solution (using the `MPI_IN_PLACE`
+flag of the C MPI interface), as in the following example (`in_place_global_min.cpp`):
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <cstdlib>
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+
+ mpi::all_reduce(world, my_number, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << my_number << std::endl;
+ }
+
+ return 0;
+ }
+
+
+[endsect:reduce]
+
+[endsect:collectives]
diff --git a/libs/mpi/doc/communicator.qbk b/libs/mpi/doc/communicator.qbk
new file mode 100644
index 0000000000..9298bf5e03
--- /dev/null
+++ b/libs/mpi/doc/communicator.qbk
@@ -0,0 +1,122 @@
+[section:communicators Communicators]
+[section:managing Managing communicators]
+
+Communication with Boost.MPI always occurs over a communicator. A
+communicator contains a set of processes that can send messages among
+themselves and perform collective operations. There can be many
+communicators within a single program, each of which contains its own
+isolated communication space that acts independently of the other
+communicators.
+
+When the MPI environment is initialized, only the "world" communicator
+(called `MPI_COMM_WORLD` in the MPI C and Fortran bindings) is
+available. The "world" communicator, accessed by default-constructing
+a [classref boost::mpi::communicator mpi::communicator]
+object, contains all of the MPI processes present when the program
+begins execution. Other communicators can then be constructed by
+duplicating or building subsets of the "world" communicator. For
+instance, in the following program we split the processes into two
+groups: one for processes generating data and the other for processes
+that will collect the data. (`generate_collect.cpp`)
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <cstdlib>
+ #include <boost/serialization/vector.hpp>
+ namespace mpi = boost::mpi;
+
+ enum message_tags {msg_data_packet, msg_broadcast_data, msg_finished};
+
+ void generate_data(mpi::communicator local, mpi::communicator world);
+ void collect_data(mpi::communicator local, mpi::communicator world);
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ bool is_generator = world.rank() < 2 * world.size() / 3;
+ mpi::communicator local = world.split(is_generator? 0 : 1);
+ if (is_generator) generate_data(local, world);
+ else collect_data(local, world);
+
+ return 0;
+ }
+
+When communicators are split in this way, their processes retain
+membership in both the original communicator (which is not altered by
+the split) and the new communicator. However, the ranks of the
+processes may be different from one communicator to the next, because
+the rank values within a communicator are always contiguous values
+starting at zero. In the example above, the first two thirds of the
+processes become "generators" and the remaining processes become
+"collectors". The ranks of the "collectors" in the `world`
+communicator will be 2/3 `world.size()` and greater, whereas the ranks
+of the same collector processes in the `local` communicator will start
+at zero. The following excerpt from `collect_data()` (in
+`generate_collect.cpp`) illustrates how to manage multiple
+communicators:
+
+ mpi::status msg = world.probe();
+ if (msg.tag() == msg_data_packet) {
+ // Receive the packet of data
+ std::vector<int> data;
+ world.recv(msg.source(), msg.tag(), data);
+
+ // Tell each of the collectors that we'll be broadcasting some data
+ for (int dest = 1; dest < local.size(); ++dest)
+ local.send(dest, msg_broadcast_data, msg.source());
+
+ // Broadcast the actual data.
+ broadcast(local, data, 0);
+ }
+
+The code in this excerpt is executed by the "master" collector, i.e.,
+the node with rank 2/3 `world.size()` in the `world` communicator and
+rank 0 in the `local` (collector) communicator. It receives a message
+from a generator via the `world` communicator, then broadcasts the
+message to each of the collectors via the `local` communicator.
+
+For more control in the creation of communicators for subgroups of
+processes, the Boost.MPI [classref boost::mpi::group `group`] provides
+facilities to compute the union (`|`), intersection (`&`), and
+difference (`-`) of two groups, generate arbitrary subgroups, etc.
+
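+As a sketch (the choice of ranks is illustrative and assumes at least
+three processes), a subgroup can be carved out of the world group and
+turned into a new communicator, the Boost.MPI analogue of
+`MPI_Comm_create`:
+
+  mpi::group world_group = world.group();
+  int ranks[] = { 0, 1, 2 };
+  mpi::group subgroup = world_group.include(ranks, ranks + 3);
+  // Processes outside the subgroup receive a communicator that
+  // evaluates to false.
+  mpi::communicator subcomm(world, subgroup);
+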
+[endsect:managing]
+
+[section:cartesian_communicator Cartesian communicator]
+
+A communicator can be organised as a Cartesian grid; here is a basic example:
+
+ #include <vector>
+ #include <iostream>
+
+ #include <boost/mpi/communicator.hpp>
+ #include <boost/mpi/collectives.hpp>
+ #include <boost/mpi/environment.hpp>
+ #include <boost/mpi/cartesian_communicator.hpp>
+
+ #include <boost/test/minimal.hpp>
+
+ namespace mpi = boost::mpi;
+ int test_main(int argc, char* argv[])
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ if (world.size() != 24) return -1;
+ mpi::cartesian_dimension dims[] = {{2, true}, {3,true}, {4,true}};
+ mpi::cartesian_communicator cart(world, mpi::cartesian_topology(dims));
+ for (int r = 0; r < cart.size(); ++r) {
+ cart.barrier();
+ if (r == cart.rank()) {
+ std::vector<int> c = cart.coordinates(r);
+ std::cout << "rk :" << r << " coords: "
+ << c[0] << ' ' << c[1] << ' ' << c[2] << '\n';
+ }
+ }
+ return 0;
+ }
+
+[endsect:cartesian_communicator]
+[endsect:communicators]
diff --git a/libs/mpi/doc/getting_started.qbk b/libs/mpi/doc/getting_started.qbk
new file mode 100644
index 0000000000..068cdd2da1
--- /dev/null
+++ b/libs/mpi/doc/getting_started.qbk
@@ -0,0 +1,252 @@
+[section:getting_started Getting started]
+
+Getting started with Boost.MPI requires a working MPI implementation,
+a recent version of Boost, and some configuration information.
+
+[section:implementation MPI Implementation]
+To get started with Boost.MPI, you will first need a working
+MPI implementation. There are many conforming _MPI_implementations_
+available. Boost.MPI should work with any of them, although it has
+only been tested extensively with:
+
+* [@http://www.open-mpi.org Open MPI]
+* [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH2]
+* [@https://software.intel.com/en-us/intel-mpi-library Intel MPI]
+
+You can test your implementation using the following simple program,
+which passes a message from one processor to another. Each processor
+prints a message to standard output.
+
+ #include <mpi.h>
+ #include <iostream>
+
+ int main(int argc, char* argv[])
+ {
+ MPI_Init(&argc, &argv);
+
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (rank == 0) {
+ int value = 17;
+ int result = MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
+ if (result == MPI_SUCCESS)
+ std::cout << "Rank 0 OK!" << std::endl;
+ } else if (rank == 1) {
+ int value;
+ int result = MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
+ MPI_STATUS_IGNORE);
+ if (result == MPI_SUCCESS && value == 17)
+ std::cout << "Rank 1 OK!" << std::endl;
+ }
+ MPI_Finalize();
+ return 0;
+ }
+
+You should compile and run this program on two processors. To do this,
+consult the documentation for your MPI implementation. With _OpenMPI_, for
+instance, you compile with the `mpiCC` or `mpic++` compiler wrapper and run
+your program via `mpirun`; the `lamboot` and `lamhalt` steps shown below are
+only needed for the older LAM/MPI implementation. For instance, if
+your program is called `mpi-test.cpp`, use the following commands:
+
+[pre
+mpiCC -o mpi-test mpi-test.cpp
+lamboot
+mpirun -np 2 ./mpi-test
+lamhalt
+]
+
+When you run this program, you will see both `Rank 0 OK!` and `Rank 1
+OK!` printed to the screen. However, they may be printed in any order
+and may even overlap each other. The following output is perfectly
+legitimate for this MPI program:
+
+[pre
+Rank Rank 1 OK!
+0 OK!
+]
+
+If your output looks something like the above, your MPI implementation
+appears to be working with a C++ compiler and we're ready to move on.
+[endsect]
+
+[section:config Configure and Build]
+
+Like the rest of Boost, Boost.MPI uses version 2 of the
+[@http://www.boost.org/doc/html/bbv2.html Boost.Build] system for
+configuring and building the library binary.
+
+Please refer to the general Boost installation instructions for
+[@http://www.boost.org/doc/libs/release/more/getting_started/unix-variants.html#prepare-to-use-a-boost-library-binary Unix Variant]
+(including Unix, Linux and MacOS) or
+[@http://www.boost.org/doc/libs/1_58_0/more/getting_started/windows.html#prepare-to-use-a-boost-library-binary Windows].
+The simplified build instructions should apply on most platforms with a few specific modifications described below.
+
+[section:bootstrap Bootstrap]
+
+As explained in the boost installation instructions, running the bootstrap (`./bootstrap.sh` for unix variants or `bootstrap.bat` for Windows) from the boost root directory will produce a `project-config.jam` file. You need to edit that file and add the following line:
+
+ using mpi ;
+
+Alternatively, you can explicitly provide the list of Boost libraries you want to build.
+Please refer to the `--help` option of the `bootstrap` script.
+
+[endsect:bootstrap]
+[section:setup Setting up your MPI Implementation]
+
+First, you need to scan the =include/boost/mpi/config.hpp= file and check if some
+settings need to be modified for your MPI implementation or preferences.
+
+In particular, note the [macroref BOOST_MPI_HOMOGENEOUS] macro, which you will need to comment out
+if you plan to run on a heterogeneous set of machines. See the [link mpi.tutorial.performance_optimizations.homogeneous_machines optimization] notes below.
+
+Most MPI implementations require specific compilation and link options.
+To hide these details from the user, most MPI implementations provide
+wrappers which silently pass those options to the compiler.
+
+Depending on your MPI implementation, some work might be needed to tell Boost which
+specific MPI options to use. This is done through the `using mpi ;` directive in the `project-config.jam` file, whose general form is (do not forget to leave spaces around *:* and before *;*):
+
+[pre
+using mpi
+ : \[<MPI compiler wrapper>\]
+ : \[<compilation and link options>\]
+ : \[<mpi runner>\] ;
+]
+
+Depending on your installation and MPI distribution, the build system might be able to find all the required information, in which case you just need to specify:
+
+[pre
+using mpi ;
+]
+
+[section:troubleshooting Trouble shooting]
+
+Most of the time, especially with production HPC clusters, some work will need to be done.
+
+Here is a list of the most common issues and suggestions on how to fix them.
+
+* [*Your wrapper is not in your path or does not have a standard name]
+
+You will need to tell the build system how to call it using the first parameter:
+
+[pre
+using mpi : /opt/mpi/bullxmpi/1.2.8.3/bin/mpicc ;
+]
+
+[warning
+Boost.MPI only uses the C interface, so specifying the C wrapper should be enough. But some implementations will insist on using the C++ bindings.
+]
+
+* [*Your wrapper is really eccentric or does not exist]
+
+You will need to provide the compilation and link options through the second parameter using 'jam' directives.
+The following type of configuration used to be required for some specific Intel MPI installations:
+
+[pre
+using mpi : mpiicc :
+ <library-path>/softs/intel/impi/5.0.1.035/intel64/lib
+ <library-path>/softs/intel/impi/5.0.1.035/intel64/lib/release_mt
+ <include>/softs/intel/impi/5.0.1.035/intel64/include
+ <find-shared-library>mpifort
+ <find-shared-library>mpi_mt
+ <find-shared-library>mpigi
+ <find-shared-library>dl
+ <find-shared-library>rt ;
+]
+
+As a convenience, MPI wrappers usually have an option that prints the required information, and its name usually starts with `--show`. You can use its output to work out the required jam directives:
+[pre
+$ mpiicc -show
+icc -I/softs/...\/include ... -L/softs/...\/lib ... -Xlinker -rpath -Xlinker \/softs/...\/lib .... -lmpi -ldl -lrt -lpthread
+$
+]
+[pre
+$ mpicc --showme
+icc -I/opt/...\/include -pthread -L/opt/...\/lib -lmpi -ldl -lm -lnuma -Wl,--export-dynamic -lrt -lnsl -lutil -lm -ldl
+$ mpicc --showme:compile
+-I/opt/mpi/bullxmpi/1.2.8.3/include -pthread
+$ mpicc --showme:link
+-pthread -L/opt/...\/lib -lmpi -ldl -lm -lnuma -Wl,--export-dynamic -lrt -lnsl -lutil -lm -ldl
+$
+]
+
+To see the results of MPI auto-detection, pass `--debug-configuration` on
+the bjam command line.
+
+* [*The launch syntax cannot be detected]
+
+[note This is only used when [link mpi.getting_started.config.tests running the tests].]
+
+If you need to use a special command to launch an MPI program, you will need to specify it through the third parameter of the `using mpi` directive.
+
+So, assuming you launch the `all_gather_test` program with:
+
+[pre
+$mpiexec.hydra -np 4 all_gather_test
+]
+
+The directive will look like:
+
+[pre
+using mpi : mpiicc :
+ \[<compilation and link options>\]
+ : mpiexec.hydra -n ;
+]
+
+[endsect:troubleshooting]
+[endsect:setup]
+[section:build Build]
+
+To build the whole Boost distribution:
+[pre
+$cd <boost distribution>
+$./b2
+]
+To build the Boost.MPI library and its dependencies:
+[pre
+$cd <boost distribution>\/libs/mpi/build
+$..\/../../b2
+]
+
+[endsect:build]
+[section:tests Tests]
+
+You can run the regression tests with:
+[pre
+$cd <boost distribution>\/libs/mpi/test
+$..\/../../b2
+]
+
+[endsect:tests]
+[section:installation Installation]
+
+To install the whole Boost distribution:
+[pre
+$cd <boost distribution>
+$./b2 install
+]
+
+[endsect:installation]
+[endsect:config]
+[section:using Using Boost.MPI]
+
+To build applications based on Boost.MPI, compile and link them as you
+normally would for MPI programs, but remember to link against the
+`boost_mpi` and `boost_serialization` libraries, e.g.,
+
+[pre
+mpic++ -I/path/to/boost/mpi my_application.cpp -Llibdir \
+  -lboost_mpi-gcc-mt-1_35 -lboost_serialization-gcc-mt-1_35
+]
+
+If you plan to use the [link mpi.python Python bindings] for
+Boost.MPI in conjunction with the C++ Boost.MPI, you will also need to
+link against the boost_mpi_python library, e.g., by adding
+`-lboost_mpi_python-gcc-mt-1_35` to your link command. This step will
+only be necessary if you intend to [link mpi.python.user_data
+register C++ types] or use the [link
+mpi.python.skeleton_content skeleton/content mechanism] from
+within Python.
+
+[endsect:using]
+[endsect:getting_started]
diff --git a/libs/mpi/doc/introduction.qbk b/libs/mpi/doc/introduction.qbk
new file mode 100644
index 0000000000..f45efef415
--- /dev/null
+++ b/libs/mpi/doc/introduction.qbk
@@ -0,0 +1,53 @@
+[section:introduction Introduction]
+
+Boost.MPI is a library for message passing in high-performance
+parallel applications. A Boost.MPI program is one or more processes
+that can communicate either via sending and receiving individual
+messages (point-to-point communication) or by coordinating as a group
+(collective communication). Unlike communication in threaded
+environments or using a shared-memory library, Boost.MPI processes can
+be spread across many different machines, possibly with different
+operating systems and underlying architectures.
+
+Boost.MPI is not a completely new parallel programming
+library. Rather, it is a C++-friendly interface to the standard
+Message Passing Interface (_MPI_), the most popular library interface
+for high-performance, distributed computing. MPI defines
+a library interface, available from C, Fortran, and C++, for which
+there are many _MPI_implementations_. Although there exist C++
+bindings for MPI, they offer little functionality over the C
+bindings. The Boost.MPI library provides an alternative C++ interface
+to MPI that better supports modern C++ development styles, including
+complete support for user-defined data types and C++ Standard Library
+types, arbitrary function objects for collective algorithms, and the
+use of modern C++ library techniques to maintain maximal
+efficiency.
+
+At present, Boost.MPI supports the majority of functionality in MPI
+1.1. The thin abstractions in Boost.MPI allow one to easily combine it
+with calls to the underlying C MPI library. Boost.MPI currently
+supports:
+
+* Communicators: Boost.MPI supports the creation,
+ destruction, cloning, and splitting of MPI communicators, along with
+ manipulation of process groups.
+* Point-to-point communication: Boost.MPI supports
+ point-to-point communication of primitive and user-defined data
+ types with send and receive operations, with blocking and
+ non-blocking interfaces.
+* Collective communication: Boost.MPI supports collective
+ operations such as [funcref boost::mpi::reduce `reduce`]
+ and [funcref boost::mpi::gather `gather`] with both
+ built-in and user-defined data types and function objects.
+* MPI Datatypes: Boost.MPI can build MPI data types for
+ user-defined types using the _Serialization_ library.
+* Separating structure from content: Boost.MPI can transfer the shape
+ (or "skeleton") of complex data structures (lists, maps,
+ etc.) and then separately transfer their content. This facility
+ optimizes for cases where the data within a large, static data
+ structure needs to be transmitted many times.
+
+Boost.MPI can be accessed either through its native C++ bindings, or
+through its alternative, [link mpi.python Python interface].
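+
+As a small taste of the interface, the following sketch (not an excerpt
+from the library's own examples) sums the ranks of all processes with a
+single collective operation:
+
+    #include <boost/mpi.hpp>
+    #include <functional>
+    #include <iostream>
+    namespace mpi = boost::mpi;
+
+    int main()
+    {
+      mpi::environment env;
+      mpi::communicator world;
+
+      // Combine one value per process; every process receives the sum.
+      int sum = 0;
+      mpi::all_reduce(world, world.rank(), sum, std::plus<int>());
+      if (world.rank() == 0)
+        std::cout << "Sum of ranks: " << sum << std::endl;
+      return 0;
+    }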
+
+[endsect:introduction]
diff --git a/libs/mpi/doc/mpi.introduction.qbk b/libs/mpi/doc/mpi.introduction.qbk
new file mode 100644
index 0000000000..1c1970b8e0
--- /dev/null
+++ b/libs/mpi/doc/mpi.introduction.qbk
@@ -0,0 +1,53 @@
+[section:intro Introduction]
+
+Boost.MPI is a library for message passing in high-performance
+parallel applications. A Boost.MPI program is one or more processes
+that can communicate either via sending and receiving individual
+messages (point-to-point communication) or by coordinating as a group
+(collective communication). Unlike communication in threaded
+environments or using a shared-memory library, Boost.MPI processes can
+be spread across many different machines, possibly with different
+operating systems and underlying architectures.
+
+Boost.MPI is not a completely new parallel programming
+library. Rather, it is a C++-friendly interface to the standard
+Message Passing Interface (_MPI_), the most popular library interface
+for high-performance, distributed computing. MPI defines
+a library interface, available from C, Fortran, and C++, for which
+there are many _MPI_implementations_. Although there exist C++
+bindings for MPI, they offer little functionality over the C
+bindings. The Boost.MPI library provides an alternative C++ interface
+to MPI that better supports modern C++ development styles, including
+complete support for user-defined data types and C++ Standard Library
+types, arbitrary function objects for collective algorithms, and the
+use of modern C++ library techniques to maintain maximal
+efficiency.
+
+At present, Boost.MPI supports the majority of functionality in MPI
+1.1. The thin abstractions in Boost.MPI allow one to easily combine it
+with calls to the underlying C MPI library. Boost.MPI currently
+supports:
+
+* Communicators: Boost.MPI supports the creation,
+ destruction, cloning, and splitting of MPI communicators, along with
+ manipulation of process groups.
+* Point-to-point communication: Boost.MPI supports
+ point-to-point communication of primitive and user-defined data
+ types with send and receive operations, with blocking and
+ non-blocking interfaces.
+* Collective communication: Boost.MPI supports collective
+ operations such as [funcref boost::mpi::reduce `reduce`]
+ and [funcref boost::mpi::gather `gather`] with both
+ built-in and user-defined data types and function objects.
+* MPI Datatypes: Boost.MPI can build MPI data types for
+ user-defined types using the _Serialization_ library.
+* Separating structure from content: Boost.MPI can transfer the shape
+ (or "skeleton") of complex data structures (lists, maps,
+ etc.) and then separately transfer their content. This facility
+ optimizes for cases where the data within a large, static data
+ structure needs to be transmitted many times.
+
+Boost.MPI can be accessed either through its native C++ bindings, or
+through its alternative, [link mpi.python Python interface].
+
+[endsect]
diff --git a/libs/mpi/doc/mpi.qbk b/libs/mpi/doc/mpi.qbk
index f784cad9ee..2a2a70cff8 100644
--- a/libs/mpi/doc/mpi.qbk
+++ b/libs/mpi/doc/mpi.qbk
@@ -1,12 +1,8 @@
[library Boost.MPI
+ [quickbook 1.6]
[authors [Gregor, Douglas], [Troyer, Matthias] ]
[copyright 2005 2006 2007 Douglas Gregor, Matthias Troyer, Trustees of Indiana University]
- [purpose
- A generic, user-friendly interface to MPI, the Message
- Passing Interface.
- ]
[id mpi]
- [dirname mpi]
[license
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
@@ -32,2173 +28,14 @@
[def _accumulate_ [@http://www.sgi.com/tech/stl/accumulate.html
`accumulate`]]
-[/ QuickBook Document version 1.0 ]
-
-[section:intro Introduction]
-
-Boost.MPI is a library for message passing in high-performance
-parallel applications. A Boost.MPI program is one or more processes
-that can communicate either via sending and receiving individual
-messages (point-to-point communication) or by coordinating as a group
-(collective communication). Unlike communication in threaded
-environments or using a shared-memory library, Boost.MPI processes can
-be spread across many different machines, possibly with different
-operating systems and underlying architectures.
-
-Boost.MPI is not a completely new parallel programming
-library. Rather, it is a C++-friendly interface to the standard
-Message Passing Interface (_MPI_), the most popular library interface
-for high-performance, distributed computing. MPI defines
-a library interface, available from C, Fortran, and C++, for which
-there are many _MPI_implementations_. Although there exist C++
-bindings for MPI, they offer little functionality over the C
-bindings. The Boost.MPI library provides an alternative C++ interface
-to MPI that better supports modern C++ development styles, including
-complete support for user-defined data types and C++ Standard Library
-types, arbitrary function objects for collective algorithms, and the
-use of modern C++ library techniques to maintain maximal
-efficiency.
-
-At present, Boost.MPI supports the majority of functionality in MPI
-1.1. The thin abstractions in Boost.MPI allow one to easily combine it
-with calls to the underlying C MPI library. Boost.MPI currently
-supports:
-
-* Communicators: Boost.MPI supports the creation,
- destruction, cloning, and splitting of MPI communicators, along with
- manipulation of process groups.
-* Point-to-point communication: Boost.MPI supports
- point-to-point communication of primitive and user-defined data
- types with send and receive operations, with blocking and
- non-blocking interfaces.
-* Collective communication: Boost.MPI supports collective
- operations such as [funcref boost::mpi::reduce `reduce`]
- and [funcref boost::mpi::gather `gather`] with both
- built-in and user-defined data types and function objects.
-* MPI Datatypes: Boost.MPI can build MPI data types for
- user-defined types using the _Serialization_ library.
-* Separating structure from content: Boost.MPI can transfer the shape
- (or "skeleton") of complex data structures (lists, maps,
- etc.) and then separately transfer their content. This facility
- optimizes for cases where the data within a large, static data
- structure needs to be transmitted many times.
-
-Boost.MPI can be accessed either through its native C++ bindings, or
-through its alternative, [link mpi.python Python interface].
-
-[endsect]
-
-[section:getting_started Getting started]
-
-Getting started with Boost.MPI requires a working MPI implementation,
-a recent version of Boost, and some configuration information.
-
-[section:mpi_impl MPI Implementation]
-To get started with Boost.MPI, you will first need a working
-MPI implementation. There are many conforming _MPI_implementations_
-available. Boost.MPI should work with any of the
-implementations, although it has only been tested extensively with:
-
-* [@http://www.open-mpi.org Open MPI]
-* [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH2]
-* [@https://software.intel.com/en-us/intel-mpi-library Intel MPI]
-
-You can test your implementation using the following simple program,
-which passes a message from one processor to another. Each processor
-prints a message to standard output.
-
- #include <mpi.h>
- #include <iostream>
-
- int main(int argc, char* argv[])
- {
- MPI_Init(&argc, &argv);
-
- int rank;
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (rank == 0) {
- int value = 17;
- int result = MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
- if (result == MPI_SUCCESS)
- std::cout << "Rank 0 OK!" << std::endl;
- } else if (rank == 1) {
- int value;
- int result = MPI_Recv(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
- MPI_STATUS_IGNORE);
- if (result == MPI_SUCCESS && value == 17)
- std::cout << "Rank 1 OK!" << std::endl;
- }
- MPI_Finalize();
- return 0;
- }
-
-You should compile and run this program on two processors. To do this,
-consult the documentation for your MPI implementation. With _OpenMPI_, for
-instance, you compile with the `mpiCC` or `mpic++` wrapper and run your
-program via `mpirun`; an older LAM/MPI installation additionally requires
-booting the LAM daemon with `lamboot`, as below. For instance, if
-your program is called `mpi-test.cpp`, use the following commands:
-
-[pre
-mpiCC -o mpi-test mpi-test.cpp
-lamboot
-mpirun -np 2 ./mpi-test
-lamhalt
-]
-
-When you run this program, you will see both `Rank 0 OK!` and `Rank 1
-OK!` printed to the screen. However, they may be printed in any order
-and may even overlap each other. The following output is perfectly
-legitimate for this MPI program:
-
-[pre
-Rank Rank 1 OK!
-0 OK!
-]
-
-If your output looks something like the above, your MPI implementation
-appears to be working with a C++ compiler and we're ready to move on.
-[endsect]
-
-[section:config Configure and Build]
-
-[section:bjam Build Environment]
-
-Like the rest of Boost, Boost.MPI uses version 2 of the
-[@http://www.boost.org/doc/html/bbv2.html Boost.Build] system for
-configuring and building the library binary.
-
-Please refer to the general Boost installation instructions for
-[@http://www.boost.org/doc/libs/release/more/getting_started/unix-variants.html#prepare-to-use-a-boost-library-binary Unix Variant]
-(including Unix, Linux and MacOS) or
-[@http://www.boost.org/doc/libs/1_58_0/more/getting_started/windows.html#prepare-to-use-a-boost-library-binary Windows].
-The simplified build instructions should apply on most platforms with a few specific modifications described below.
-[endsect]
-
-[section:bootstraping Bootstrap]
-
-As described in the boost installation instructions, go to the root of your Boost source distribution
-and run the `bootstrap` script (`./bootstrap.sh` for unix variants or `bootstrap.bat` for Windows).
-That will generate a `project-config.jam` file in the root directory.
-Use your favourite text editor and add the following line:
-
- using mpi ;
-
-Alternatively, you can explicitly provide the list of Boost libraries you want to build.
-Please refer to the `--help` option of the `bootstrap` script.
-[endsect]
-
-[section:mpi_setup Setting up your MPI Implementation]
-
-First, you need to scan the =include/boost/mpi/config.hpp= file and check if some
-settings need to be modified for your MPI implementation or preferences.
-
-In particular, the [macroref BOOST_MPI_HOMOGENEOUS] macro, which you will need to comment out
-if you plan to run on a heterogeneous set of machines. See the [link mpi.homogeneous_machines optimization] notes below.
-
-Most MPI implementations require specific compilation and link options.
-In order to hide these options from the user, most MPI implementations provide
-wrappers which silently pass those options to the compiler.
-
-Depending on your MPI implementation, some work might be needed to tell Boost which
-specific MPI options to use. This is done through the `using mpi ;` directive of the `project-config.jam` file.
-
-The general form is the following (do not forget to leave spaces around *:* and before *;*):
-
-[pre
-using mpi
- : \[<MPI compiler wrapper>\]
- : \[<compilation and link options>\]
- : \[<mpi runner>\] ;
-]
-
-* [* If you're lucky]
-
-For those who use _MPICH_, _OpenMPI_ or one of their derivatives, configuration can be
-almost automatic. In fact, if your `mpicxx` command is in your path, you just need to use:
-
-[pre
-using mpi ;
-]
-
-The directive will find the wrapper and deduce the options to use.
-
-* [*If your wrapper is not in your path]
-
-...or if it does not have a usual wrapper name, you will need to tell the build system where to find it:
-
-[pre
-using mpi : /opt/mpi/bullxmpi/1.2.8.3/bin/mpicc ;
-]
-
-* [*If your wrapper is really eccentric]
-
-or does not exist at all (it happens), you need to
-provide the compilation and link options to the build environment using `jam` directives.
-For example, the following could be used for a specific Intel MPI implementation:
-
-[pre
-using mpi : mpiicc :
- <library-path>/softs/intel/impi/5.0.1.035/intel64/lib
- <library-path>/softs/intel/impi/5.0.1.035/intel64/lib/release_mt
- <include>/softs/intel/impi/5.0.1.035/intel64/include
- <find-shared-library>mpifort
- <find-shared-library>mpi_mt
- <find-shared-library>mpigi
- <find-shared-library>dl
- <find-shared-library>rt ;
-]
-
-To do that, you need to determine the libraries and include directories associated with your environment.
-You can refer to your specific MPI environment's documentation.
-Most of the time, though, your wrapper has an option that provides that information, usually starting with `--show`:
-[pre
-$ mpiicc -show
-icc -I/softs/...\/include ... -L/softs/...\/lib ... -Xlinker -rpath -Xlinker \/softs/...\/lib .... -lmpi -ldl -lrt -lpthread
-$
-]
-[pre
-$ mpicc --showme
-icc -I/opt/...\/include -pthread -L/opt/...\/lib -lmpi -ldl -lm -lnuma -Wl,--export-dynamic -lrt -lnsl -lutil -lm -ldl
-$ mpicc --showme:compile
--I/opt/mpi/bullxmpi/1.2.8.3/include -pthread
-$ mpicc --showme:link
--pthread -L/opt/...\/lib -lmpi -ldl -lm -lnuma -Wl,--export-dynamic -lrt -lnsl -lutil -lm -ldl
-$
-]
-
-To see the results of MPI auto-detection, pass `--debug-configuration` on
-the bjam command line.
-
-* [*If you want to run the regression tests]
-
-...Which is a good thing.
-
-The (optional) third argument configures Boost.MPI for running
-regression tests. These parameters specify the executable used to
-launch jobs (the default is "mpirun"), followed by any necessary
-arguments, and the flag that precedes the number of
-processors (default: "-np"). With the default parameters,
-for instance, the test harness will execute, e.g.,
-
-[pre
-mpirun -np 4 all_gather_test
-]
-
-Some implementations provide alternative launchers that can be more convenient. For example, Intel's MPI provides `mpiexec.hydra`:
-
-[pre
-$mpiexec.hydra -np 4 all_gather_test
-]
-
-which does not require any daemon to be running (as opposed to their `mpirun` command). Such a launcher needs to be specified explicitly, though:
-
-[pre
-using mpi : mpiicc :
- .....
- : mpiexec.hydra -n ;
-]
-
-[endsect]
-[section:installation Build and Install]
-
-To build the whole Boost distribution:
-[pre
-$cd <boost distribution>
-$./b2 install
-]
-
-[tip
-Or, if you have a multi-cpu machine (say 24):
-
-[pre
-$cd <boost distribution>
-$./b2 -j24 install
-]
-]
-
-Installation of Boost.MPI can be performed in the build step by
-specifying `install` on the command line and (optionally) providing an
-installation location, e.g.,
-
-[pre
-$./b2 install
-]
-
-This command will install libraries into a default system location. To
-change the path where libraries will be installed, add the option
-`--prefix=PATH`.
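-
-For instance, to install under an arbitrary example prefix such as
-`/opt/boost`:
-
-[pre
-$./b2 --prefix=/opt/boost install
-]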
-
-Then, you can run the regression tests with:
-[pre
-$cd <boost distribution>\/libs/mpi/test
-$../../../b2
-]
-
-[endsect]
-[endsect]
-[section:using Using Boost.MPI]
-
-To build applications based on Boost.MPI, compile and link them as you
-normally would for MPI programs, but remember to link against the
-`boost_mpi` and `boost_serialization` libraries, e.g.,
-
-[pre
-mpic++ -I/path/to/boost/mpi my_application.cpp -Llibdir \
- -lboost_mpi-gcc-mt-1_35 -lboost_serialization-gcc-d-1_35.a
-]
-
-If you plan to use the [link mpi.python Python bindings] for
-Boost.MPI in conjunction with the C++ Boost.MPI, you will also need to
-link against the boost_mpi_python library, e.g., by adding
-`-lboost_mpi_python-gcc-mt-1_35` to your link command. This step will
-only be necessary if you intend to [link mpi.python_user_data
-register C++ types] or use the [link
-mpi.python_skeleton_content skeleton/content mechanism] from
-within Python.
-
-[endsect]
-
-[endsect]
-
-[section:tutorial Tutorial]
-
-A Boost.MPI program consists of many cooperating processes (possibly
-running on different computers) that communicate among themselves by
-passing messages. Boost.MPI is a library (as is the lower-level MPI),
-not a language, so the first step in a Boost.MPI program is to create an
-[classref boost::mpi::environment mpi::environment] object
-that initializes the MPI environment and enables communication among
-the processes. The [classref boost::mpi::environment
-mpi::environment] object is initialized with the program arguments
-(which it may modify) in your main program. The creation of this
-object initializes MPI, and its destruction will finalize MPI. In the
-vast majority of Boost.MPI programs, an instance of [classref
-boost::mpi::environment mpi::environment] will be declared
-in `main` at the very beginning of the program.
-
-Communication with MPI always occurs over a *communicator*,
-which can be created by simply default-constructing an object of type
-[classref boost::mpi::communicator mpi::communicator]. This
-communicator can then be queried to determine how many processes are
-running (the "size" of the communicator) and to give a unique number
-to each process, from zero to the size of the communicator (i.e., the
-"rank" of the process):
-
- #include <boost/mpi/environment.hpp>
- #include <boost/mpi/communicator.hpp>
- #include <iostream>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
- std::cout << "I am process " << world.rank() << " of " << world.size()
- << "." << std::endl;
- return 0;
- }
-
-If you run this program with 7 processes, for instance, you will
-receive output such as:
-
-[pre
-I am process 5 of 7.
-I am process 0 of 7.
-I am process 1 of 7.
-I am process 6 of 7.
-I am process 2 of 7.
-I am process 4 of 7.
-I am process 3 of 7.
-]
-
-Of course, the processes can execute in a different order each time,
-so the ranks might not be strictly increasing. More interestingly, the
-text could come out completely garbled, because one process can start
-writing "I am process" before another process has finished writing
-"of 7.".
-
-If you still have an MPI library supporting only MPI 1.1, you
-will need to pass the command line arguments to the environment
-constructor as shown in this example:
-
- #include <boost/mpi/environment.hpp>
- #include <boost/mpi/communicator.hpp>
- #include <iostream>
- namespace mpi = boost::mpi;
-
- int main(int argc, char* argv[])
- {
- mpi::environment env(argc, argv);
- mpi::communicator world;
- std::cout << "I am process " << world.rank() << " of " << world.size()
- << "." << std::endl;
- return 0;
- }
-
-[section:point_to_point Point-to-Point communication]
-
-As a message passing library, MPI's primary purpose is to route
-messages from one process to another, i.e., point-to-point. MPI
-contains routines that can send messages, receive messages, and query
-whether messages are available. Each message has a source process, a
-target process, a tag, and a payload containing arbitrary data. The
-source and target processes are the ranks of the sender and receiver
-of the message, respectively. Tags are integers that allow the
-receiver to distinguish between different messages coming from the
-same sender.
-
-The following program uses two MPI processes to write "Hello, world!"
-to the screen (`hello_world.cpp`):
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <string>
- #include <boost/serialization/string.hpp>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- if (world.rank() == 0) {
- world.send(1, 0, std::string("Hello"));
- std::string msg;
- world.recv(1, 1, msg);
- std::cout << msg << "!" << std::endl;
- } else {
- std::string msg;
- world.recv(0, 0, msg);
- std::cout << msg << ", ";
- std::cout.flush();
- world.send(0, 1, std::string("world"));
- }
-
- return 0;
- }
-
-The first processor (rank 0) passes the message "Hello" to the second
-processor (rank 1) using tag 0. The second processor prints the string
-it receives, along with a comma, then passes the message "world" back
-to processor 0 with a different tag. The first processor then writes
-this message with the "!" and exits. All sends are accomplished with
-the [memberref boost::mpi::communicator::send
-communicator::send] method and all receives use a corresponding
-[memberref boost::mpi::communicator::recv
-communicator::recv] call.
-
-[section:nonblocking Non-blocking communication]
-
-The default MPI communication operations--`send` and `recv`--may have
-to wait until the entire transmission is completed before they can
-return. Sometimes this *blocking* behavior has a negative impact on
-performance, because the sender could be performing useful computation
-while it is waiting for the transmission to occur. More important,
-however, are the cases where several communication operations must
-occur simultaneously, e.g., a process will both send and receive at
-the same time.
-
-Let's revisit our "Hello, world!" program from the previous
-section. The core of this program transmits two messages:
-
- if (world.rank() == 0) {
- world.send(1, 0, std::string("Hello"));
- std::string msg;
- world.recv(1, 1, msg);
- std::cout << msg << "!" << std::endl;
- } else {
- std::string msg;
- world.recv(0, 0, msg);
- std::cout << msg << ", ";
- std::cout.flush();
- world.send(0, 1, std::string("world"));
- }
-
-The first process passes a message to the second process, then
-prepares to receive a message. The second process does the send and
-receive in the opposite order. However, this sequence of events is
-just that--a *sequence*--meaning that there is essentially no
-parallelism. We can use non-blocking communication to ensure that the
-two messages are transmitted simultaneously
-(`hello_world_nonblocking.cpp`):
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <string>
- #include <boost/serialization/string.hpp>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- if (world.rank() == 0) {
- mpi::request reqs[2];
- std::string msg, out_msg = "Hello";
- reqs[0] = world.isend(1, 0, out_msg);
- reqs[1] = world.irecv(1, 1, msg);
- mpi::wait_all(reqs, reqs + 2);
- std::cout << msg << "!" << std::endl;
- } else {
- mpi::request reqs[2];
- std::string msg, out_msg = "world";
- reqs[0] = world.isend(0, 1, out_msg);
- reqs[1] = world.irecv(0, 0, msg);
- mpi::wait_all(reqs, reqs + 2);
- std::cout << msg << ", ";
- }
-
- return 0;
- }
-
-We have replaced calls to the [memberref
-boost::mpi::communicator::send communicator::send] and
-[memberref boost::mpi::communicator::recv
-communicator::recv] members with similar calls to their non-blocking
-counterparts, [memberref boost::mpi::communicator::isend
-communicator::isend] and [memberref
-boost::mpi::communicator::irecv communicator::irecv]. The
-prefix *i* indicates that the operations return immediately with a
-[classref boost::mpi::request mpi::request] object, which
-allows one to query the status of a communication request (see the
-[memberref boost::mpi::request::test test] method) or wait
-until it has completed (see the [memberref
-boost::mpi::request::wait wait] method). Multiple requests
-can be completed at the same time with the [funcref
-boost::mpi::wait_all wait_all] operation.
-
-[important The MPI standard requires users to keep the request
-handle for a non-blocking communication, and to call the "wait"
-operation (or successfully test for completion) to complete the send
-or receive. Unlike most C MPI implementations, which allow the user to
-discard the request for a non-blocking send, Boost.MPI requires the
-user to call "wait" or "test", since the request object might contain
-temporary buffers that have to be kept until the send is
-completed. Moreover, the MPI standard does not guarantee that the
-receive makes any progress before a call to "wait" or "test", although
-most implementations of the C MPI do allow receives to progress before
-the call to "wait" or "test". Boost.MPI, on the other hand, generally
-requires "test" or "wait" calls to make progress.]
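-
-For instance, rather than blocking in `wait`, a process can poll a
-request with `test` and perform other work until the communication
-completes. A sketch (`do_other_work()` is a hypothetical placeholder):
-
-    std::string msg;
-    mpi::request req = world.irecv(1, 1, msg);
-    // test() returns an empty boost::optional<status> until completion.
-    while (!req.test())
-      do_other_work(); // hypothetical: overlap computation and communication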
-
-If you run this program multiple times, you may see some strange
-results: namely, some runs will produce:
-
- Hello, world!
-
-while others will produce:
-
- world!
- Hello,
-
-or even some garbled version of the letters in "Hello" and
-"world". This indicates that there is some parallelism in the program,
-because after both messages are (simultaneously) transmitted, both
-processes will concurrently execute their print statements. For both
-performance and correctness, non-blocking communication operations are
-critical to many parallel applications using MPI.
-
-[endsect]
-
-[section:user_data_types User-defined data types]
-
-The inclusion of `boost/serialization/string.hpp` in the previous
-examples is very important: it makes values of type `std::string`
-serializable, so that they can be transmitted using Boost.MPI. In
-general, built-in C++ types (`int`s, `float`s, characters, etc.) can
-be transmitted over MPI directly, while user-defined and
-library-defined types will need to first be serialized (packed) into a
-format that is amenable to transmission. Boost.MPI relies on the
-_Serialization_ library to serialize and deserialize data types.
-
-For types defined by the standard library (such as `std::string` or
-`std::vector`) and some types in Boost (such as `boost::variant`), the
-_Serialization_ library already contains all of the required
-serialization code. In these cases, you need only include the
-appropriate header from the `boost/serialization` directory.
-
-[def _gps_position_ [link gps_position `gps_position`]]
-For types that do not already have a serialization header, you will
-first need to implement serialization code before the types can be
-transmitted using Boost.MPI. Consider a simple class _gps_position_
-that contains members `degrees`, `minutes`, and `seconds`. This class
-is made serializable by making it a friend of
-`boost::serialization::access` and introducing the templated
-`serialize()` function, as follows:[#gps_position]
-
- class gps_position
- {
- private:
- friend class boost::serialization::access;
-
- template<class Archive>
- void serialize(Archive & ar, const unsigned int version)
- {
- ar & degrees;
- ar & minutes;
- ar & seconds;
- }
-
- int degrees;
- int minutes;
- float seconds;
- public:
- gps_position(){};
- gps_position(int d, int m, float s) :
- degrees(d), minutes(m), seconds(s)
- {}
- };
-
-Complete information about making types serializable is beyond the
-scope of this tutorial. For more information, please see the
-_Serialization_ library tutorial from which the above example was
-extracted. One important side benefit of making types serializable for
-Boost.MPI is that they become serializable for any other usage, such
-as storing the objects to disk or manipulating them in XML.
-
-
-Some serializable types, like _gps_position_ above, have a fixed
-amount of data stored at fixed offsets and are fully defined by
-the values of their data members (most PODs with no pointers are good examples).
-When this is the case, Boost.MPI can optimize their serialization and
-transmission by avoiding extraneous copy operations.
-To enable this optimization, users must specialize the type trait [classref
-boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.:
-
- namespace boost { namespace mpi {
- template <>
- struct is_mpi_datatype<gps_position> : mpl::true_ { };
- } }
-
-For non-template types we have defined a macro to simplify declaring a type
-as an MPI datatype
-
- BOOST_IS_MPI_DATATYPE(gps_position)
-
-For composite traits, the specialization of [classref
-boost::mpi::is_mpi_datatype `is_mpi_datatype`] may depend on
-`is_mpi_datatype` itself. For instance, a `boost::array` object is
-fixed only when the type of the parameter it stores is fixed:
-
- namespace boost { namespace mpi {
- template <typename T, std::size_t N>
- struct is_mpi_datatype<array<T, N> >
- : public is_mpi_datatype<T> { };
- } }
-
-The redundant copy elimination optimization can only be applied when
-the shape of the data type is completely fixed. Variable-length types
-(e.g., strings, linked lists) and types that store pointers cannot use
-the optimization, but Boost.MPI will be unable to detect this error at
-compile time. Attempting to perform this optimization when it is not
-correct will likely result in segmentation faults and other strange
-program behavior.
-
-Boost.MPI can transmit any user-defined data type from one process to
-another. Built-in types can be transmitted without any extra effort;
-library-defined types require the inclusion of a serialization header;
-and user-defined types will require the addition of serialization
-code. Fixed data types can be optimized for transmission using the
-[classref boost::mpi::is_mpi_datatype `is_mpi_datatype`]
-type trait.
-
-[endsect]
-[endsect]
-
-[section:collectives Collective operations]
-
-[link mpi.point_to_point Point-to-point operations] are the
-core message passing primitives in Boost.MPI. However, many
-message-passing applications also require higher-level communication
-algorithms that combine or summarize the data stored on many different
-processes. These algorithms support many common tasks such as
-"broadcast this value to all processes", "compute the sum of the
-values on all processors" or "find the global minimum."
-
-[section:broadcast Broadcast]
-The [funcref boost::mpi::broadcast `broadcast`] algorithm is
-by far the simplest collective operation. It broadcasts a value from a
-single process to all other processes within a [classref
-boost::mpi::communicator communicator]. For instance, the
-following program broadcasts "Hello, World!" from process 0 to every
-other process. (`hello_world_broadcast.cpp`)
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <string>
- #include <boost/serialization/string.hpp>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- std::string value;
- if (world.rank() == 0) {
- value = "Hello, World!";
- }
-
- broadcast(world, value, 0);
-
- std::cout << "Process #" << world.rank() << " says " << value
- << std::endl;
- return 0;
- }
-
-Running this program with seven processes will produce a result such
-as:
-
-[pre
-Process #0 says Hello, World!
-Process #2 says Hello, World!
-Process #1 says Hello, World!
-Process #4 says Hello, World!
-Process #3 says Hello, World!
-Process #5 says Hello, World!
-Process #6 says Hello, World!
-]
-[endsect]
-
-[section:gather Gather]
-The [funcref boost::mpi::gather `gather`] collective gathers
-the values produced by every process in a communicator into a vector
-of values on the "root" process (specified by an argument to
-`gather`). The /i/th element in the vector will correspond to the
-value gathered from the /i/th process. For instance, in the following
-program each process computes its own random number. All of these
-random numbers are gathered at process 0 (the "root" in this case),
-which prints out the values that correspond to each processor.
-(`random_gather.cpp`)
-
-    #include <boost/mpi.hpp>
-    #include <iostream>
-    #include <vector>
-    #include <cstdlib>
-    #include <ctime>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- std::srand(time(0) + world.rank());
- int my_number = std::rand();
- if (world.rank() == 0) {
- std::vector<int> all_numbers;
- gather(world, my_number, all_numbers, 0);
- for (int proc = 0; proc < world.size(); ++proc)
- std::cout << "Process #" << proc << " thought of "
- << all_numbers[proc] << std::endl;
- } else {
- gather(world, my_number, 0);
- }
-
- return 0;
- }
-
-Executing this program with seven processes will result in output such
-as the following. Although the random values will change from one run
-to the next, the order of the processes in the output will remain the
-same because only process 0 writes to `std::cout`.
-
-[pre
-Process #0 thought of 332199874
-Process #1 thought of 20145617
-Process #2 thought of 1862420122
-Process #3 thought of 480422940
-Process #4 thought of 1253380219
-Process #5 thought of 949458815
-Process #6 thought of 650073868
-]
-
-The `gather` operation collects values from every process into a
-vector at one process. If instead the values from every process need
-to be collected into identical vectors on every process, use the
-[funcref boost::mpi::all_gather `all_gather`] algorithm,
-which is semantically equivalent to calling `gather` followed by a
-`broadcast` of the resulting vector.
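-
-For instance, a sketch of the example above using `all_gather`, where
-every process (not just the root) obtains the gathered vector:
-
-    std::vector<int> all_numbers;
-    all_gather(world, my_number, all_numbers);
-    // all_numbers now holds one value per process, on every process.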
-
-[endsect]
-
-[section:scatter Scatter]
-The [funcref boost::mpi::scatter `scatter`] collective scatters
-the values from a vector in the "root" process in a communicator into
-values in all the processes of the communicator.
- The /i/th element in the vector will correspond to the
-value received by the /i/th process. For instance, in the following
-program, the root process produces a vector of random numbers and sends
-one value to each process, which then prints it. (`random_scatter.cpp`)
-
-    #include <boost/mpi.hpp>
-    #include <boost/mpi/collectives.hpp>
-    #include <algorithm>
-    #include <iostream>
-    #include <cstdlib>
-    #include <ctime>
-    #include <vector>
-
- namespace mpi = boost::mpi;
-
- int main(int argc, char* argv[])
- {
- mpi::environment env(argc, argv);
- mpi::communicator world;
-
- std::srand(time(0) + world.rank());
- std::vector<int> all;
- int mine = -1;
- if (world.rank() == 0) {
- all.resize(world.size());
- std::generate(all.begin(), all.end(), std::rand);
- }
- mpi::scatter(world, all, mine, 0);
- for (int r = 0; r < world.size(); ++r) {
- world.barrier();
- if (r == world.rank()) {
- std::cout << "Rank " << r << " got " << mine << '\n';
- }
- }
- return 0;
- }
-
-Executing this program with seven processes will result in output such
-as the following. Although the random values will change from one run
-to the next, the order of the processes in the output will remain the
-same because of the barrier.
-
-[pre
-Rank 0 got 1409381269
-Rank 1 got 17045268
-Rank 2 got 440120016
-Rank 3 got 936998224
-Rank 4 got 1827129182
-Rank 5 got 1951746047
-Rank 6 got 2117359639
-]
-
-[endsect]
-
-[section:reduce Reduce]
-
-The [funcref boost::mpi::reduce `reduce`] collective
-summarizes the values from each process into a single value at the
-user-specified "root" process. The Boost.MPI `reduce` operation is
-similar in spirit to the STL _accumulate_ operation, because it takes
-a sequence of values (one per process) and combines them via a
-function object. For instance, we can randomly generate values in each
-process and then compute the minimum value over all processes via a
-call to [funcref boost::mpi::reduce `reduce`]
-(`random_min.cpp`):
-
-    #include <boost/mpi.hpp>
-    #include <iostream>
-    #include <cstdlib>
-    #include <ctime>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- std::srand(time(0) + world.rank());
- int my_number = std::rand();
-
- if (world.rank() == 0) {
- int minimum;
- reduce(world, my_number, minimum, mpi::minimum<int>(), 0);
- std::cout << "The minimum value is " << minimum << std::endl;
- } else {
- reduce(world, my_number, mpi::minimum<int>(), 0);
- }
-
- return 0;
- }
-
-The use of `mpi::minimum<int>` indicates that the minimum value
-should be computed. `mpi::minimum<int>` is a binary function object
-that compares its two parameters via `<` and returns the smaller
-value. Any associative binary function or function object will
-work provided it's stateless. For instance, to concatenate strings with `reduce` one could use
-the function object `std::plus<std::string>` (`string_cat.cpp`):
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <string>
- #include <functional>
- #include <boost/serialization/string.hpp>
- namespace mpi = boost::mpi;
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- std::string names[10] = { "zero ", "one ", "two ", "three ",
- "four ", "five ", "six ", "seven ",
- "eight ", "nine " };
-
- std::string result;
- reduce(world,
- world.rank() < 10? names[world.rank()]
- : std::string("many "),
- result, std::plus<std::string>(), 0);
-
- if (world.rank() == 0)
- std::cout << "The result is " << result << std::endl;
-
- return 0;
- }
-
-In this example, we compute a string for each process and then perform
-a reduction that concatenates all of the strings together into one,
-long string. Executing this program with seven processors yields the
-following output:
-
-[pre
-The result is zero one two three four five six
-]
-
-[h4 Binary operations for reduce]
-Any kind of binary function object can be used with `reduce`. There
-are many such function objects in the C++ standard
-`<functional>` header and in the Boost.MPI header
-`<boost/mpi/operations.hpp>`, or you can create your own
-function object. Function objects used with `reduce` must be
-associative, i.e. `f(x, f(y, z))` must be equivalent to `f(f(x, y),
-z)`. If they are also commutative (i.e., `f(x, y) == f(y, x)`),
-Boost.MPI can use a more efficient implementation of `reduce`. To
-state that a function object is commutative, you will need to
-specialize the class [classref boost::mpi::is_commutative
-`is_commutative`]. For instance, we could modify the previous example
-by telling Boost.MPI that string concatenation is commutative:
-
- namespace boost { namespace mpi {
-
- template<>
- struct is_commutative<std::plus<std::string>, std::string>
- : mpl::true_ { };
-
- } } // end namespace boost::mpi
-
-By adding this code prior to `main()`, Boost.MPI will assume that
-string concatenation is commutative and employ a different parallel
-algorithm for the `reduce` operation. Using this algorithm, the
-program outputs the following when run with seven processes:
-
-[pre
-The result is zero one four five six two three
-]
-
-Note how the numbers in the resulting string are in a different order:
-this is a direct result of Boost.MPI reordering operations. The result
-in this case differed from the non-commutative result because string
-concatenation is not commutative: `f("x", "y")` is not the same as
-`f("y", "x")`, because argument order matters. For truly commutative
-operations (e.g., integer addition), the more efficient commutative
-algorithm will produce the same result as the non-commutative
-algorithm. Boost.MPI also performs direct mappings from function
-objects in `<functional>` to `MPI_Op` values predefined by MPI (e.g.,
-`MPI_SUM`, `MPI_MAX`); if you have your own function objects that can
-take advantage of this mapping, see the class template [classref
-boost::mpi::is_mpi_op `is_mpi_op`].
-
-[warning Due to underlying MPI limitations, it is important to note that the operation must be stateless.]
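-
-If you have your own function object that is known to map to a built-in
-`MPI_Op`, a specialization might look like the following sketch
-(`my_plus` is a hypothetical stateless function object assumed to be
-semantically equivalent to integer addition):
-
-    namespace boost { namespace mpi {
-
-    template<>
-    struct is_mpi_op<my_plus, int> : mpl::true_
-    {
-      static MPI_Op op() { return MPI_SUM; }
-    };
-
-    } } // end namespace boost::mpi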
-
-[h4 All process variant]
-
-Like [link mpi.gather `gather`], `reduce` has an "all"
-variant called [funcref boost::mpi::all_reduce `all_reduce`]
-that performs the reduction operation and broadcasts the result to all
-processes. This variant is useful, for instance, in establishing
-global minimum or maximum values.
-
-The following code (`global_min.cpp`) shows a broadcasting version of
-the `random_min.cpp` example:
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <cstdlib>
- namespace mpi = boost::mpi;
-
- int main(int argc, char* argv[])
- {
- mpi::environment env(argc, argv);
- mpi::communicator world;
-
- std::srand(world.rank());
- int my_number = std::rand();
- int minimum;
-
- mpi::all_reduce(world, my_number, minimum, mpi::minimum<int>());
-
- if (world.rank() == 0) {
- std::cout << "The minimum value is " << minimum << std::endl;
- }
-
- return 0;
- }
-
-In that example we provide both input and output values, requiring
-twice as much space, which can be a problem depending on the size
-of the transmitted data.
-If there is no need to preserve the input value, the output value
-can be omitted. In that case the input value will be overwritten with
-the output value, and Boost.MPI is able, in some situations, to implement
-the operation with a more space-efficient solution (using the `MPI_IN_PLACE`
-flag of the MPI C mapping), as in the following example (`in_place_global_min.cpp`):
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <cstdlib>
- namespace mpi = boost::mpi;
-
- int main(int argc, char* argv[])
- {
- mpi::environment env(argc, argv);
- mpi::communicator world;
-
- std::srand(world.rank());
- int my_number = std::rand();
-
- mpi::all_reduce(world, my_number, mpi::minimum<int>());
-
- if (world.rank() == 0) {
- std::cout << "The minimum value is " << my_number << std::endl;
- }
-
- return 0;
- }
-
-
-[endsect]
-
-[endsect]
-
-[section:communicators Managing communicators]
-
-Communication with Boost.MPI always occurs over a communicator. A
-communicator contains a set of processes that can send messages among
-themselves and perform collective operations. There can be many
-communicators within a single program, each of which contains its own
-isolated communication space that acts independently of the other
-communicators.
-
-When the MPI environment is initialized, only the "world" communicator
-(called `MPI_COMM_WORLD` in the MPI C and Fortran bindings) is
-available. The "world" communicator, accessed by default-constructing
-a [classref boost::mpi::communicator mpi::communicator]
-object, contains all of the MPI processes present when the program
-begins execution. Other communicators can then be constructed by
-duplicating or building subsets of the "world" communicator. For
-instance, in the following program we split the processes into two
-groups: one for processes generating data and the other for processes
-that will collect the data. (`generate_collect.cpp`)
-
- #include <boost/mpi.hpp>
- #include <iostream>
- #include <cstdlib>
- #include <boost/serialization/vector.hpp>
- namespace mpi = boost::mpi;
-
- enum message_tags {msg_data_packet, msg_broadcast_data, msg_finished};
-
- void generate_data(mpi::communicator local, mpi::communicator world);
- void collect_data(mpi::communicator local, mpi::communicator world);
-
- int main()
- {
- mpi::environment env;
- mpi::communicator world;
-
- bool is_generator = world.rank() < 2 * world.size() / 3;
- mpi::communicator local = world.split(is_generator? 0 : 1);
- if (is_generator) generate_data(local, world);
- else collect_data(local, world);
-
- return 0;
- }
-
-When communicators are split in this way, their processes retain
-membership in both the original communicator (which is not altered by
-the split) and the new communicator. However, the ranks of the
-processes may be different from one communicator to the next, because
-the rank values within a communicator are always contiguous values
-starting at zero. In the example above, the first two thirds of the
-processes become "generators" and the remaining processes become
-"collectors". The ranks of the "collectors" in the `world`
-communicator will be 2/3 `world.size()` and greater, whereas the ranks
-of the same collector processes in the `local` communicator will start
-at zero. The following excerpt from `collect_data()` (in
-`generate_collect.cpp`) illustrates how to manage multiple
-communicators:
-
- mpi::status msg = world.probe();
- if (msg.tag() == msg_data_packet) {
- // Receive the packet of data
- std::vector<int> data;
- world.recv(msg.source(), msg.tag(), data);
-
- // Tell each of the collectors that we'll be broadcasting some data
- for (int dest = 1; dest < local.size(); ++dest)
- local.send(dest, msg_broadcast_data, msg.source());
-
- // Broadcast the actual data.
- broadcast(local, data, 0);
- }
-
-The code in this excerpt is executed by the "master" collector, i.e.,
-the node with rank 2/3 `world.size()` in the `world` communicator and
-rank 0 in the `local` (collector) communicator. It receives a message
-from a generator via the `world` communicator, then broadcasts the
-message to each of the collectors via the `local` communicator.
-
-For more control in the creation of communicators for subgroups of
-processes, the Boost.MPI [classref boost::mpi::group `group`] provides
-facilities to compute the union (`|`), intersection (`&`), and
-difference (`-`) of two groups, generate arbitrary subgroups, etc.
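-
-For example, a sketch that builds a communicator over an explicit
-subgroup of processes (the ranks chosen here are arbitrary):
-
-    mpi::group world_group = world.group();
-    int ranks[] = { 0, 1, 2 };
-    // Subgroup of 'world' containing only ranks 0, 1, and 2.
-    mpi::group sub_group = world_group.include(ranks, ranks + 3);
-    // Communicator over that subgroup; processes outside the
-    // subgroup receive a null communicator.
-    mpi::communicator sub(world, sub_group);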
-
-[endsect]
-
-[section:cartesian_communicator Cartesian communicator]
-
-A communicator can be organised as a Cartesian grid. Here is a basic example:
-
- #include <vector>
- #include <iostream>
-
- #include <boost/mpi/communicator.hpp>
- #include <boost/mpi/collectives.hpp>
- #include <boost/mpi/environment.hpp>
- #include <boost/mpi/cartesian_communicator.hpp>
-
- #include <boost/test/minimal.hpp>
-
- namespace mpi = boost::mpi;
- int test_main(int argc, char* argv[])
- {
- mpi::environment env;
- mpi::communicator world;
-
- if (world.size() != 24) return -1;
- mpi::cartesian_dimension dims[] = {{2, true}, {3,true}, {4,true}};
- mpi::cartesian_communicator cart(world, mpi::cartesian_topology(dims));
- for (int r = 0; r < cart.size(); ++r) {
- cart.barrier();
- if (r == cart.rank()) {
- std::vector<int> c = cart.coordinates(r);
- std::cout << "rk :" << r << " coords: "
- << c[0] << ' ' << c[1] << ' ' << c[2] << '\n';
- }
- }
- return 0;
- }
-
-[endsect]
-
-[section:skeleton_and_content Separating structure from content]
-
-When communicating data types over MPI that are not fundamental to MPI
-(such as strings, lists, and user-defined data types), Boost.MPI must
-first serialize these data types into a buffer and then communicate
-them; the receiver then copies the results into a buffer before
-deserializing into an object on the other end. For some data types,
-this overhead can be eliminated by using [classref
-boost::mpi::is_mpi_datatype `is_mpi_datatype`]. However,
-variable-length data types such as strings and lists cannot be MPI
-data types.
-
-Boost.MPI supports a second technique for improving performance by
-separating the structure of these variable-length data structures from
-the content stored in the data structures. This feature is only
-beneficial when the shape of the data structure remains the same but
-the content of the data structure will need to be communicated several
-times. For instance, in a finite element analysis the structure of the
-mesh may be fixed at the beginning of computation but the various
-variables on the cells of the mesh (temperature, stress, etc.) will be
-communicated many times within the iterative analysis process. In this
-case, Boost.MPI allows one to first send the "skeleton" of the mesh
-once, then transmit the "content" multiple times. Since the content
-need not contain any information about the structure of the data type,
-it can be transmitted without creating separate communication buffers.
-
-To illustrate the use of skeletons and content, we will take a
-somewhat more limited example wherein a master process generates
-random number sequences into a list and transmits them to several
-slave processes. The length of the list will be fixed at program
-startup, so the content of the list (i.e., the current sequence of
-numbers) can be transmitted efficiently. The complete example is
-available in `example/random_content.cpp`. We begin with the master
-process (rank 0), which builds a list, communicates its structure via
-a [funcref boost::mpi::skeleton `skeleton`], then repeatedly
-generates random number sequences to be broadcast to the slave
-processes via [classref boost::mpi::content `content`]:
-
-
- // Generate the list and broadcast its structure
- std::list<int> l(list_len);
- broadcast(world, mpi::skeleton(l), 0);
-
- // Generate content several times and broadcast out that content
- mpi::content c = mpi::get_content(l);
- for (int i = 0; i < iterations; ++i) {
- // Generate new random values
- std::generate(l.begin(), l.end(), &random);
-
- // Broadcast the new content of l
- broadcast(world, c, 0);
- }
-
- // Notify the slaves that we're done by sending all zeroes
- std::fill(l.begin(), l.end(), 0);
- broadcast(world, c, 0);
-
-
-The slave processes have a very similar structure to the master. They
-receive (via the [funcref boost::mpi::broadcast
-`broadcast()`] call) the skeleton of the data structure, then use it
-to build their own lists of integers. In each iteration, they receive
-via another `broadcast()` the new content in the data structure and
-compute some property of the data:
-
-
-    // Receive the skeleton and build up our own list
- std::list<int> l;
- broadcast(world, mpi::skeleton(l), 0);
-
- mpi::content c = mpi::get_content(l);
- int i = 0;
- do {
- broadcast(world, c, 0);
-
- if (std::find_if
- (l.begin(), l.end(),
- std::bind1st(std::not_equal_to<int>(), 0)) == l.end())
- break;
-
- // Compute some property of the data.
-
- ++i;
- } while (true);
-
-
-The skeletons and content of any Serializable data type can be
-transmitted either via the [memberref
-boost::mpi::communicator::send `send`] and [memberref
-boost::mpi::communicator::recv `recv`] members of the
-[classref boost::mpi::communicator `communicator`] class
-(for point-to-point communication) or broadcast via the [funcref
-boost::mpi::broadcast `broadcast()`] collective. When
-separating a data structure into a skeleton and content, be careful
-not to modify the data structure (either on the sender side or the
-receiver side) without transmitting the skeleton again. Boost.MPI cannot
-detect these accidental modifications to the data structure, which
-will likely result in incorrect data being transmitted or unstable
-programs.
-
-[endsect]
-
-[section:performance_optimizations Performance optimizations]
-[section:serialization_optimizations Serialization optimizations]
-
-To obtain optimal performance for small fixed-length data types not containing
-any pointers it is very important to mark them using the type traits of
-Boost.MPI and Boost.Serialization.
-
-It was already discussed that fixed-length types containing no pointers can be
-marked as MPI data types by specializing [classref
-boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.:
-
- namespace boost { namespace mpi {
- template <>
- struct is_mpi_datatype<gps_position> : mpl::true_ { };
- } }
-
-or the equivalent macro
-
- BOOST_IS_MPI_DATATYPE(gps_position)
-
-In addition it can give a substantial performance gain to turn off tracking
-and versioning for these types, if no pointers to these types are used, by
-using the traits classes or helper macros of Boost.Serialization:
-
- BOOST_CLASS_TRACKING(gps_position,track_never)
- BOOST_CLASS_IMPLEMENTATION(gps_position,object_serializable)
-
-[endsect]
-
-[section:homogeneous_machines Homogeneous Machines]
-
-More optimizations are possible on homogeneous machines, by avoiding
-MPI_Pack/MPI_Unpack calls and using direct bitwise copy instead. This feature is
-enabled by default by defining the macro [macroref BOOST_MPI_HOMOGENEOUS] in the include
-file `boost/mpi/config.hpp`.
-That definition must be consistent when building Boost.MPI and
-when building the application.
-
-In addition all classes need to be marked both as is_mpi_datatype and
-as is_bitwise_serializable, by using the helper macro of Boost.Serialization:
-
- BOOST_IS_BITWISE_SERIALIZABLE(gps_position)
-
-Usually it is safe to serialize a class for which is_mpi_datatype is true
-by using a binary copy of the bits. The exceptions are classes for which
-some members should be skipped during serialization.
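-
-As an illustration, here is a sketch of such a class: the `cache`
-member is deliberately skipped by `serialize()`, so a plain bitwise
-copy would wrongly transmit it:
-
-    class sample
-    {
-      friend class boost::serialization::access;
-
-      template<class Archive>
-      void serialize(Archive& ar, const unsigned int /*version*/)
-      {
-        ar & value; // 'cache' is intentionally not serialized
-      }
-
-      int value;
-      int cache; // cached, derived data
-    };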
-
-[endsect]
-[endsect]
-
-
-[section:c_mapping Mapping from C MPI to Boost.MPI]
-
-This section provides tables that map from the functions and constants
-of the standard C MPI to their Boost.MPI equivalents. It will be most
-useful for users that are already familiar with the C or Fortran
-interfaces to MPI, or for porting existing parallel programs to Boost.MPI.
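-
-For instance, the C send from the test program shown earlier and its
-Boost.MPI counterpart (a sketch) correspond as follows:
-
-    // C MPI:
-    MPI_Send(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
-    // Boost.MPI equivalent:
-    world.send(1, 0, value);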
-
-[table Point-to-point communication
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_ANY_SOURCE`] [`any_source`]]
-
- [[`MPI_ANY_TAG`] [`any_tag`]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node40.html#Node40
-`MPI_Bsend`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Bsend_init`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node42.html#Node42
-`MPI_Buffer_attach`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node42.html#Node42
-`MPI_Buffer_detach`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
-`MPI_Cancel`]]
- [[memberref boost::mpi::request::cancel
-`request::cancel`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node35.html#Node35
-`MPI_Get_count`]]
- [[memberref boost::mpi::status::count `status::count`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
-`MPI_Ibsend`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
-`MPI_Iprobe`]]
- [[memberref boost::mpi::communicator::iprobe `communicator::iprobe`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
-`MPI_Irsend`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
-`MPI_Isend`]]
- [[memberref boost::mpi::communicator::isend
-`communicator::isend`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
-`MPI_Issend`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node46.html#Node46
-`MPI_Irecv`]]
-  [[memberref boost::mpi::communicator::irecv
-`communicator::irecv`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
-`MPI_Probe`]]
- [[memberref boost::mpi::communicator::probe `communicator::probe`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node53.html#Node53
-`MPI_PROC_NULL`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node34.html#Node34 `MPI_Recv`]]
- [[memberref boost::mpi::communicator::recv
-`communicator::recv`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Recv_init`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Request_free`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node40.html#Node40
-`MPI_Rsend`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Rsend_init`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node31.html#Node31
-`MPI_Send`]]
- [[memberref boost::mpi::communicator::send
-`communicator::send`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node52.html#Node52
-`MPI_Sendrecv`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node52.html#Node52
-`MPI_Sendrecv_replace`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Send_init`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node40.html#Node40
-`MPI_Ssend`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Ssend_init`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Start`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node51.html#Node51
-`MPI_Startall`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Test`]] [[memberref boost::mpi::request::test `request::test`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Testall`]] [[funcref boost::mpi::test_all `test_all`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Testany`]] [[funcref boost::mpi::test_any `test_any`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Testsome`]] [[funcref boost::mpi::test_some `test_some`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node50.html#Node50
-`MPI_Test_cancelled`]]
- [[memberref boost::mpi::status::cancelled
-`status::cancelled`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Wait`]] [[memberref boost::mpi::request::wait
-`request::wait`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Waitall`]] [[funcref boost::mpi::wait_all `wait_all`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Waitany`]] [[funcref boost::mpi::wait_any `wait_any`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node47.html#Node47
-`MPI_Waitsome`]] [[funcref boost::mpi::wait_some `wait_some`]]]
-]
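-
-To illustrate the mapping, here is a minimal sketch (assuming at least
-two processes) in which `communicator::send` and `communicator::recv`
-replace `MPI_Send` and `MPI_Recv`:
-
-    #include <boost/mpi.hpp>
-    namespace mpi = boost::mpi;
-
-    int main(int argc, char* argv[])
-    {
-      mpi::environment env(argc, argv);
-      mpi::communicator world;
-      if (world.rank() == 0) {
-        int value = 17;
-        world.send(1, 0, value);  // MPI_Send(&value, 1, MPI_INT, 1, 0, comm)
-      } else if (world.rank() == 1) {
-        int value;
-        world.recv(0, 0, value);  // MPI_Recv(&value, 1, MPI_INT, 0, 0, comm, &status)
-      }
-      return 0;
-    }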
-
-Boost.MPI automatically maps C and C++ data types to their MPI
-equivalents. The following table illustrates the mappings between C++
-types and MPI datatype constants.
-
-[table Datatypes
- [[C Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_CHAR`] [`signed char`]]
- [[`MPI_SHORT`] [`signed short int`]]
- [[`MPI_INT`] [`signed int`]]
- [[`MPI_LONG`] [`signed long int`]]
- [[`MPI_UNSIGNED_CHAR`] [`unsigned char`]]
- [[`MPI_UNSIGNED_SHORT`] [`unsigned short int`]]
- [[`MPI_UNSIGNED`] [`unsigned int`]]
- [[`MPI_UNSIGNED_LONG`] [`unsigned long int`]]
- [[`MPI_FLOAT`] [`float`]]
- [[`MPI_DOUBLE`] [`double`]]
- [[`MPI_LONG_DOUBLE`] [`long double`]]
- [[`MPI_BYTE`] [unused]]
- [[`MPI_PACKED`] [used internally for [link
-mpi.user_data_types serialized data types]]]
- [[`MPI_LONG_LONG_INT`] [`long long int`, if supported by compiler]]
- [[`MPI_UNSIGNED_LONG_LONG`] [`unsigned long long int`, if
-supported by compiler]]
- [[`MPI_FLOAT_INT`] [`std::pair<float, int>`]]
- [[`MPI_DOUBLE_INT`] [`std::pair<double, int>`]]
- [[`MPI_LONG_INT`] [`std::pair<long, int>`]]
- [[`MPI_2INT`] [`std::pair<int, int>`]]
- [[`MPI_SHORT_INT`] [`std::pair<short, int>`]]
- [[`MPI_LONG_DOUBLE_INT`] [`std::pair<long double, int>`]]
-]
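-
-For example, a `std::pair<double, int>` is transmitted with the
-`MPI_DOUBLE_INT` datatype rather than being serialized. A minimal
-sketch (a fragment, assuming an initialized environment and at least
-two processes):
-
-    #include <boost/mpi.hpp>
-    #include <utility>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    std::pair<double, int> value(3.14, world.rank());
-    if (world.rank() == 0)
-      world.send(1, 0, value);  // mapped to MPI_DOUBLE_INT internally
-    else if (world.rank() == 1)
-      world.recv(0, 0, value);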
-
-Boost.MPI does not provide direct wrappers to the MPI derived
-datatypes functionality. Instead, Boost.MPI relies on the
-_Serialization_ library to construct MPI datatypes for user-defined
-classes. The section on [link mpi.user_data_types user-defined
-data types] describes this mechanism, which is used for types that are
-marked as "MPI datatypes" using [classref
-boost::mpi::is_mpi_datatype `is_mpi_datatype`].
-
-The derived datatypes table that follows describes which C++ types
-correspond to the functionality of the C MPI's datatype
-constructor. Boost.MPI may not actually use the C MPI function listed
-when building datatypes of a certain form. Since the actual datatypes
-built by Boost.MPI are typically hidden from the user, many of these
-operations are called internally by Boost.MPI.
-
-[table Derived datatypes
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node56.html#Node56
-`MPI_Address`]] [used automatically in Boost.MPI for MPI version 1.x]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-20-html/node76.htm#Node76
-`MPI_Get_address`]] [used automatically in Boost.MPI for MPI version 2.0 and higher]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node58.html#Node58
-`MPI_Type_commit`]] [used automatically in Boost.MPI]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
-`MPI_Type_contiguous`]] [arrays]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node56.html#Node56
-`MPI_Type_extent`]] [used automatically in Boost.MPI]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node58.html#Node58
-`MPI_Type_free`]] [used automatically in Boost.MPI]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
-`MPI_Type_hindexed`]] [any type used as a subobject]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
-`MPI_Type_hvector`]] [unused]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
-`MPI_Type_indexed`]] [any type used as a subobject]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node57.html#Node57
-`MPI_Type_lb`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node56.html#Node56
-`MPI_Type_size`]] [used automatically in Boost.MPI]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
-`MPI_Type_struct`]] [user-defined classes and structs with MPI 1.x]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-20-html/node76.htm#Node76
-`MPI_Type_create_struct`]] [user-defined classes and structs with MPI 2.0 and higher]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node57.html#Node57
-`MPI_Type_ub`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node55.html#Node55
-`MPI_Type_vector`]] [used automatically in Boost.MPI]]
-]
-
-MPI's packing facilities store values into a contiguous buffer, which
-can later be transmitted via MPI and unpacked into separate values via
-MPI's unpacking facilities. As with datatypes, Boost.MPI provides an
-abstract interface to MPI's packing and unpacking facilities. In
-particular, the two archive classes [classref
-boost::mpi::packed_oarchive `packed_oarchive`] and [classref
-boost::mpi::packed_iarchive `packed_iarchive`] can be used
-to pack or unpack a contiguous buffer using MPI's facilities.
-
-[table Packing and unpacking
- [[C Function] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node62.html#Node62
-`MPI_Pack`]] [[classref
-boost::mpi::packed_oarchive `packed_oarchive`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node62.html#Node62
-`MPI_Pack_size`]] [used internally by Boost.MPI]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node62.html#Node62
-`MPI_Unpack`]] [[classref
-boost::mpi::packed_iarchive `packed_iarchive`]]]
-]
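-
-A minimal sketch of explicit packing (a fragment, assuming an
-initialized environment and at least two processes): the sender packs
-two values into a single buffer and the receiver unpacks them in the
-same order.
-
-    #include <boost/mpi.hpp>
-    #include <boost/serialization/string.hpp>
-    #include <string>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    if (world.rank() == 0) {
-      mpi::packed_oarchive oa(world);  // plays the role of MPI_Pack
-      int n = 42;
-      std::string s("hello");
-      oa << n << s;
-      world.send(1, 0, oa);
-    } else if (world.rank() == 1) {
-      mpi::packed_iarchive ia(world);
-      world.recv(0, 0, ia);            // plays the role of MPI_Unpack
-      int n;
-      std::string s;
-      ia >> n >> s;
-    }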
-
-Boost.MPI supports a one-to-one mapping for most of the MPI
-collectives. For each collective provided by Boost.MPI, the underlying
-C MPI collective will be invoked when it is possible (and efficient)
-to do so.
-
-[table Collectives
- [[C Function] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node73.html#Node73
-`MPI_Allgather`]] [[funcref boost::mpi::all_gather `all_gather`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node73.html#Node73
-`MPI_Allgatherv`]] [most uses supported by [funcref boost::mpi::all_gather `all_gather`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node82.html#Node82
-`MPI_Allreduce`]] [[funcref boost::mpi::all_reduce `all_reduce`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node75.html#Node75
-`MPI_Alltoall`]] [[funcref boost::mpi::all_to_all `all_to_all`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node75.html#Node75
-`MPI_Alltoallv`]] [most uses supported by [funcref boost::mpi::all_to_all `all_to_all`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node66.html#Node66
-`MPI_Barrier`]] [[memberref
-boost::mpi::communicator::barrier `communicator::barrier`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node67.html#Node67
-`MPI_Bcast`]] [[funcref boost::mpi::broadcast `broadcast`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node69.html#Node69
-`MPI_Gather`]] [[funcref boost::mpi::gather `gather`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node69.html#Node69
-`MPI_Gatherv`]] [most uses supported by [funcref boost::mpi::gather `gather`],
-other usages supported by [funcref boost::mpi::gatherv `gatherv`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node77.html#Node77
-`MPI_Reduce`]] [[funcref boost::mpi::reduce `reduce`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node83.html#Node83
-`MPI_Reduce_scatter`]] [unsupported]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node84.html#Node84
-`MPI_Scan`]] [[funcref boost::mpi::scan `scan`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node71.html#Node71
-`MPI_Scatter`]] [[funcref boost::mpi::scatter `scatter`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node71.html#Node71
-`MPI_Scatterv`]] [most uses supported by [funcref boost::mpi::scatter `scatter`],
-other uses supported by [funcref boost::mpi::scatterv `scatterv`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-20-html/node145.htm#Node145
-`MPI_IN_PLACE`]] [supported implicitly by [funcref boost::mpi::all_reduce
-`all_reduce`] by omitting the output value]]
-]
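-
-A minimal sketch of two of these mappings (a fragment, assuming an
-initialized environment): `broadcast` plays the role of `MPI_Bcast`
-and `gather` that of `MPI_Gather`.
-
-    #include <boost/mpi.hpp>
-    #include <string>
-    #include <vector>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    std::string message;
-    if (world.rank() == 0) message = "ready";
-    mpi::broadcast(world, message, 0);           // MPI_Bcast from rank 0
-
-    std::vector<int> ranks;
-    mpi::gather(world, world.rank(), ranks, 0);  // MPI_Gather at rank 0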
-
-Boost.MPI uses function objects to specify how reductions should occur
-in its equivalents to `MPI_Allreduce`, `MPI_Reduce`, and
-`MPI_Scan`. The following table illustrates how
-[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node78.html#Node78
-predefined] and
-[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node80.html#Node80
-user-defined] reduction operations can be mapped between the C MPI and
-Boost.MPI.
-
-[table Reduction operations
- [[C Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_BAND`] [[classref boost::mpi::bitwise_and `bitwise_and`]]]
- [[`MPI_BOR`] [[classref boost::mpi::bitwise_or `bitwise_or`]]]
- [[`MPI_BXOR`] [[classref boost::mpi::bitwise_xor `bitwise_xor`]]]
- [[`MPI_LAND`] [`std::logical_and`]]
- [[`MPI_LOR`] [`std::logical_or`]]
- [[`MPI_LXOR`] [[classref boost::mpi::logical_xor `logical_xor`]]]
- [[`MPI_MAX`] [[classref boost::mpi::maximum `maximum`]]]
- [[`MPI_MAXLOC`] [unsupported]]
- [[`MPI_MIN`] [[classref boost::mpi::minimum `minimum`]]]
- [[`MPI_MINLOC`] [unsupported]]
- [[`MPI_Op_create`] [used internally by Boost.MPI]]
- [[`MPI_Op_free`] [used internally by Boost.MPI]]
- [[`MPI_PROD`] [`std::multiplies`]]
- [[`MPI_SUM`] [`std::plus`]]
-]
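-
-For example, combining `all_reduce` with the `minimum` function object
-corresponds to `MPI_Allreduce` with `MPI_MIN`. A minimal sketch (a
-fragment, assuming an initialized environment; `compute_local_value`
-is a hypothetical helper):
-
-    #include <boost/mpi.hpp>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    int local = compute_local_value();  // hypothetical per-process value
-    int global_min = mpi::all_reduce(world, local, mpi::minimum<int>());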
-
-MPI defines several special communicators: `MPI_COMM_WORLD`
-(containing all processes that the local process can communicate with),
-`MPI_COMM_SELF` (containing only the local process), and
-`MPI_COMM_EMPTY` (containing no processes). These special communicators
-are all instances of the [classref boost::mpi::communicator
-`communicator`] class in Boost.MPI.
-
-[table Predefined communicators
- [[C Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_COMM_WORLD`] [a default-constructed [classref boost::mpi::communicator `communicator`]]]
- [[`MPI_COMM_SELF`] [a [classref boost::mpi::communicator `communicator`] that contains only the current process]]
- [[`MPI_COMM_EMPTY`] [a [classref boost::mpi::communicator `communicator`] that evaluates to `false`]]
-]
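-
-A minimal sketch of these mappings (a fragment, assuming an
-initialized environment): the default constructor attaches to
-`MPI_COMM_WORLD`, and other special communicators can be wrapped
-explicitly.
-
-    #include <boost/mpi.hpp>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;                                  // MPI_COMM_WORLD
-    mpi::communicator self(MPI_COMM_SELF, mpi::comm_attach);  // MPI_COMM_SELF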
-
-Boost.MPI supports groups of processes through its [classref
-boost::mpi::group `group`] class.
-
-[table Group operations and constants
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_GROUP_EMPTY`] [a default-constructed [classref
- boost::mpi::group `group`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
- `MPI_Group_size`]] [[memberref boost::mpi::group::size `group::size`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
- `MPI_Group_rank`]] [[memberref boost::mpi::group::rank `group::rank`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
- `MPI_Group_translate_ranks`]] [[memberref boost::mpi::group::translate_ranks `group::translate_ranks`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node97.html#Node97
- `MPI_Group_compare`]] [operators `==` and `!=`]]
- [[`MPI_IDENT`] [operators `==` and `!=`]]
- [[`MPI_SIMILAR`] [operators `==` and `!=`]]
- [[`MPI_UNEQUAL`] [operators `==` and `!=`]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Comm_group`]] [[memberref
- boost::mpi::communicator::group `communicator::group`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_union`]] [operator `|` for groups]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_intersection`]] [operator `&` for groups]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_difference`]] [operator `-` for groups]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_incl`]] [[memberref boost::mpi::group::include `group::include`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_excl`]] [[memberref boost::mpi::group::exclude `group::exclude`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_range_incl`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node98.html#Node98
- `MPI_Group_range_excl`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node99.html#Node99
- `MPI_Group_free`]] [used automatically in Boost.MPI]]
-]
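-
-A minimal sketch of the group operations (a fragment, assuming an
-initialized environment): build a subgroup of the even ranks, then
-combine groups with the set-like operators.
-
-    #include <boost/mpi.hpp>
-    #include <vector>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    mpi::group all = world.group();  // MPI_Comm_group
-
-    std::vector<int> evens;
-    for (int r = 0; r < world.size(); r += 2)
-      evens.push_back(r);
-
-    mpi::group even_group = all.include(evens.begin(), evens.end());  // MPI_Group_incl
-    mpi::group odd_group  = all - even_group;                         // MPI_Group_difference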
-
-Boost.MPI provides manipulation of communicators through the [classref
-boost::mpi::communicator `communicator`] class.
-
-[table Communicator operations
- [[C Function] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node101.html#Node101
- `MPI_Comm_size`]] [[memberref boost::mpi::communicator::size `communicator::size`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node101.html#Node101
- `MPI_Comm_rank`]] [[memberref boost::mpi::communicator::rank
- `communicator::rank`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node101.html#Node101
- `MPI_Comm_compare`]] [operators `==` and `!=`]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node102.html#Node102
- `MPI_Comm_dup`]] [[classref boost::mpi::communicator `communicator`]
- class constructor using `comm_duplicate`]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node102.html#Node102
- `MPI_Comm_create`]] [[classref boost::mpi::communicator
- `communicator`] constructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node102.html#Node102
- `MPI_Comm_split`]] [[memberref boost::mpi::communicator::split
- `communicator::split`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node103.html#Node103
- `MPI_Comm_free`]] [used automatically in Boost.MPI]]
-]
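-
-For instance, a minimal sketch of `communicator::split`, the analogue
-of `MPI_Comm_split` (a fragment, assuming an initialized environment):
-processes that pass the same color end up in the same new
-communicator.
-
-    #include <boost/mpi.hpp>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    int color = world.rank() % 2;                 // even vs. odd ranks
-    mpi::communicator half = world.split(color);  // MPI_Comm_split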
-
-Boost.MPI currently provides support for inter-communicators via the
-[classref boost::mpi::intercommunicator `intercommunicator`] class.
-
-[table Inter-communicator operations
- [[C Function] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node112.html#Node112
- `MPI_Comm_test_inter`]] [use [memberref boost::mpi::communicator::as_intercommunicator `communicator::as_intercommunicator`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node112.html#Node112
- `MPI_Comm_remote_size`]] [[memberref boost::mpi::intercommunicator::remote_size `intercommunicator::remote_size`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node112.html#Node112
- `MPI_Comm_remote_group`]] [[memberref boost::mpi::intercommunicator::remote_group `intercommunicator::remote_group`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node113.html#Node113
- `MPI_Intercomm_create`]] [[classref boost::mpi::intercommunicator `intercommunicator`] constructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node113.html#Node113
- `MPI_Intercomm_merge`]] [[memberref boost::mpi::intercommunicator::merge `intercommunicator::merge`]]]
-]
-
-Boost.MPI currently provides no support for attribute caching.
-
-[table Attributes and caching
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_NULL_COPY_FN`] [unsupported]]
- [[`MPI_NULL_DELETE_FN`] [unsupported]]
- [[`MPI_KEYVAL_INVALID`] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Keyval_create`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Copy_function`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Delete_function`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Keyval_free`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Attr_put`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Attr_get`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node119.html#Node119
- `MPI_Attr_delete`]] [unsupported]]
-]
-
-Boost.MPI provides complete support for creating communicators
-with different topologies and later querying those topologies. Support
-for graph topologies is provided via an interface to the
-[@http://www.boost.org/libs/graph/doc/index.html Boost Graph Library
-(BGL)], where a communicator can be created which matches the
-structure of any BGL graph, and the graph topology of a communicator
-can be viewed as a BGL graph for use in existing, generic graph
-algorithms.
-
-[table Process topologies
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_GRAPH`] [unnecessary; use [memberref boost::mpi::communicator::as_graph_communicator `communicator::as_graph_communicator`]]]
- [[`MPI_CART`] [unnecessary; use [memberref boost::mpi::communicator::has_cartesian_topology `communicator::has_cartesian_topology`]]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node133.html#Node133
- `MPI_Cart_create`]] [[classref boost::mpi::cartesian_communicator `cartesian_communicator`]
- constructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node134.html#Node134
- `MPI_Dims_create`]] [[funcref boost::mpi::cartesian_dimensions `cartesian_dimensions`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node135.html#Node135
- `MPI_Graph_create`]] [[classref
- boost::mpi::graph_communicator
- `graph_communicator`] constructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Topo_test`]] [[memberref
- boost::mpi::communicator::as_graph_communicator
- `communicator::as_graph_communicator`], [memberref
- boost::mpi::communicator::has_cartesian_topology
- `communicator::has_cartesian_topology`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Graphdims_get`]] [[funcref boost::mpi::num_vertices
- `num_vertices`], [funcref boost::mpi::num_edges `num_edges`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Graph_get`]] [[funcref boost::mpi::vertices
- `vertices`], [funcref boost::mpi::edges `edges`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Cartdim_get`]] [[memberref boost::mpi::cartesian_communicator::ndims `cartesian_communicator::ndims` ]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Cart_get`]] [[memberref boost::mpi::cartesian_communicator::topology `cartesian_communicator::topology` ]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Cart_rank`]] [[memberref boost::mpi::cartesian_communicator::rank `cartesian_communicator::rank` ]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Cart_coords`]] [[memberref boost::mpi::cartesian_communicator::coordinates `cartesian_communicator::coordinates` ]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Graph_neighbors_count`]] [[funcref boost::mpi::out_degree
- `out_degree`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node136.html#Node136
- `MPI_Graph_neighbors`]] [[funcref boost::mpi::out_edges
- `out_edges`], [funcref boost::mpi::adjacent_vertices `adjacent_vertices`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node137.html#Node137
- `MPI_Cart_shift`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node138.html#Node138
- `MPI_Cart_sub`]] [[classref boost::mpi::cartesian_communicator `cartesian_communicator`]
- constructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node139.html#Node139
- `MPI_Cart_map`]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node139.html#Node139
- `MPI_Graph_map`]] [unsupported]]
-]
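-
-A minimal sketch of creating a Cartesian communicator, the analogue of
-`MPI_Cart_create` (a fragment, assuming an initialized environment
-whose size matches the requested 2x2 grid, and assuming the
-`cartesian_topology` constructor that takes a vector of dimensions):
-
-    #include <boost/mpi.hpp>
-    #include <boost/mpi/cartesian_communicator.hpp>
-    #include <vector>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::communicator world;
-    std::vector<mpi::cartesian_dimension> dims;
-    dims.push_back(mpi::cartesian_dimension(2, true));   // periodic dimension
-    dims.push_back(mpi::cartesian_dimension(2, false));  // non-periodic dimension
-    mpi::cartesian_communicator cart(world, mpi::cartesian_topology(dims));
-    std::vector<int> coords = cart.coordinates(cart.rank());  // MPI_Cart_coords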
-
-Boost.MPI supports environmental inquiries through the [classref
-boost::mpi::environment `environment`] class.
-
-[table Environmental inquiries
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_TAG_UB`] [unnecessary; use [memberref
- boost::mpi::environment::max_tag `environment::max_tag`]]]
- [[`MPI_HOST`] [unnecessary; use [memberref
- boost::mpi::environment::host_rank `environment::host_rank`]]]
- [[`MPI_IO`] [unnecessary; use [memberref
- boost::mpi::environment::io_rank `environment::io_rank`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node143.html#Node147
- `MPI_Get_processor_name`]]
- [[memberref boost::mpi::environment::processor_name
- `environment::processor_name`]]]
-]
-
-Boost.MPI translates MPI errors into exceptions, reported via the
-[classref boost::mpi::exception `exception`] class.
-
-[table Error handling
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_ERRORS_ARE_FATAL`] [unused; errors are translated into
- Boost.MPI exceptions]]
- [[`MPI_ERRORS_RETURN`] [unused; errors are translated into
- Boost.MPI exceptions]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
- `MPI_errhandler_create`]] [unused; errors are translated into
- Boost.MPI exceptions]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
- `MPI_errhandler_set`]] [unused; errors are translated into
- Boost.MPI exceptions]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
- `MPI_errhandler_get`]] [unused; errors are translated into
- Boost.MPI exceptions]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
- `MPI_errhandler_free`]] [unused; errors are translated into
- Boost.MPI exceptions]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node148.html#Node148
- `MPI_Error_string`]] [used internally by Boost.MPI]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node149.html#Node149
- `MPI_Error_class`]] [[memberref boost::mpi::exception::error_class `exception::error_class`]]]
-]
-
-The MPI timing facilities are exposed via the Boost.MPI [classref
-boost::mpi::timer `timer`] class, which provides an interface
-compatible with the [@http://www.boost.org/libs/timer/index.html Boost
-Timer library].
-
-[table Timing facilities
- [[C Function/Constant] [Boost.MPI Equivalent]]
-
- [[`MPI_WTIME_IS_GLOBAL`] [unnecessary; use [memberref
- boost::mpi::timer::time_is_global `timer::time_is_global`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node150.html#Node150
- `MPI_Wtime`]] [use [memberref boost::mpi::timer::elapsed
- `timer::elapsed`] to determine the time elapsed from some specific
- starting point]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node150.html#Node150
- `MPI_Wtick`]] [[memberref boost::mpi::timer::elapsed_min `timer::elapsed_min`]]]
-]
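-
-A minimal sketch of timing a section of code (a fragment, assuming an
-initialized environment; `do_some_work` is a hypothetical workload):
-construction records the start time, and `elapsed()` plays the role of
-differencing two `MPI_Wtime` calls.
-
-    #include <boost/mpi.hpp>
-    #include <boost/mpi/timer.hpp>
-    namespace mpi = boost::mpi;
-
-    // Fragment: inside main(), after constructing mpi::environment.
-    mpi::timer t;                  // records the start time, like MPI_Wtime()
-    do_some_work();                // hypothetical workload
-    double seconds = t.elapsed();  // MPI_Wtime() - start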
-
-MPI startup and shutdown are managed by the construction and
-destruction of the Boost.MPI [classref boost::mpi::environment
-`environment`] class.
-
-[table Startup/shutdown facilities
- [[C Function] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
- `MPI_Init`]] [[classref boost::mpi::environment `environment`]
- constructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
- `MPI_Finalize`]] [[classref boost::mpi::environment `environment`]
- destructor]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
- `MPI_Initialized`]] [[memberref boost::mpi::environment::initialized
- `environment::initialized`]]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node151.html#Node151
- `MPI_Abort`]] [[memberref boost::mpi::environment::abort
- `environment::abort`]]]
-]
-
-Boost.MPI does not provide any support for the profiling facilities in
-MPI 1.1.
-
-[table Profiling interface
- [[C Function] [Boost.MPI Equivalent]]
-
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node153.html#Node153
- `PMPI_*` routines]] [unsupported]]
- [[[@http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node156.html#Node156
- `MPI_Pcontrol`]] [unsupported]]
-]
-
-[endsect]
-
-[endsect]
+[include introduction.qbk]
+[include getting_started.qbk]
+[include tutorial.qbk]
+[include c_mapping.qbk]
[xinclude mpi_autodoc.xml]
-[section:python Python Bindings]
-[python]
-
-Boost.MPI provides an alternative MPI interface from the _Python_
-programming language via the `boost.mpi` module. The
-Boost.MPI Python bindings, built on top of the C++ Boost.MPI using the
-_BoostPython_ library, provide nearly all of the functionality of
-Boost.MPI within a dynamic, object-oriented language.
-
-The Boost.MPI Python module can be built and installed from the
-`libs/mpi/build` directory. Just follow the [link
-mpi.config configuration] and [link mpi.installation
-installation] instructions for the C++ Boost.MPI. Once you have
-installed the Python module, be sure that the installation location is
-in your `PYTHONPATH`.
-
-[section:python_quickstart Quickstart]
-
-[python]
-
-Getting started with the Boost.MPI Python module is as easy as
-importing `boost.mpi`. Our first "Hello, World!" program is
-just two lines long:
-
- import boost.mpi as mpi
- print "I am process %d of %d." % (mpi.rank, mpi.size)
-
-Go ahead and run this program with several processes. Be sure to
-invoke the `python` interpreter from `mpirun`, e.g.,
-
-[pre
-mpirun -np 5 python hello_world.py
-]
-
-This will return output such as:
-
-[pre
-I am process 1 of 5.
-I am process 3 of 5.
-I am process 2 of 5.
-I am process 4 of 5.
-I am process 0 of 5.
-]
-
-Point-to-point operations in Boost.MPI have nearly the same syntax in
-Python as in C++. We can write a simple two-process Python program
-that prints "Hello, world!" by transmitting Python strings:
-
-    import boost.mpi as mpi
-
-    if mpi.world.rank == 0:
-        mpi.world.send(1, 0, 'Hello')
-        msg = mpi.world.recv(1, 1)
-        print msg,'!'
-    else:
-        msg = mpi.world.recv(0, 0)
-        print (msg + ', '),
-        mpi.world.send(0, 1, 'world')
-
-There are only a few notable differences between this Python code and
-the example [link mpi.point_to_point in the C++
-tutorial]. First of all, we don't need to write any initialization
-code in Python: just loading the `boost.mpi` module makes the
-appropriate `MPI_Init` and `MPI_Finalize` calls. Second, we're passing
-Python objects from one process to another through MPI. Any Python
-object that can be pickled can be transmitted; the next section will
-describe in more detail how the Boost.MPI Python layer transmits
-objects. Finally, when we receive objects with `recv`, we don't need
-to specify the type because transmission of Python objects is
-polymorphic.
-
-When experimenting with Boost.MPI in Python, don't forget that help is
-always available via `pydoc`: just pass the name of the module or
-module entity on the command line (e.g., `pydoc
-boost.mpi.communicator`) to receive complete reference
-documentation. When in doubt, try it!
-[endsect]
-
-[section:python_user_data Transmitting User-Defined Data]
-Boost.MPI can transmit user-defined data in several different ways.
-Most importantly, it can transmit arbitrary _Python_ objects by pickling
-them at the sender and unpickling them at the receiver, allowing
-arbitrarily complex Python data structures to interoperate with MPI.
-
-Boost.MPI also supports efficient serialization and transmission of
-C++ objects (that have been exposed to Python) through its C++
-interface. Any C++ type that provides (de-)serialization routines that
-meet the requirements of the Boost.Serialization library is eligible
-for this optimization, but the type must be registered in advance. To
-register a C++ type, invoke the C++ function [funcref
-boost::mpi::python::register_serialized
-register_serialized]. If your C++ types come from other Python modules
-(they probably will!), those modules will need to link against the
-`boost_mpi` and `boost_mpi_python` libraries as described in the [link
-mpi.installation installation section]. Note that you do
-*not* need to link against the Boost.MPI Python extension module.
-
-Finally, Boost.MPI supports separation of the structure of an object
-from the data it stores, allowing the two pieces to be transmitted
-separately. This "skeleton/content" mechanism, described in more
-detail in a later section, is a communication optimization suitable
-for problems with fixed data structures whose internal data changes
-frequently.
-[endsect]
-
-[section:python_collectives Collectives]
-
-Boost.MPI supports all of the MPI collectives (`scatter`, `reduce`,
-`scan`, `broadcast`, etc.) for any type of data that can be
-transmitted with the point-to-point communication operations. For the
-MPI collectives that require a user-specified operation (e.g., `reduce`
-and `scan`), the operation can be an arbitrary Python function. For
-instance, one could concatenate strings with `all_reduce`:
-
- mpi.all_reduce(my_string, lambda x,y: x + y)
-
-The following module-level functions implement MPI collectives:
-
-[table Python collective functions
-  [[Function] [Description]]
-
-  [[`all_gather`] [Gather the values from all processes.]]
-  [[`all_reduce`] [Combine the results from all processes.]]
-  [[`all_to_all`] [Every process sends data to every other process.]]
-  [[`broadcast`] [Broadcast data from one process to all other processes.]]
-  [[`gather`] [Gather the values from all processes to the root.]]
-  [[`reduce`] [Combine the results from all processes to the root.]]
-  [[`scan`] [Prefix reduction of the values from all processes.]]
-  [[`scatter`] [Scatter the values stored at the root to all processes.]]
-]
-[endsect]
-
-[section:python_skeleton_content Skeleton/Content Mechanism]
-Boost.MPI provides a skeleton/content mechanism that allows the
-transfer of large data structures to be split into two separate stages,
-with the skeleton (or, "shape") of the data structure sent first and
-the content (or, "data") of the data structure sent later, potentially
-several times, so long as the structure has not changed since the
-skeleton was transferred. The skeleton/content mechanism can improve
-performance when the data structure is large and its shape is fixed,
-because while the skeleton requires serialization (it has an unknown
-size), the content transfer is fixed-size and can be done without
-extra copies.
-
-To use the skeleton/content mechanism from Python, you must first
-register the type of your data structure with the skeleton/content
-mechanism *from C++*. The registration function is [funcref
-boost::mpi::python::register_skeleton_and_content
-register_skeleton_and_content] and resides in the [headerref
-boost/mpi/python.hpp <boost/mpi/python.hpp>] header.
-
-Once you have registered your C++ data structures, you can extract
-the skeleton for an instance of that data structure with `skeleton()`.
-The resulting `skeleton_proxy` can be transmitted via the normal send
-routine, e.g.,
-
- mpi.world.send(1, 0, skeleton(my_data_structure))
-
-`skeleton_proxy` objects can be received on the other end via `recv()`;
-the received proxy stores a newly-created instance of your data structure,
-with the same "shape" as the sender's, in its `object` attribute:
-
- shape = mpi.world.recv(0, 0)
- my_data_structure = shape.object
-
-Once the skeleton has been transmitted, the content (accessed via
-`get_content`) can be transmitted in much the same way. Note, however,
-that the receiver also specifies `get_content(my_data_structure)` in its
-call to receive:
-
-    if mpi.rank == 0:
-        mpi.world.send(1, 0, get_content(my_data_structure))
-    else:
-        mpi.world.recv(0, 0, get_content(my_data_structure))
-
-Of course, this transmission of content can occur repeatedly, if the
-values in the data structure (but not its shape) change.
-
-The skeleton/content mechanism is a structured way to exploit the
-interaction between custom-built MPI datatypes and `MPI_BOTTOM`, to
-eliminate extra buffer copies.
-
-[endsect]
-
-[section:python_compatibility C++/Python MPI Compatibility]
-Boost.MPI is a C++ library whose facilities have been exposed to Python
-via the Boost.Python library. Since the Boost.MPI Python bindings are
-built directly on top of the C++ library, and nearly every feature of
-the C++ library is available in Python, hybrid C++/Python programs using
-Boost.MPI can interact, e.g., sending a value from Python but receiving
-that value in C++ (or vice versa). However, doing so requires some
-care. Because Python objects are dynamically typed, Boost.MPI transfers
-type information along with the serialized form of the object, so that
-the object can be received even when its type is not known. This
-mechanism differs from its C++ counterpart, where the static types of
-transmitted values are always known.
-
-The only way to communicate between the C++ and Python views on
-Boost.MPI is to traffic entirely in Python objects. For Python, this
-is the normal state of affairs, so nothing will change. For C++, this
-means sending and receiving values of type `boost::python::object`,
-from the _BoostPython_ library. For instance, say we want to transmit
-an integer value from Python:
-
- comm.send(1, 0, 17)
-
-In C++, we would receive that value into a Python object and then
-`extract` an integer value:
-
-[c++]
-
- boost::python::object value;
- comm.recv(0, 0, value);
- int int_value = boost::python::extract<int>(value);
-
-In the future, Boost.MPI will be extended to allow improved
-interoperability with the C++ Boost.MPI and the C MPI bindings.
-[endsect]
-
-[section:pythonref Reference]
-The Boost.MPI Python module, `boost.mpi`, has its own
-[@boost.mpi.html reference documentation], which is also
-available using `pydoc` (from the command line) or
-`help(boost.mpi)` (from the Python interpreter).
-
-[endsect]
-
-[endsect]
+[include python.qbk]
[section:design Design Philosophy]
@@ -2236,42 +73,6 @@ the amount of effort required to interface between Boost.MPI
and the C MPI library.
[endsect]
-[section:threading Threads]
-
-An increasing number of hybrid parallel applications mix distributed and
-shared memory parallelism. To support that model, one needs to know what
-level of threading support is guaranteed by the MPI implementation. There
-are four ordered levels of possible threading support, described by
-[enumref boost::mpi::threading::level mpi::threading::level].
-At the lowest level, you should not use threads at all; at the highest
-level, any thread can perform MPI calls.
-
-If you want to use multi-threading in your MPI application, indicate your
-preferred threading support in the environment constructor. Then probe the
-level the library actually provided, and decide what you can do with it
-(it could be nothing, in which case aborting is a valid option):
-
- #include <boost/mpi/environment.hpp>
- #include <boost/mpi/communicator.hpp>
- #include <iostream>
- namespace mpi = boost::mpi;
- namespace mt = mpi::threading;
-
- int main()
- {
- mpi::environment env(mt::funneled);
- if (env.thread_level() < mt::funneled) {
- env.abort(-1);
- }
- mpi::communicator world;
- std::cout << "I am process " << world.rank() << " of " << world.size()
- << "." << std::endl;
- return 0;
- }
-
-
-[endsect]
-
[section:performance Performance Evaluation]
Message-passing performance is crucial in high-performance distributed
@@ -2342,7 +143,7 @@ performance.
* *2006-09-21*: Boost.MPI accepted into Boost.
-[endsect]
+[endsect:history]
[section:acknowledge Acknowledgments]
Boost.MPI was developed with support from Zurcher Kantonalbank. Daniel
@@ -2354,5 +155,4 @@ Boost.MPI that proved the usefulness of the Serialization library in
an MPI setting and the performance benefits of specialization in a C++
abstraction layer for MPI. Jeremy Siek managed the formal review of Boost.MPI.
-[endsect]
-[endsect]
+[endsect:acknowledge]
diff --git a/libs/mpi/doc/mpi_autodoc.xml b/libs/mpi/doc/mpi_autodoc.xml
index 57ec85c4b2..b38104a630 100644
--- a/libs/mpi/doc/mpi_autodoc.xml
+++ b/libs/mpi/doc/mpi_autodoc.xml
@@ -379,7 +379,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<template-type-parameter name="T"/>
</template><parameter name="comm"><paramtype>const <classname>communicator</classname> &amp;</paramtype></parameter><parameter name="value"><paramtype><classname>skeleton_proxy</classname>&lt; T &gt; &amp;</paramtype></parameter><parameter name="root"><paramtype>int</paramtype></parameter></signature><signature><type>void</type><template>
<template-type-parameter name="T"/>
- </template><parameter name="comm"><paramtype>const <classname>communicator</classname> &amp;</paramtype></parameter><parameter name="value"><paramtype>const <classname>skeleton_proxy</classname>&lt; T &gt; &amp;</paramtype></parameter><parameter name="root"><paramtype>int</paramtype></parameter></signature><purpose>Broadcast a value from a root process to all other processes. </purpose><description><para><computeroutput>broadcast</computeroutput> is a collective algorithm that transfers a value from an arbitrary <computeroutput>root</computeroutput> process to every other process that is part of the given communicator. The <computeroutput>broadcast</computeroutput> algorithm can transmit any Serializable value, values that have associated MPI data types, packed archives, skeletons, and the content of skeletons; see the <computeroutput>send</computeroutput> primitive for communicators for a complete list. The type <computeroutput>T</computeroutput> shall be the same for all processes that are a part of the communicator <computeroutput>comm</computeroutput>, unless packed archives are being transferred: with packed archives, the root sends a <computeroutput><classname alt="boost::mpi::packed_oarchive">packed_oarchive</classname></computeroutput> or <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> whereas the other processes receive a <computeroutput><classname alt="boost::mpi::packed_iarchive">packed_iarchive</classname></computeroutput> or <computeroutput>packed_skeleton_iarchve</computeroutput>, respectively.</para><para>When the type <computeroutput>T</computeroutput> has an associated MPI data type, this routine invokes <computeroutput>MPI_Bcast</computeroutput> to perform the broadcast.</para><para>
+ </template><parameter name="comm"><paramtype>const <classname>communicator</classname> &amp;</paramtype></parameter><parameter name="value"><paramtype>const <classname>skeleton_proxy</classname>&lt; T &gt; &amp;</paramtype></parameter><parameter name="root"><paramtype>int</paramtype></parameter></signature><purpose>Broadcast a value from a root process to all other processes. </purpose><description><para><computeroutput>broadcast</computeroutput> is a collective algorithm that transfers a value from an arbitrary <computeroutput>root</computeroutput> process to every other process that is part of the given communicator. The <computeroutput>broadcast</computeroutput> algorithm can transmit any Serializable value, values that have associated MPI data types, packed archives, skeletons, and the content of skeletons; see the <computeroutput>send</computeroutput> primitive for communicators for a complete list. The type <computeroutput>T</computeroutput> shall be the same for all processes that are a part of the communicator <computeroutput>comm</computeroutput>, unless packed archives are being transferred: with packed archives, the root sends a <computeroutput><classname alt="boost::mpi::packed_oarchive">packed_oarchive</classname></computeroutput> or <computeroutput>packed_skeleton_oarchive</computeroutput> whereas the other processes receive a <computeroutput><classname alt="boost::mpi::packed_iarchive">packed_iarchive</classname></computeroutput> or <computeroutput>packed_skeleton_iarchve</computeroutput>, respectively.</para><para>When the type <computeroutput>T</computeroutput> has an associated MPI data type, this routine invokes <computeroutput>MPI_Bcast</computeroutput> to perform the broadcast.</para><para>
</para></description></overloaded-function>
@@ -544,7 +544,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
</template><parameter name="dest"><paramtype>int</paramtype><description><para>The rank of the remote process to which the data will be sent.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that will be associated with this message. Tags may be any integer between zero and an implementation-defined upper limit. This limit is accessible via <computeroutput>environment::max_tag()</computeroutput>.</para></description></parameter><parameter name="value"><paramtype>const T &amp;</paramtype><description><para>The value that will be transmitted to the receiver. The type <computeroutput>T</computeroutput> of this value must meet the aforementioned criteria for transmission. </para></description></parameter><purpose>Send data to another process. </purpose><description><para>This routine executes a potentially blocking send with tag <computeroutput>tag</computeroutput> to the process with rank <computeroutput>dest</computeroutput>. It can be received by the destination process with a matching <computeroutput>recv</computeroutput> call.</para><para>The given <computeroutput>value</computeroutput> must be suitable for transmission over MPI. There are several classes of types that meet these requirements:</para><para><itemizedlist>
<listitem><para>Types with mappings to MPI data types: If <computeroutput>is_mpi_datatype&lt;T&gt;</computeroutput> is convertible to <computeroutput>mpl::true_</computeroutput>, then <computeroutput>value</computeroutput> will be transmitted using the MPI data type <computeroutput>get_mpi_datatype&lt;T&gt;()</computeroutput>. All primitive C++ data types that have MPI equivalents, e.g., <computeroutput>int</computeroutput>, <computeroutput>float</computeroutput>, <computeroutput>char</computeroutput>, <computeroutput>double</computeroutput>, etc., have built-in mappings to MPI data types. You may turn a Serializable type with fixed structure into an MPI data type by specializing <computeroutput><classname alt="boost::mpi::is_mpi_datatype">is_mpi_datatype</classname></computeroutput> for your type.</para>
</listitem><listitem><para>Serializable types: Any type that provides the <computeroutput>serialize()</computeroutput> functionality required by the Boost.Serialization library can be transmitted and received.</para>
-</listitem><listitem><para>Packed archives and skeletons: Data that has been packed into an <computeroutput><classname alt="boost::mpi::packed_oarchive">mpi::packed_oarchive</classname></computeroutput> or the skeletons of data that have been backed into an <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">mpi::packed_skeleton_oarchive</classname></computeroutput> can be transmitted, but will be received as <computeroutput><classname alt="boost::mpi::packed_iarchive">mpi::packed_iarchive</classname></computeroutput> and <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">mpi::packed_skeleton_iarchive</classname></computeroutput>, respectively, to allow the values (or skeletons) to be extracted by the destination process.</para>
+</listitem><listitem><para>Packed archives and skeletons: Data that has been packed into an <computeroutput><classname alt="boost::mpi::packed_oarchive">mpi::packed_oarchive</classname></computeroutput> or the skeletons of data that have been packed into an <computeroutput>mpi::packed_skeleton_oarchive</computeroutput> can be transmitted, but will be received as <computeroutput><classname alt="boost::mpi::packed_iarchive">mpi::packed_iarchive</classname></computeroutput> and <computeroutput>mpi::packed_skeleton_iarchive</computeroutput>, respectively, to allow the values (or skeletons) to be extracted by the destination process.</para>
</listitem><listitem><para>Content: Content associated with a previously-transmitted skeleton can be transmitted by <computeroutput>send</computeroutput> and received by <computeroutput>recv</computeroutput>. The receiving process may only receive content into the content of a value that has been constructed with the matching skeleton.</para>
</listitem></itemizedlist>
</para><para>For types that have mappings to an MPI data type (including the content of a type), an invocation of this routine will result in a single MPI_Send call. For variable-length data, e.g., serialized types and packed archives, two messages will be sent via MPI_Send: one containing the length of the data and the second containing the data itself.</para><para>Std::vectors of MPI data type are considered variable size, e.g. their number of elements is unknown and must be transmitted (although the serialization process is skipped). You can use the array specialized versions of communication methods if both sender and receiver know the vector size.</para><para>Note that the transmission mode for variable-length data is an implementation detail that is subject to change.</para><para>
@@ -556,7 +556,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<method name="send" cv="const"><type>void</type><template>
<template-type-parameter name="T"/>
</template><parameter name="dest"><paramtype>int</paramtype><description><para>The rank of the remote process to which the skeleton will be sent.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that will be associated with this message. Tags may be any integer between zero and an implementation-defined upper limit. This limit is accessible via <computeroutput>environment::max_tag()</computeroutput>.</para></description></parameter><parameter name="proxy"><paramtype>const <classname>skeleton_proxy</classname>&lt; T &gt; &amp;</paramtype><description><para>The <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> containing a reference to the object whose skeleton will be transmitted. </para></description></parameter><purpose>Send the skeleton of an object. </purpose><description><para>This routine executes a potentially blocking send with tag <computeroutput>tag</computeroutput> to the process with rank <computeroutput>dest</computeroutput>. It can be received by the destination process with a matching <computeroutput>recv</computeroutput> call. This variation on <computeroutput>send</computeroutput> will be used when a send of a skeleton is explicitly requested via code such as:</para><para><programlisting language="c++">comm.send(dest, tag, skeleton(object));
-</programlisting></para><para>The semantics of this routine are equivalent to that of sending a <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> storing the skeleton of the <computeroutput>object</computeroutput>.</para><para>
+</programlisting></para><para>The semantics of this routine are equivalent to that of sending a <computeroutput>packed_skeleton_oarchive</computeroutput> storing the skeleton of the <computeroutput>object</computeroutput>.</para><para>
</para></description></method>
<method name="send" cv="const"><type>void</type><template>
<template-type-parameter name="T"/>
@@ -566,7 +566,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
</para></description></method>
<method name="recv" cv="const"><type><classname>status</classname></type><template>
<template-type-parameter name="T"/>
- </template><parameter name="source"><paramtype>int</paramtype><description><para>The process that will be sending data. This will either be a process rank within the communicator or the constant <computeroutput>any_source</computeroutput>, indicating that we can receive the message from any process.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that matches a particular kind of message sent by the source process. This may be any tag value permitted by <computeroutput>send</computeroutput>. Alternatively, the argument may be the constant <computeroutput>any_tag</computeroutput>, indicating that this receive matches a message with any tag.</para></description></parameter><parameter name="value"><paramtype>T &amp;</paramtype><description><para>Will contain the value of the message after a successful receive. The type of this value must match the value transmitted by the sender, unless the sender transmitted a packed archive or skeleton: in these cases, the sender transmits a <computeroutput><classname alt="boost::mpi::packed_oarchive">packed_oarchive</classname></computeroutput> or <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> and the destination receives a <computeroutput><classname alt="boost::mpi::packed_iarchive">packed_iarchive</classname></computeroutput> or <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput>, respectively.</para></description></parameter><purpose>Receive data from a remote process. </purpose><description><para>This routine blocks until it receives a message from the process <computeroutput>source</computeroutput> with the given <computeroutput>tag</computeroutput>. The type <computeroutput>T</computeroutput> of the <computeroutput>value</computeroutput> must be suitable for transmission over MPI, which includes serializable types, types that can be mapped to MPI data types (including most built-in C++ types), packed MPI archives, skeletons, and content associated with skeletons; see the documentation of <computeroutput>send</computeroutput> for a complete description.</para><para>
+ </template><parameter name="source"><paramtype>int</paramtype><description><para>The process that will be sending data. This will either be a process rank within the communicator or the constant <computeroutput>any_source</computeroutput>, indicating that we can receive the message from any process.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that matches a particular kind of message sent by the source process. This may be any tag value permitted by <computeroutput>send</computeroutput>. Alternatively, the argument may be the constant <computeroutput>any_tag</computeroutput>, indicating that this receive matches a message with any tag.</para></description></parameter><parameter name="value"><paramtype>T &amp;</paramtype><description><para>Will contain the value of the message after a successful receive. The type of this value must match the value transmitted by the sender, unless the sender transmitted a packed archive or skeleton: in these cases, the sender transmits a <computeroutput><classname alt="boost::mpi::packed_oarchive">packed_oarchive</classname></computeroutput> or <computeroutput>packed_skeleton_oarchive</computeroutput> and the destination receives a <computeroutput><classname alt="boost::mpi::packed_iarchive">packed_iarchive</classname></computeroutput> or <computeroutput>packed_skeleton_iarchive</computeroutput>, respectively.</para></description></parameter><purpose>Receive data from a remote process. </purpose><description><para>This routine blocks until it receives a message from the process <computeroutput>source</computeroutput> with the given <computeroutput>tag</computeroutput>. The type <computeroutput>T</computeroutput> of the <computeroutput>value</computeroutput> must be suitable for transmission over MPI, which includes serializable types, types that can be mapped to MPI data types (including most built-in C++ types), packed MPI archives, skeletons, and content associated with skeletons; see the documentation of <computeroutput>send</computeroutput> for a complete description.</para><para>
</para></description><returns><para>Information about the received message. </para>
</returns></method>
@@ -608,7 +608,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
</returns></method>
<method name="isend" cv="const"><type><classname>request</classname></type><template>
<template-type-parameter name="T"/>
- </template><parameter name="dest"><paramtype>int</paramtype><description><para>The rank of the remote process to which the skeleton will be sent.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that will be associated with this message. Tags may be any integer between zero and an implementation-defined upper limit. This limit is accessible via <computeroutput>environment::max_tag()</computeroutput>.</para></description></parameter><parameter name="proxy"><paramtype>const <classname>skeleton_proxy</classname>&lt; T &gt; &amp;</paramtype><description><para>The <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> containing a reference to the object whose skeleton will be transmitted.</para></description></parameter><purpose>Send the skeleton of an object without blocking. </purpose><description><para>This routine is functionally identical to the <computeroutput>send</computeroutput> method for <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> objects except that <computeroutput>isend</computeroutput> will not block while waiting for the data to be transmitted. Instead, a request object will be immediately returned, allowing one to query the status of the communication or wait until it has completed.</para><para>The semantics of this routine are equivalent to a non-blocking send of a <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> storing the skeleton of the <computeroutput>object</computeroutput>.</para><para>
+ </template><parameter name="dest"><paramtype>int</paramtype><description><para>The rank of the remote process to which the skeleton will be sent.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that will be associated with this message. Tags may be any integer between zero and an implementation-defined upper limit. This limit is accessible via <computeroutput>environment::max_tag()</computeroutput>.</para></description></parameter><parameter name="proxy"><paramtype>const <classname>skeleton_proxy</classname>&lt; T &gt; &amp;</paramtype><description><para>The <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> containing a reference to the object whose skeleton will be transmitted.</para></description></parameter><purpose>Send the skeleton of an object without blocking. </purpose><description><para>This routine is functionally identical to the <computeroutput>send</computeroutput> method for <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> objects except that <computeroutput>isend</computeroutput> will not block while waiting for the data to be transmitted. Instead, a request object will be immediately returned, allowing one to query the status of the communication or wait until it has completed.</para><para>The semantics of this routine are equivalent to a non-blocking send of a <computeroutput>packed_skeleton_oarchive</computeroutput> storing the skeleton of the <computeroutput>object</computeroutput>.</para><para>
</para></description><returns><para>a <computeroutput>request</computeroutput> object that describes this communication. </para>
</returns></method>
@@ -628,7 +628,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
</returns></method>
<method name="irecv" cv="const"><type><classname>request</classname></type><template>
<template-type-parameter name="T"/>
- </template><parameter name="source"><paramtype>int</paramtype><description><para>The process that will be sending data. This will either be a process rank within the communicator or the constant <computeroutput>any_source</computeroutput>, indicating that we can receive the message from any process.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that matches a particular kind of message sent by the source process. This may be any tag value permitted by <computeroutput>send</computeroutput>. Alternatively, the argument may be the constant <computeroutput>any_tag</computeroutput>, indicating that this receive matches a message with any tag.</para></description></parameter><parameter name="value"><paramtype>T &amp;</paramtype><description><para>Will contain the value of the message after a successful receive. The type of this value must match the value transmitted by the sender, unless the sender transmitted a packed archive or skeleton: in these cases, the sender transmits a <computeroutput><classname alt="boost::mpi::packed_oarchive">packed_oarchive</classname></computeroutput> or <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> and the destination receives a <computeroutput><classname alt="boost::mpi::packed_iarchive">packed_iarchive</classname></computeroutput> or <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput>, respectively.</para></description></parameter><purpose>Prepare to receive a message from a remote process. </purpose><description><para>The <computeroutput>irecv</computeroutput> method is functionally identical to the <computeroutput>recv</computeroutput> method and receive data in the same way, except that <computeroutput>irecv</computeroutput> will not block while waiting for data to be transmitted. Instead, it immediately returns a request object that allows one to query the status of the receive or wait until it has completed.</para><para>
+ </template><parameter name="source"><paramtype>int</paramtype><description><para>The process that will be sending data. This will either be a process rank within the communicator or the constant <computeroutput>any_source</computeroutput>, indicating that we can receive the message from any process.</para></description></parameter><parameter name="tag"><paramtype>int</paramtype><description><para>The tag that matches a particular kind of message sent by the source process. This may be any tag value permitted by <computeroutput>send</computeroutput>. Alternatively, the argument may be the constant <computeroutput>any_tag</computeroutput>, indicating that this receive matches a message with any tag.</para></description></parameter><parameter name="value"><paramtype>T &amp;</paramtype><description><para>Will contain the value of the message after a successful receive. The type of this value must match the value transmitted by the sender, unless the sender transmitted a packed archive or skeleton: in these cases, the sender transmits a <computeroutput><classname alt="boost::mpi::packed_oarchive">packed_oarchive</classname></computeroutput> or <computeroutput>packed_skeleton_oarchive</computeroutput> and the destination receives a <computeroutput><classname alt="boost::mpi::packed_iarchive">packed_iarchive</classname></computeroutput> or <computeroutput>packed_skeleton_iarchive</computeroutput>, respectively.</para></description></parameter><purpose>Prepare to receive a message from a remote process. </purpose><description><para>The <computeroutput>irecv</computeroutput> method is functionally identical to the <computeroutput>recv</computeroutput> method and receives data in the same way, except that <computeroutput>irecv</computeroutput> will not block while waiting for data to be transmitted. Instead, it immediately returns a request object that allows one to query the status of the receive or wait until it has completed.</para><para>
</para></description><returns><para>a <computeroutput>request</computeroutput> object that describes this communication. </para>
</returns></method>
@@ -659,14 +659,11 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<method name="conversion-operator" cv="const"><type>MPI_Comm</type><purpose>Access the MPI communicator associated with a Boost.MPI communicator. </purpose><description><para>This routine permits the implicit conversion from a Boost.MPI communicator to an MPI communicator.</para><para>
</para></description><returns><para>The associated MPI communicator. </para>
</returns></method>
-<method name="split" cv="const"><type><classname>communicator</classname></type><parameter name="color"><paramtype>int</paramtype><description><para>The color of this process. All processes with the same <computeroutput>color</computeroutput> value will be placed into the same group.</para></description></parameter><description><para>Split the communicator into multiple, disjoint communicators each of which is based on a particular color. This is a collective operation that returns a new communicator that is a subgroup of <computeroutput>this</computeroutput>. This routine is functionally equivalent to <computeroutput>MPI_Comm_split</computeroutput>.</para><para>
-
-</para></description><returns><para>A new communicator containing all of the processes in <computeroutput>this</computeroutput> that have the same <computeroutput>color</computeroutput>. </para>
-</returns></method>
-<method name="split" cv="const"><type><classname>communicator</classname></type><parameter name="color"><paramtype>int</paramtype><description><para>The color of this process. All processes with the same <computeroutput>color</computeroutput> value will be placed into the same group.</para></description></parameter><parameter name="key"><paramtype>int</paramtype><description><para>A key value that will be used to determine the ordering of processes with the same color in the resulting communicator. If omitted, the rank of the processes in <computeroutput>this</computeroutput> will determine the ordering of processes in the resulting group.</para></description></parameter><description><para>Split the communicator into multiple, disjoint communicators each of which is based on a particular color. This is a collective operation that returns a new communicator that is a subgroup of <computeroutput>this</computeroutput>. This routine is functionally equivalent to <computeroutput>MPI_Comm_split</computeroutput>.</para><para>
+<method name="split" cv="const"><type><classname>communicator</classname></type><parameter name="color"><paramtype>int</paramtype><description><para>The color of this process. All processes with the same <computeroutput>color</computeroutput> value will be placed into the same group.</para></description></parameter><parameter name="key"><paramtype>int</paramtype><description><para>A key value that will be used to determine the ordering of processes with the same color in the resulting communicator. If omitted, the rank of the processes in <computeroutput>this</computeroutput> will determine the ordering of processes in the resulting group.</para></description></parameter><description><para>Split the communicator into multiple, disjoint communicators each of which is based on a particular color. This is a collective operation that returns a new communicator that is a subgroup of <computeroutput>this</computeroutput>.</para><para>
</para></description><returns><para>A new communicator containing all of the processes in <computeroutput>this</computeroutput> that have the same <computeroutput>color</computeroutput>. </para>
</returns></method>
+<method name="split" cv="const"><type><classname>communicator</classname></type><parameter name="color"><paramtype>int</paramtype></parameter></method>
<method name="as_intercommunicator" cv="const"><type>optional&lt; <classname>intercommunicator</classname> &gt;</type><description><para>Determine if the communicator is in fact an intercommunicator and, if so, return that intercommunicator.</para><para>
</para></description><returns><para>an <computeroutput>optional</computeroutput> containing the intercommunicator, if this communicator is in fact an intercommunicator. Otherwise, returns an empty <computeroutput>optional</computeroutput>. </para>
</returns></method>
@@ -834,6 +831,8 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<header name="boost/mpi/config.hpp">
<para>This header provides MPI configuration details that expose the capabilities of the underlying MPI implementation, and provides auto-linking support on Windows. </para><macro name="MPICH_IGNORE_CXX_SEEK"/>
<macro name="BOOST_MPI_HOMOGENEOUS"><purpose>Comment this macro out if you are running in a heterogeneous environment. </purpose><description><para>When this flag is enabled, we assume simple, POD-like types can be transmitted without paying the cost of portable serialization.</para><para>Comment this out if your platform is not homogeneous and portable serialization/deserialization must be performed.</para><para>If you do so, check that your MPI implementation supports that kind of environment. </para></description></macro>
+<macro name="BOOST_MPI_VERSION"><purpose>Major version of the MPI standard supported by the underlying MPI implementation. </purpose><description><para>If, for some reason, MPI_VERSION is not supported, you should probably set this according to your MPI documentation. </para></description></macro>
+<macro name="BOOST_MPI_SUBVERSION"/>
<macro name="BOOST_MPI_HAS_MEMORY_ALLOCATION"><purpose>Determine if the MPI implementation has support for memory allocation. </purpose><description><para>This macro will be defined when the underlying MPI implementation has support for the MPI-2 memory allocation routines <computeroutput>MPI_Alloc_mem</computeroutput> and <computeroutput>MPI_Free_mem</computeroutput>. When defined, the <computeroutput>allocator</computeroutput> class template will provide Standard Library-compliant access to these memory-allocation routines. </para></description></macro>
<macro name="BOOST_MPI_HAS_NOARG_INITIALIZATION"><purpose>Determine if the MPI implementation has supports initialization without command-line arguments. </purpose><description><para>This macro will be defined when the underlying implementation supports initialization of MPI without passing along command-line arguments, e.g., <computeroutput>MPI_Init(NULL, NULL)</computeroutput>. When defined, the <computeroutput>environment</computeroutput> class will provide a default constructor. This macro is always defined for MPI-2 implementations. </para></description></macro>
<macro name="BOOST_MPI_CALLING_CONVENTION"><purpose>Specifies the calling convention that will be used for callbacks from the underlying C MPI. </purpose><description><para>This is a Windows-specific macro, which will be used internally to state the calling convention of any function that is to be used as a callback from MPI. For example, the internally-defined functions that are used in a call to <computeroutput>MPI_Op_create</computeroutput>. This macro is likely only to be useful to users that wish to bypass Boost.MPI, registering their own callbacks in certain cases, e.g., through <computeroutput>MPI_Op_create</computeroutput>. </para></description></macro>
@@ -2446,14 +2445,67 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<header name="boost/mpi/request.hpp">
<para>This header defines the class <computeroutput>request</computeroutput>, which contains a request for non-blocking communication. </para><namespace name="boost">
<namespace name="mpi">
-<class name="request"><purpose>A request for a non-blocking send or receive. </purpose><description><para>This structure contains information about a non-blocking send or receive and will be returned from <computeroutput>isend</computeroutput> or <computeroutput>irecv</computeroutput>, respectively. </para></description><method-group name="public member functions">
+<class name="request"><purpose>A request for a non-blocking send or receive. </purpose><description><para>This structure contains information about a non-blocking send or receive and will be returned from <computeroutput>isend</computeroutput> or <computeroutput>irecv</computeroutput>, respectively. </para></description><class name="handler"><method-group name="public member functions">
+<method name="wait" cv="= 0" specifiers="virtual"><type><classname>status</classname></type></method>
+<method name="test" cv="= 0" specifiers="virtual"><type>optional&lt; <classname>status</classname> &gt;</type></method>
+<method name="cancel" cv="= 0" specifiers="virtual"><type>void</type></method>
+<method name="active" cv="const = 0" specifiers="virtual"><type>bool</type></method>
+<method name="trivial" cv="= 0" specifiers="virtual"><type>optional&lt; MPI_Request &amp; &gt;</type></method>
+</method-group>
+<destructor/>
+</class><class name="legacy_dynamic_primitive_array_handler"><template>
+ <template-type-parameter name="T"/>
+ <template-type-parameter name="A"/>
+ </template></class><class name="legacy_serialized_array_handler"><template>
+ <template-type-parameter name="T"/>
+ </template></class><class name="legacy_serialized_handler"><template>
+ <template-type-parameter name="T"/>
+ </template></class><method-group name="public member functions">
<method name="wait"><type><classname>status</classname></type><description><para>Wait until the communication associated with this request has completed, then return a <computeroutput>status</computeroutput> object describing the communication. </para></description></method>
<method name="test"><type>optional&lt; <classname>status</classname> &gt;</type><description><para>Determine whether the communication associated with this request has completed successfully. If so, returns the <computeroutput>status</computeroutput> object describing the communication. Otherwise, returns an empty <computeroutput>optional&lt;&gt;</computeroutput> to indicate that the communication has not completed yet. Note that once <computeroutput>test()</computeroutput> returns a <computeroutput>status</computeroutput> object, the request has completed and <computeroutput>wait()</computeroutput> should not be called. </para></description></method>
<method name="cancel"><type>void</type><description><para>Cancel a pending communication, assuming it has not already been completed. </para></description></method>
+<method name="trivial"><type>optional&lt; MPI_Request &amp; &gt;</type><description><para>The trivial MPI request implementing this request, provided it is trivial. Probably irrelevant to most users. </para></description></method>
+<method name="active" cv="const"><type>bool</type><description><para>Is this request potentially pending? </para></description></method>
+<method name="preserve"><type>void</type><parameter name="d"><paramtype>boost::shared_ptr&lt; void &gt;</paramtype></parameter></method>
</method-group>
<constructor><description><para>Constructs a NULL request. </para></description></constructor>
-<method-group name="private static functions">
+<method-group name="public static functions">
+<method name="make_trivial_send" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="value"><paramtype>T const &amp;</paramtype></parameter><description><para>Send a known number of primitive objects in one MPI request. </para></description></method>
+<method name="make_trivial_send" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="values"><paramtype>T const *</paramtype></parameter><parameter name="n"><paramtype>int</paramtype></parameter></method>
+<method name="make_packed_send" specifiers="static"><type><classname>request</classname></type><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="values"><paramtype>void const *</paramtype></parameter><parameter name="n"><paramtype>std::size_t</paramtype></parameter></method>
+<method name="make_bottom_send" specifiers="static"><type><classname>request</classname></type><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="tp"><paramtype>MPI_Datatype</paramtype></parameter></method>
+<method name="make_empty_send" specifiers="static"><type><classname>request</classname></type><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter></method>
+<method name="make_trivial_recv" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="value"><paramtype>T &amp;</paramtype></parameter><description><para>Receive a known number of primitive objects in one MPI request. </para></description></method>
+<method name="make_trivial_recv" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="values"><paramtype>T *</paramtype></parameter><parameter name="n"><paramtype>int</paramtype></parameter></method>
+<method name="make_bottom_recv" specifiers="static"><type><classname>request</classname></type><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="tp"><paramtype>MPI_Datatype</paramtype></parameter></method>
+<method name="make_empty_recv" specifiers="static"><type><classname>request</classname></type><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="dest"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter></method>
+<method name="make_dynamic" specifiers="static"><type><classname>request</classname></type><description><para>Construct request for simple data of unknown size. </para></description></method>
+<method name="make_serialized" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="source"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="value"><paramtype>T &amp;</paramtype></parameter><description><para>Constructs request for serialized data. </para></description></method>
+<method name="make_serialized_array" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="source"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="values"><paramtype>T *</paramtype></parameter><parameter name="n"><paramtype>int</paramtype></parameter><description><para>Constructs request for array of complex data. </para></description></method>
+<method name="make_dynamic_primitive_array_recv" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ <template-type-parameter name="A"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="source"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="values"><paramtype>std::vector&lt; T, A &gt; &amp;</paramtype></parameter><description><para>Request to recv array of primitive data. </para></description></method>
+<method name="make_dynamic_primitive_array_send" specifiers="static"><type><classname>request</classname></type><template>
+ <template-type-parameter name="T"/>
+ <template-type-parameter name="A"/>
+ </template><parameter name="comm"><paramtype><classname>communicator</classname> const &amp;</paramtype></parameter><parameter name="source"><paramtype>int</paramtype></parameter><parameter name="tag"><paramtype>int</paramtype></parameter><parameter name="values"><paramtype>std::vector&lt; T, A &gt; const &amp;</paramtype></parameter><description><para>Request to send array of primitive data. </para></description></method>
+</method-group>
+<method-group name="private member functions">
</method-group>
+<constructor><parameter name="h"><paramtype><classname>handler</classname> *</paramtype></parameter></constructor>
</class>
@@ -2550,42 +2602,7 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<header name="boost/mpi/skeleton_and_content.hpp">
<para>This header provides facilities that allow the structure of data types (called the "skeleton") to be transmitted and received separately from the content stored in those data types. These facilities are useful when the data in a stable data structure (e.g., a mesh or a graph) will need to be transmitted repeatedly. In this case, transmitting the skeleton only once saves both communication effort (it need not be sent again) and local computation (serialization need only be performed once for the content). </para><namespace name="boost">
<namespace name="mpi">
-<class name="content"><purpose>A proxy object that transfers the content of an object without its structure. </purpose><description><para>The <computeroutput>content</computeroutput> class indicates that Boost.MPI should transmit or receive the content of an object, but without any information about the structure of the object. It is only meaningful to transmit the content of an object after the receiver has already received the skeleton for the same object.</para><para>Most users will not use <computeroutput>content</computeroutput> objects directly. Rather, they will invoke <computeroutput>send</computeroutput>, <computeroutput>recv</computeroutput>, or <computeroutput>broadcast</computeroutput> operations using <computeroutput>get_content()</computeroutput>. </para></description><method-group name="public member functions">
-<method name="get_mpi_datatype" cv="const"><type>MPI_Datatype</type><description><para>Retrieve the MPI data type that refers to the content of the object.</para><para>
-</para></description><returns><para>the MPI data type, which should only be transmitted or received using <computeroutput>MPI_BOTTOM</computeroutput> as the address. </para>
-</returns></method>
-<method name="commit"><type>void</type><description><para>Commit the MPI data type referring to the content of the object. </para></description></method>
-</method-group>
-<constructor><description><para>Constructs an empty <computeroutput>content</computeroutput> object. This object will not be useful for any Boost.MPI operations until it is reassigned. </para></description></constructor>
-<constructor><parameter name="d"><paramtype>MPI_Datatype</paramtype><description><para>the MPI data type referring to the content of the object.</para></description></parameter><parameter name="committed"><paramtype>bool</paramtype><default>true</default><description><para><computeroutput>true</computeroutput> indicates that <computeroutput>MPI_Type_commit</computeroutput> has already been excuted for the data type <computeroutput>d</computeroutput>. </para></description></parameter><description><para>This routine initializes the <computeroutput>content</computeroutput> object with an MPI data type that refers to the content of an object without its structure.</para><para>
-</para></description></constructor>
-<copy-assignment><type>const <classname>content</classname> &amp;</type><parameter name="d"><paramtype>MPI_Datatype</paramtype><description><para>the new MPI data type referring to the content of the object.</para></description></parameter><description><para>Replace the MPI data type referencing the content of an object.</para><para>
-</para></description><returns><para>*this </para>
-</returns></copy-assignment>
-</class><class name="packed_skeleton_iarchive"><inherit access="public">ignore_iprimitive</inherit><purpose>An archiver that reconstructs a data structure based on the binary skeleton stored in a buffer. </purpose><description><para>The <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput> class is an Archiver (as in the Boost.Serialization library) that can construct the the shape of a data structure based on a binary skeleton stored in a buffer. The <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput> is typically used by the receiver of a skeleton, to prepare a data structure that will eventually receive content separately.</para><para>Users will not generally need to use <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput> directly. Instead, use <computeroutput>skeleton</computeroutput> or <computeroutput>get_skeleton</computeroutput>. </para></description><method-group name="public member functions">
-<method name="get_skeleton" cv="const"><type>const <classname>packed_iarchive</classname> &amp;</type><description><para>Retrieve the archive corresponding to this skeleton. </para></description></method>
-<method name="get_skeleton"><type><classname>packed_iarchive</classname> &amp;</type><description><para>Retrieve the archive corresponding to this skeleton. </para></description></method>
-</method-group>
-<constructor><parameter name="comm"><paramtype>MPI_Comm const &amp;</paramtype><description><para>The communicator over which this archive will be transmitted.</para></description></parameter><parameter name="flags"><paramtype>unsigned int</paramtype><default>boost::archive::no_header</default><description><para>Control the serialization of the skeleton. Refer to the Boost.Serialization documentation before changing the default flags. </para></description></parameter><description><para>Construct a <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput> for the given communicator.</para><para>
-</para></description></constructor>
-<constructor specifiers="explicit"><parameter name="archive"><paramtype><classname>packed_iarchive</classname> &amp;</paramtype><description><para>the archive from which the skeleton will be unpacked. </para></description></parameter><description><para>Construct a <computeroutput><classname alt="boost::mpi::packed_skeleton_iarchive">packed_skeleton_iarchive</classname></computeroutput> that unpacks a skeleton from the given <computeroutput>archive</computeroutput>.</para><para>
-</para></description></constructor>
-</class><class name="packed_skeleton_oarchive"><inherit access="public">ignore_oprimitive</inherit><purpose>An archiver that records the binary skeleton of a data structure into a buffer. </purpose><description><para>The <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> class is an Archiver (as in the Boost.Serialization library) that can record the shape of a data structure (called the "skeleton") into a binary representation stored in a buffer. The <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> is typically used by the send of a skeleton, to pack the skeleton of a data structure for transmission separately from the content.</para><para>Users will not generally need to use <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> directly. Instead, use <computeroutput>skeleton</computeroutput> or <computeroutput>get_skeleton</computeroutput>. </para></description><method-group name="public member functions">
-<method name="get_skeleton" cv="const"><type>const <classname>packed_oarchive</classname> &amp;</type><description><para>Retrieve the archive corresponding to this skeleton. </para></description></method>
-</method-group>
-<constructor><parameter name="comm"><paramtype>MPI_Comm const &amp;</paramtype><description><para>The communicator over which this archive will be transmitted.</para></description></parameter><parameter name="flags"><paramtype>unsigned int</paramtype><default>boost::archive::no_header</default><description><para>Control the serialization of the skeleton. Refer to the Boost.Serialization documentation before changing the default flags. </para></description></parameter><description><para>Construct a <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> for the given communicator.</para><para>
-</para></description></constructor>
-<constructor specifiers="explicit"><parameter name="archive"><paramtype><classname>packed_oarchive</classname> &amp;</paramtype><description><para>the archive to which the skeleton will be packed. </para></description></parameter><description><para>Construct a <computeroutput><classname alt="boost::mpi::packed_skeleton_oarchive">packed_skeleton_oarchive</classname></computeroutput> that packs a skeleton into the given <computeroutput>archive</computeroutput>.</para><para>
-</para></description></constructor>
-</class><struct name="skeleton_proxy"><template>
- <template-type-parameter name="T"/>
- </template><purpose>A proxy that requests that the skeleton of an object be transmitted. </purpose><description><para>The <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> is a lightweight proxy object used to indicate that the skeleton of an object, not the object itself, should be transmitted. It can be used with the <computeroutput>send</computeroutput> and <computeroutput>recv</computeroutput> operations of communicators or the <computeroutput>broadcast</computeroutput> collective. When a <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> is sent, Boost.MPI generates a description containing the structure of the stored object. When that skeleton is received, the receiving object is reshaped to match the structure. Once the skeleton of an object as been transmitted, its <computeroutput>content</computeroutput> can be transmitted separately (often several times) without changing the structure of the object. </para></description><data-member name="object"><type>T &amp;</type></data-member>
-<method-group name="public member functions">
-</method-group>
-<constructor><parameter name="x"><paramtype>T &amp;</paramtype><description><para>the object whose structure will be transmitted or altered. </para></description></parameter><description><para>Constructs a <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput> that references object <computeroutput>x</computeroutput>.</para><para>
-</para></description></constructor>
-</struct>
@@ -2671,18 +2688,8 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
-<function name="skeleton"><type>const <classname>skeleton_proxy</classname>&lt; T &gt;</type><template>
- <template-type-parameter name="T"/>
- </template><parameter name="x"><paramtype>T &amp;</paramtype><description><para>the object whose structure will be transmitted.</para></description></parameter><purpose>Create a skeleton proxy object. </purpose><description><para>This routine creates an instance of the <classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname> class. It will typically be used when calling <computeroutput>send</computeroutput>, <computeroutput>recv</computeroutput>, or <computeroutput>broadcast</computeroutput>, to indicate that only the skeleton (structure) of an object should be transmitted and not its contents.</para><para>
-</para></description><returns><para>a <classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname> object referencing <computeroutput>x</computeroutput> </para>
-</returns></function>
-<function name="get_content"><type>const <classname>content</classname></type><template>
- <template-type-parameter name="T"/>
- </template><parameter name="x"><paramtype>const T &amp;</paramtype><description><para>the object for which the content will be transmitted.</para></description></parameter><purpose>Returns the content of an object, suitable for transmission via Boost.MPI. </purpose><description><para>The function creates an absolute MPI datatype for the object, where all offsets are counted from the address 0 (a.k.a. <computeroutput>MPI_BOTTOM</computeroutput>) instead of the address <computeroutput>&amp;x</computeroutput> of the object. This allows the creation of MPI data types for complex data structures containing pointers, such as linked lists or trees.</para><para>The disadvantage, compared to relative MPI data types is that for each object a new MPI data type has to be created.</para><para>The contents of an object can only be transmitted when the receiver already has an object with the same structure or shape as the sender. To accomplish this, first transmit the skeleton of the object using, e.g., <computeroutput>skeleton()</computeroutput> or <computeroutput><classname alt="boost::mpi::skeleton_proxy">skeleton_proxy</classname></computeroutput>.</para><para>The type <computeroutput>T</computeroutput> has to allow creation of an absolute MPI data type (content).</para><para>
-</para></description><returns><para>the content of the object <computeroutput>x</computeroutput>, which can be used for transmission via <computeroutput>send</computeroutput>, <computeroutput>recv</computeroutput>, or <computeroutput>broadcast</computeroutput>. </para>
-</returns></function>
</namespace>
@@ -2691,6 +2698,9 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
<header name="boost/mpi/skeleton_and_content_fwd.hpp">
<para>This header contains all of the forward declarations required to use transmit skeletons of data structures and the content of data structures separately. To actually transmit skeletons or content, include the header <computeroutput>boost/mpi/skeleton_and_content.hpp</computeroutput>. </para><namespace name="boost">
<namespace name="mpi">
+<struct name="skeleton_proxy"><template>
+ <template-type-parameter name="T"/>
+ </template></struct>
@@ -2776,9 +2786,12 @@ If wrapped in a <computeroutput><classname alt="boost::mpi::inplace_t">inplace_t
-
-
-
+<function name="skeleton"><type>const <classname>skeleton_proxy</classname>&lt; T &gt;</type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="x"><paramtype>T &amp;</paramtype></parameter></function>
+<function name="get_content"><type>const content</type><template>
+ <template-type-parameter name="T"/>
+ </template><parameter name="x"><paramtype>const T &amp;</paramtype></parameter></function>
</namespace>
diff --git a/libs/mpi/doc/point_to_point.qbk b/libs/mpi/doc/point_to_point.qbk
new file mode 100644
index 0000000000..8420f993dc
--- /dev/null
+++ b/libs/mpi/doc/point_to_point.qbk
@@ -0,0 +1,176 @@
+[section:point_to_point Point-to-Point communication]
+
+[section:blocking Blocking communication]
+
+As a message passing library, MPI's primary purpose is to route
+messages from one process to another, i.e., point-to-point. MPI
+contains routines that can send messages, receive messages, and query
+whether messages are available. Each message has a source process, a
+target process, a tag, and a payload containing arbitrary data. The
+source and target processes are the ranks of the sender and receiver
+of the message, respectively. Tags are integers that allow the
+receiver to distinguish between different messages coming from the
+same sender.
+
+The following program uses two MPI processes to write "Hello, world!"
+to the screen (`hello_world.cpp`):
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <string>
+ #include <boost/serialization/string.hpp>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ if (world.rank() == 0) {
+ world.send(1, 0, std::string("Hello"));
+ std::string msg;
+ world.recv(1, 1, msg);
+ std::cout << msg << "!" << std::endl;
+ } else {
+ std::string msg;
+ world.recv(0, 0, msg);
+ std::cout << msg << ", ";
+ std::cout.flush();
+ world.send(0, 1, std::string("world"));
+ }
+
+ return 0;
+ }
+
+The first processor (rank 0) passes the message "Hello" to the second
+processor (rank 1) using tag 0. The second processor prints the string
+it receives, along with a comma, then passes the message "world" back
+to processor 0 with a different tag. The first processor then writes
+this message with the "!" and exits. All sends are accomplished with
+the [memberref boost::mpi::communicator::send
+communicator::send] method and all receives use a corresponding
+[memberref boost::mpi::communicator::recv
+communicator::recv] call.
+
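+The introduction mentioned that a process can also query whether
+messages are available. As a hedged sketch (this helper is ours and is
+not part of `hello_world.cpp`), the [memberref
+boost::mpi::communicator::iprobe communicator::iprobe] member checks
+for a pending message without committing to a blocking receive:
+
+  #include <boost/mpi.hpp>
+  #include <boost/optional.hpp>
+  #include <boost/serialization/string.hpp>
+  #include <string>
+  namespace mpi = boost::mpi;
+
+  // Receive a tag-0 string only if one has already arrived.
+  void poll_for_message(mpi::communicator& world)
+  {
+    // iprobe returns an empty optional when no message is pending.
+    if (boost::optional<mpi::status> s = world.iprobe(mpi::any_source, 0)) {
+      std::string msg;
+      world.recv(s->source(), s->tag(), msg);
+    }
+  }
+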
+[endsect:blocking]
+
+[section:nonblocking Non-blocking communication]
+
+The default MPI communication operations--`send` and `recv`--may have
+to wait until the entire transmission is completed before they can
+return. Sometimes this *blocking* behavior has a negative impact on
+performance, because the sender could be performing useful computation
+while it is waiting for the transmission to occur. More important,
+however, are the cases where several communication operations must
+occur simultaneously, e.g., a process will both send and receive at
+the same time.
+
+Let's revisit our "Hello, world!" program from the previous
+[link mpi.tutorial.point_to_point.blocking section].
+The core of this program transmits two messages:
+
+ if (world.rank() == 0) {
+ world.send(1, 0, std::string("Hello"));
+ std::string msg;
+ world.recv(1, 1, msg);
+ std::cout << msg << "!" << std::endl;
+ } else {
+ std::string msg;
+ world.recv(0, 0, msg);
+ std::cout << msg << ", ";
+ std::cout.flush();
+ world.send(0, 1, std::string("world"));
+ }
+
+The first process passes a message to the second process, then
+prepares to receive a message. The second process does the send and
+receive in the opposite order. However, this sequence of events is
+just that--a *sequence*--meaning that there is essentially no
+parallelism. We can use non-blocking communication to ensure that the
+two messages are transmitted simultaneously
+(`hello_world_nonblocking.cpp`):
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <string>
+ #include <boost/serialization/string.hpp>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+
+ if (world.rank() == 0) {
+ mpi::request reqs[2];
+ std::string msg, out_msg = "Hello";
+ reqs[0] = world.isend(1, 0, out_msg);
+ reqs[1] = world.irecv(1, 1, msg);
+ mpi::wait_all(reqs, reqs + 2);
+ std::cout << msg << "!" << std::endl;
+ } else {
+ mpi::request reqs[2];
+ std::string msg, out_msg = "world";
+ reqs[0] = world.isend(0, 1, out_msg);
+ reqs[1] = world.irecv(0, 0, msg);
+ mpi::wait_all(reqs, reqs + 2);
+ std::cout << msg << ", ";
+ }
+
+ return 0;
+ }
+
+We have replaced calls to the [memberref
+boost::mpi::communicator::send communicator::send] and
+[memberref boost::mpi::communicator::recv
+communicator::recv] members with similar calls to their non-blocking
+counterparts, [memberref boost::mpi::communicator::isend
+communicator::isend] and [memberref
+boost::mpi::communicator::irecv communicator::irecv]. The
+prefix *i* indicates that the operations return immediately with a
+[classref boost::mpi::request mpi::request] object, which
+allows one to query the status of a communication request (see the
+[memberref boost::mpi::request::test test] method) or wait
+until it has completed (see the [memberref
+boost::mpi::request::wait wait] method). Multiple requests
+can be completed at the same time with the [funcref
+boost::mpi::wait_all wait_all] operation.
+
+[important Regarding communication completion/progress:
+The MPI standard requires users to keep the request
+handle for a non-blocking communication, and to call the "wait"
+operation (or successfully test for completion) to complete the send
+or receive.
+Unlike most C MPI implementations, which allow the user to
+discard the request for a non-blocking send, Boost.MPI requires the
+user to call "wait" or "test", since the request object might contain
+temporary buffers that have to be kept until the send is
+completed.
+Moreover, the MPI standard does not guarantee that the
+receive makes any progress before a call to "wait" or "test", although
+most implementations of the C MPI do allow receives to progress before
+the call to "wait" or "test".
+Boost.MPI, on the other hand, generally
+requires "test" or "wait" calls to make progress.
+More specifically, Boost.MPI guarantees that calling "test" multiple times
+will eventually complete the communication (a serialized communication is
+potentially a multi-step operation).]
+
+If you run this program multiple times, you may see some strange
+results: namely, some runs will produce:
+
+ Hello, world!
+
+while others will produce:
+
+ world!
+ Hello,
+
+or even some garbled version of the letters in "Hello" and
+"world". This indicates that there is some parallelism in the program,
+because after both messages are (simultaneously) transmitted, both
+processes will concurrent execute their print statements. For both
+performance and correctness, non-blocking communication operations are
+critical to many parallel applications using MPI.
+
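+Because Boost.MPI generally makes progress only inside "test" and
+"wait", a common pattern is to poll a request while doing other useful
+work. The fragment below is a sketch in the spirit of the example above
+(`world` and `msg` as before; `do_some_work()` is a hypothetical
+placeholder for local computation):
+
+  mpi::request req = world.irecv(1, 1, msg);
+  // test() returns an engaged optional<status> once the receive has
+  // completed; repeated calls also drive Boost.MPI's internal progress.
+  while (!req.test())
+    do_some_work();
+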
+[endsect:nonblocking]
+[endsect:point_to_point]
diff --git a/libs/mpi/doc/python.qbk b/libs/mpi/doc/python.qbk
new file mode 100644
index 0000000000..8944d7574b
--- /dev/null
+++ b/libs/mpi/doc/python.qbk
@@ -0,0 +1,222 @@
+[section:python Python Bindings]
+[python]
+
+Boost.MPI provides an alternative MPI interface from the _Python_
+programming language via the `boost.mpi` module. The
+Boost.MPI Python bindings, built on top of the C++ Boost.MPI using the
+_BoostPython_ library, provide nearly all of the functionality of
+Boost.MPI within a dynamic, object-oriented language.
+
+The Boost.MPI Python module can be built and installed from the
+`libs/mpi/build` directory. Just follow the [link
+mpi.getting_started.config configuration] and [link mpi.getting_started.config.installation
+installation] instructions for the C++ Boost.MPI. Once you have
+installed the Python module, be sure that the installation location is
+in your `PYTHONPATH`.
+
+[section:quickstart Quickstart]
+
+[python]
+
+Getting started with the Boost.MPI Python module is as easy as
+importing `boost.mpi`. Our first "Hello, World!" program is
+just two lines long:
+
+ import boost.mpi as mpi
+ print "I am process %d of %d." % (mpi.rank, mpi.size)
+
+Go ahead and run this program with several processes. Be sure to
+invoke the `python` interpreter from `mpirun`, e.g.,
+
+[pre
+mpirun -np 5 python hello_world.py
+]
+
+This will return output such as:
+
+[pre
+I am process 1 of 5.
+I am process 3 of 5.
+I am process 2 of 5.
+I am process 4 of 5.
+I am process 0 of 5.
+]
+
+Point-to-point operations in Boost.MPI have nearly the same syntax in
+Python as in C++. We can write a simple two-process Python program
+that prints "Hello, world!" by transmitting Python strings:
+
+ import boost.mpi as mpi
+
+ if mpi.world.rank == 0:
+ mpi.world.send(1, 0, 'Hello')
+ msg = mpi.world.recv(1, 1)
+ print msg,'!'
+ else:
+ msg = mpi.world.recv(0, 0)
+ print (msg + ', '),
+ mpi.world.send(0, 1, 'world')
+
+There are only a few notable differences between this Python code and
+the example [link mpi.tutorial.point_to_point in the C++
+tutorial]. First of all, we don't need to write any initialization
+code in Python: just loading the `boost.mpi` module makes the
+appropriate `MPI_Init` and `MPI_Finalize` calls. Second, we're passing
+Python objects from one process to another through MPI. Any Python
+object that can be pickled can be transmitted; the next section will
+describe in more detail how the Boost.MPI Python layer transmits
+objects. Finally, when we receive objects with `recv`, we don't need
+to specify the type because transmission of Python objects is
+polymorphic.
+
+When experimenting with Boost.MPI in Python, don't forget that help is
+always available via `pydoc`: just pass the name of the module or
+module entity on the command line (e.g., `pydoc
+boost.mpi.communicator`) to receive complete reference
+documentation. When in doubt, try it!
+[endsect:quickstart]
+
+[section:user_data Transmitting User-Defined Data]
+Boost.MPI can transmit user-defined data in several different ways.
+Most importantly, it can transmit arbitrary _Python_ objects by pickling
+them at the sender and unpickling them at the receiver, allowing
+arbitrarily complex Python data structures to interoperate with MPI.
+
+Boost.MPI also supports efficient serialization and transmission of
+C++ objects (that have been exposed to Python) through its C++
+interface. Any C++ type that provides (de-)serialization routines that
+meet the requirements of the Boost.Serialization library is eligible
+for this optimization, but the type must be registered in advance. To
+register a C++ type, invoke the C++ function [funcref
+boost::mpi::python::register_serialized
+register_serialized]. If your C++ types come from other Python modules
+(they probably will!), those modules will need to link against the
+`boost_mpi` and `boost_mpi_python` libraries as described in the [link
+mpi.getting_started.config.installation installation section]. Note that you do
+*not* need to link against the Boost.MPI Python extension module.
+
+Finally, Boost.MPI supports separation of the structure of an object
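+For instance, a minimal sketch of such a registration might look as
+follows (the module name `my_module` and the type `my_type` are
+hypothetical, and we assume `register_serialized` accepts a prototype
+value; check the reference documentation for the exact signature):
+
+[c++]
+
+  #include <boost/mpi/python.hpp>
+  #include <boost/python.hpp>
+
+  // A Serializable C++ type exposed to Python (hypothetical).
+  struct my_type {
+    int value;
+    template<typename Archive>
+    void serialize(Archive& ar, const unsigned int /*version*/) { ar & value; }
+  };
+
+  BOOST_PYTHON_MODULE(my_module)
+  {
+    using namespace boost::python;
+    class_<my_type>("my_type");  // expose the type to Python as usual
+
+    // Let Boost.MPI transmit my_type via its C++ serialization
+    // rather than by pickling.
+    boost::mpi::python::register_serialized(my_type());
+  }
+
+[python]
+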
+from the data it stores, allowing the two pieces to be transmitted
+separately. This "skeleton/content" mechanism, described in more
+detail in a later section, is a communication optimization suitable
+for problems with fixed data structures whose internal data changes
+frequently.
+[endsect:user_data]
+
+[section:collectives Collectives]
+
+Boost.MPI supports all of the MPI collectives (`scatter`, `reduce`,
+`scan`, `broadcast`, etc.) for any type of data that can be
+transmitted with the point-to-point communication operations. For the
+MPI collectives that require a user-specified operation (e.g., `reduce`
+and `scan`), the operation can be an arbitrary Python function. For
+instance, one could concatenate strings with `all_reduce`:
+
+ mpi.all_reduce(my_string, lambda x,y: x + y)
+
+The following module-level functions implement MPI collectives:
+
+[table Module-level collective functions
+ [[Function] [Description]]
+ [[`all_gather`] [Gather the values from all processes.]]
+ [[`all_reduce`] [Combine the results from all processes.]]
+ [[`all_to_all`] [Every process sends data to every other process.]]
+ [[`broadcast`] [Broadcast data from one process to all other processes.]]
+ [[`gather`] [Gather the values from all processes to the root.]]
+ [[`reduce`] [Combine the results from all processes to the root.]]
+ [[`scan`] [Prefix reduction of the values from all processes.]]
+ [[`scatter`] [Scatter the values stored at the root to all processes.]]
+]
+[endsect:collectives]
+
+[section:skeleton_content Skeleton/Content Mechanism]
+Boost.MPI provides a skeleton/content mechanism that allows the
+transfer of large data structures to be split into two separate stages,
+with the skeleton (or, "shape") of the data structure sent first and
+the content (or, "data") of the data structure sent later, potentially
+several times, so long as the structure has not changed since the
+skeleton was transferred. The skeleton/content mechanism can improve
+performance when the data structure is large and its shape is fixed,
+because while the skeleton requires serialization (it has an unknown
+size), the content transfer is fixed-size and can be done without
+extra copies.
+
+To use the skeleton/content mechanism from Python, you must first
+register the type of your data structure with the skeleton/content
+mechanism *from C++*. The registration function is [funcref
+boost::mpi::python::register_skeleton_and_content
+register_skeleton_and_content] and resides in the [headerref
+boost/mpi/python.hpp <boost/mpi/python.hpp>] header.
+
+Once you have registered your C++ data structures, you can extract
+the skeleton for an instance of that data structure with `skeleton()`.
+The resulting `skeleton_proxy` can be transmitted via the normal send
+routine, e.g.,
+
+ mpi.world.send(1, 0, skeleton(my_data_structure))
+
+`skeleton_proxy` objects can be received on the other end via `recv()`,
+which stores a newly-created instance of your data structure with the
+same "shape" as the sender in its `"object"` attribute:
+
+ shape = mpi.world.recv(0, 0)
+ my_data_structure = shape.object
+
+Once the skeleton has been transmitted, the content (accessed via
+`get_content`) can be transmitted in much the same way. Note, however,
+that the receiver also specifies `get_content(my_data_structure)` in its
+call to receive:
+
+ if mpi.rank == 0:
+ mpi.world.send(1, 0, get_content(my_data_structure))
+ else:
+ mpi.world.recv(0, 0, get_content(my_data_structure))
+
+Of course, this transmission of content can occur repeatedly, if the
+values in the data structure--but not its shape--changes.
+
+The skeleton/content mechanism is a structured way to exploit the
+interaction between custom-built MPI datatypes and `MPI_BOTTOM`, to
+eliminate extra buffer copies.
+[endsect:skeleton_content]
+
+[section:compatibility C++/Python MPI Compatibility]
+Boost.MPI is a C++ library whose facilities have been exposed to Python
+via the Boost.Python library. Since the Boost.MPI Python bindings are
+built directly on top of the C++ library, and nearly every feature of
+the C++ library is available in Python, hybrid C++/Python programs using
+Boost.MPI can interact, e.g., sending a value from Python but receiving
+that value in C++ (or vice versa). However, doing so requires some
+care. Because Python objects are dynamically typed, Boost.MPI transfers
+type information along with the serialized form of the object, so that
+the object can be received even when its type is not known. This
+mechanism differs from its C++ counterpart, where the static types of
+transmitted values are always known.
+
+The only way to communicate between the C++ and Python views on
+Boost.MPI is to traffic entirely in Python objects. For Python, this
+is the normal state of affairs, so nothing will change. For C++, this
+means sending and receiving values of type `boost::python::object`,
+from the _BoostPython_ library. For instance, say we want to transmit
+an integer value from Python:
+
+ comm.send(1, 0, 17)
+
+In C++, we would receive that value into a Python object and then
+`extract` an integer value:
+
+[c++]
+
+ boost::python::object value;
+ comm.recv(0, 0, value);
+ int int_value = boost::python::extract<int>(value);
+
+In the future, Boost.MPI will be extended to allow improved
+interoperability with the C++ Boost.MPI and the C MPI bindings.
+[endsect:compatibility]
+
+[section:reference Reference]
+The Boost.MPI Python module, `boost.mpi`, has its own
+[@boost.mpi.html reference documentation], which is also
+available using `pydoc` (from the command line) or
+`help(boost.mpi)` (from the Python interpreter).
+
+[endsect:reference]
+
+[endsect:python]
diff --git a/libs/mpi/doc/skeleton_and_content.qbk b/libs/mpi/doc/skeleton_and_content.qbk
new file mode 100644
index 0000000000..4eb424fe22
--- /dev/null
+++ b/libs/mpi/doc/skeleton_and_content.qbk
@@ -0,0 +1,101 @@
+[section:skeleton_and_content Separating structure from content]
+
+When communicating data types over MPI that are not fundamental to MPI
+(such as strings, lists, and user-defined data types), Boost.MPI must
+first serialize these data types into a buffer and then communicate
+them; the receiver then copies the results into a buffer before
+deserializing into an object on the other end. For some data types,
+this overhead can be eliminated by using [classref
+boost::mpi::is_mpi_datatype `is_mpi_datatype`]. However,
+variable-length data types such as strings and lists cannot be MPI
+data types.
+
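+As a hedged aside (the type below is hypothetical), a fixed-size,
+Serializable type can be marked as an MPI data type with the
+`BOOST_IS_MPI_DATATYPE` macro from [headerref boost/mpi/datatype.hpp
+<boost/mpi/datatype.hpp>], which avoids the serialization buffer
+entirely:
+
+  #include <boost/mpi/datatype.hpp>
+
+  // A fixed-size type whose members are all MPI data types themselves.
+  struct my_point {
+    double x, y, z;
+    template<typename Archive>
+    void serialize(Archive& ar, const unsigned int /*version*/)
+    { ar & x & y & z; }
+  };
+  BOOST_IS_MPI_DATATYPE(my_point)
+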
+Boost.MPI supports a second technique for improving performance by
+separating the structure of these variable-length data structures from
+the content stored in the data structures. This feature is only
+beneficial when the shape of the data structure remains the same but
+the content of the data structure will need to be communicated several
+times. For instance, in a finite element analysis the structure of the
+mesh may be fixed at the beginning of computation but the various
+variables on the cells of the mesh (temperature, stress, etc.) will be
+communicated many times within the iterative analysis process. In this
+case, Boost.MPI allows one to first send the "skeleton" of the mesh
+once, then transmit the "content" multiple times. Since the content
+need not contain any information about the structure of the data type,
+it can be transmitted without creating separate communication buffers.
+
+To illustrate the use of skeletons and content, we will take a
+somewhat more limited example wherein a master process generates
+random number sequences into a list and transmits them to several
+slave processes. The length of the list will be fixed at program
+startup, so the content of the list (i.e., the current sequence of
+numbers) can be transmitted efficiently. The complete example is
+available in `example/random_content.cpp`. We begin with the master
+process (rank 0), which builds a list, communicates its structure via
+a [funcref boost::mpi::skeleton `skeleton`], then repeatedly
+generates random number sequences to be broadcast to the slave
+processes via [classref boost::mpi::content `content`]:
+
+
+ // Generate the list and broadcast its structure
+ std::list<int> l(list_len);
+ broadcast(world, mpi::skeleton(l), 0);
+
+ // Generate content several times and broadcast out that content
+ mpi::content c = mpi::get_content(l);
+ for (int i = 0; i < iterations; ++i) {
+ // Generate new random values
+ std::generate(l.begin(), l.end(), &random);
+
+ // Broadcast the new content of l
+ broadcast(world, c, 0);
+ }
+
+ // Notify the slaves that we're done by sending all zeroes
+ std::fill(l.begin(), l.end(), 0);
+ broadcast(world, c, 0);
+
+
+The slave processes have a very similar structure to the master. They
+receive (via the [funcref boost::mpi::broadcast
+`broadcast()`] call) the skeleton of the data structure, then use it
+to build their own lists of integers. In each iteration, they receive
+via another `broadcast()` the new content in the data structure and
+compute some property of the data:
+
+
+ // Receive the content and build up our own list
+ std::list<int> l;
+ broadcast(world, mpi::skeleton(l), 0);
+
+ mpi::content c = mpi::get_content(l);
+ int i = 0;
+ do {
+ broadcast(world, c, 0);
+
+ if (std::find_if
+ (l.begin(), l.end(),
+ std::bind1st(std::not_equal_to<int>(), 0)) == l.end())
+ break;
+
+ // Compute some property of the data.
+
+ ++i;
+ } while (true);
+
+
+The skeletons and content of any Serializable data type can be
+transmitted either via the [memberref
+boost::mpi::communicator::send `send`] and [memberref
+boost::mpi::communicator::recv `recv`] members of the
+[classref boost::mpi::communicator `communicator`] class
+(for point-to-point communication) or broadcast via the [funcref
+boost::mpi::broadcast `broadcast()`] collective. When
+separating a data structure into a skeleton and content, be careful
+not to modify the data structure (either on the sender side or the
+receiver side) without transmitting the skeleton again. Boost.MPI
+cannot detect these accidental modifications to the data structure,
+which will likely result in incorrect data being transmitted or
+unstable programs.
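+
+For instance, a minimal point-to-point sketch (assuming a two-process
+communicator `world`, with a populated `std::list<int> l` on rank 0
+and a default-constructed one on rank 1) might look like:
+
+  if (world.rank() == 0) {
+    // Send the structure once, then ship only the values.
+    world.send(1, 0, mpi::skeleton(l));
+    mpi::content c = mpi::get_content(l);
+    world.send(1, 1, c);
+  } else {
+    // Receiving the skeleton rebuilds the list's shape; the content
+    // then lands directly in the existing nodes.
+    world.recv(0, 0, mpi::skeleton(l));
+    mpi::content c = mpi::get_content(l);
+    world.recv(0, 1, c);
+  }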
+
+[endsect:skeleton_and_content]
diff --git a/libs/mpi/doc/threading.qbk b/libs/mpi/doc/threading.qbk
new file mode 100644
index 0000000000..6f8c1e031b
--- /dev/null
+++ b/libs/mpi/doc/threading.qbk
@@ -0,0 +1,35 @@
+[section:threading Threads]
+
+An increasing number of hybrid parallel applications mix distributed
+and shared memory parallelism. To know how to support that model, one
+needs to know what level of threading support is guaranteed by the MPI
+implementation. There are four ordered levels of possible threading
+support, described by [enumref boost::mpi::threading::level
+mpi::threading::level]. At the lowest level, you should not use
+threads at all; at the highest level, any thread can perform MPI calls.
+
+If you want to use multi-threading in your MPI application, indicate
+your preferred threading support in the environment constructor. Then
+query the level the library actually provides and decide what you can
+do with it (which could be nothing, in which case aborting is a valid
+option):
+
+ #include <boost/mpi/environment.hpp>
+ #include <boost/mpi/communicator.hpp>
+ #include <iostream>
+ namespace mpi = boost::mpi;
+ namespace mt = mpi::threading;
+
+ int main()
+ {
+ mpi::environment env(mt::funneled);
+ if (env.thread_level() < mt::funneled) {
+ env.abort(-1);
+ }
+ mpi::communicator world;
+ std::cout << "I am process " << world.rank() << " of " << world.size()
+ << "." << std::endl;
+ return 0;
+ }
+
+
+[endsect:threading]
diff --git a/libs/mpi/doc/tutorial.qbk b/libs/mpi/doc/tutorial.qbk
new file mode 100644
index 0000000000..4cf2a8d0e9
--- /dev/null
+++ b/libs/mpi/doc/tutorial.qbk
@@ -0,0 +1,132 @@
+[section:tutorial Tutorial]
+
+A Boost.MPI program consists of many cooperating processes (possibly
+running on different computers) that communicate among themselves by
+passing messages. Boost.MPI is a library (as is the lower-level MPI),
+not a language, so the first step in a Boost.MPI program is to create an
+[classref boost::mpi::environment mpi::environment] object
+that initializes the MPI environment and enables communication among
+the processes. The [classref boost::mpi::environment
+mpi::environment] object is initialized with the program arguments
+(which it may modify) in your main program. The creation of this
+object initializes MPI, and its destruction will finalize MPI. In the
+vast majority of Boost.MPI programs, an instance of [classref
+boost::mpi::environment mpi::environment] will be declared
+in `main` at the very beginning of the program.
+
+Communication with MPI always occurs over a *communicator*,
+which can be created by simply default-constructing an object of type
+[classref boost::mpi::communicator mpi::communicator]. This
+communicator can then be queried to determine how many processes are
+running (the "size" of the communicator) and to give a unique number
+to each process, from zero to one less than the size of the
+communicator (i.e., the "rank" of the process):
+
+ #include <boost/mpi/environment.hpp>
+ #include <boost/mpi/communicator.hpp>
+ #include <iostream>
+ namespace mpi = boost::mpi;
+
+ int main()
+ {
+ mpi::environment env;
+ mpi::communicator world;
+ std::cout << "I am process " << world.rank() << " of " << world.size()
+ << "." << std::endl;
+ return 0;
+ }
+
+If you run this program with 7 processes, for instance, you will
+receive output such as:
+
+[pre
+I am process 5 of 7.
+I am process 0 of 7.
+I am process 1 of 7.
+I am process 6 of 7.
+I am process 2 of 7.
+I am process 4 of 7.
+I am process 3 of 7.
+]
+
+Of course, the processes can execute in a different order each time,
+so the ranks might not be strictly increasing. More interestingly, the
+text could come out completely garbled, because one process can start
+writing "I am a process" before another process has finished writing
+"of 7.".
+
+If you are still using an MPI library that supports only MPI 1.1, you
+will need to pass the command line arguments to the environment
+constructor as shown in this example:
+
+ #include <boost/mpi/environment.hpp>
+ #include <boost/mpi/communicator.hpp>
+ #include <iostream>
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+ std::cout << "I am process " << world.rank() << " of " << world.size()
+ << "." << std::endl;
+ return 0;
+ }
+
+[include point_to_point.qbk]
+[include collective.qbk]
+[include user_data_types.qbk]
+[include communicator.qbk]
+[include threading.qbk]
+[include skeleton_and_content.qbk]
+
+[section:performance_optimizations Performance optimizations]
+[section:serialization_optimizations Serialization optimizations]
+
+To obtain optimal performance for small fixed-length data types not containing
+any pointers it is very important to mark them using the type traits of
+Boost.MPI and Boost.Serialization.
+
+As already discussed, fixed-length types containing no pointers can be
+marked as MPI data types by specializing [classref
+boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.:
+
+ namespace boost { namespace mpi {
+ template <>
+ struct is_mpi_datatype<gps_position> : mpl::true_ { };
+ } }
+
+or the equivalent macro
+
+ BOOST_IS_MPI_DATATYPE(gps_position)
+
+In addition, turning off tracking and versioning for these types can
+give a substantial performance gain, provided no pointers to these
+types are used. This is done with the trait classes or helper macros
+of Boost.Serialization:
+
+ BOOST_CLASS_TRACKING(gps_position,track_never)
+ BOOST_CLASS_IMPLEMENTATION(gps_position,object_serializable)
+
+[endsect:serialization_optimizations]
+
+[section:homogeneous_machines Homogeneous Machines]
+
+More optimizations are possible on homogeneous machines by avoiding
+MPI_Pack/MPI_Unpack calls and using direct bitwise copy instead. This
+feature is enabled by defining the macro [macroref
+BOOST_MPI_HOMOGENEOUS] in the include file `boost/mpi/config.hpp`.
+That definition must be consistent when building Boost.MPI and when
+building the application.
+
+In addition, all classes need to be marked both as `is_mpi_datatype`
+and as `is_bitwise_serializable`, using the helper macro of
+Boost.Serialization:
+
+ BOOST_IS_BITWISE_SERIALIZABLE(gps_position)
+
+Usually it is safe to serialize a class for which `is_mpi_datatype` is
+true by a binary copy of its bits. The exceptions are classes in which
+some members should be skipped during serialization.
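+
+Putting the pieces together, a sketch of the complete set of trait
+declarations for the earlier `gps_position` example (the header names
+are the usual Boost.MPI and Boost.Serialization ones, listed here for
+completeness) could read:
+
+  #include <boost/mpi/datatype.hpp>
+  #include <boost/serialization/is_bitwise_serializable.hpp>
+  #include <boost/serialization/tracking.hpp>
+  #include <boost/serialization/level.hpp>
+
+  // Fixed layout, no pointers: eligible for all three optimizations.
+  BOOST_IS_MPI_DATATYPE(gps_position)
+  BOOST_IS_BITWISE_SERIALIZABLE(gps_position)
+  BOOST_CLASS_TRACKING(gps_position, track_never)
+  BOOST_CLASS_IMPLEMENTATION(gps_position, object_serializable)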
+
+[endsect:homogeneous_machines]
+[endsect:performance_optimizations]
+[endsect:tutorial]
diff --git a/libs/mpi/doc/user_data_types.qbk b/libs/mpi/doc/user_data_types.qbk
new file mode 100644
index 0000000000..929ee6a550
--- /dev/null
+++ b/libs/mpi/doc/user_data_types.qbk
@@ -0,0 +1,103 @@
+[section:user_data_types User-defined data types]
+
+The inclusion of `boost/serialization/string.hpp` in the previous
+examples is very important: it makes values of type `std::string`
+serializable, so that they can be transmitted using Boost.MPI. In
+general, built-in C++ types (`int`s, `float`s, characters, etc.) can
+be transmitted over MPI directly, while user-defined and
+library-defined types will need to first be serialized (packed) into a
+format that is amenable to transmission. Boost.MPI relies on the
+_Serialization_ library to serialize and deserialize data types.
+
+For types defined by the standard library (such as `std::string` or
+`std::vector`) and some types in Boost (such as `boost::variant`), the
+_Serialization_ library already contains all of the required
+serialization code. In these cases, you need only include the
+appropriate header from the `boost/serialization` directory.
+
+[def _gps_position_ [link gps_position `gps_position`]]
+For types that do not already have a serialization header, you will
+first need to implement serialization code before the types can be
+transmitted using Boost.MPI. Consider a simple class _gps_position_
+that contains members `degrees`, `minutes`, and `seconds`. This class
+is made serializable by making it a friend of
+`boost::serialization::access` and introducing the templated
+`serialize()` function, as follows:[#gps_position]
+
+ class gps_position
+ {
+ private:
+ friend class boost::serialization::access;
+
+ template<class Archive>
+ void serialize(Archive & ar, const unsigned int version)
+ {
+ ar & degrees;
+ ar & minutes;
+ ar & seconds;
+ }
+
+ int degrees;
+ int minutes;
+ float seconds;
+ public:
+ gps_position(){};
+ gps_position(int d, int m, float s) :
+ degrees(d), minutes(m), seconds(s)
+ {}
+ };
+
+Complete information about making types serializable is beyond the
+scope of this tutorial. For more information, please see the
+_Serialization_ library tutorial from which the above example was
+extracted. One important side benefit of making types serializable for
+Boost.MPI is that they become serializable for any other usage, such
+as storing the objects to disk or manipulating them as XML.
+
+
+Some serializable types, like _gps_position_ above, have a fixed
+amount of data stored at fixed offsets and are fully defined by
+the values of their data members (most POD types with no pointers are good examples).
+When this is the case, Boost.MPI can optimize their serialization and
+transmission by avoiding extraneous copy operations.
+To enable this optimization, users must specialize the type trait [classref
+boost::mpi::is_mpi_datatype `is_mpi_datatype`], e.g.:
+
+ namespace boost { namespace mpi {
+ template <>
+ struct is_mpi_datatype<gps_position> : mpl::true_ { };
+ } }
+
+For non-template types we have defined a macro to simplify declaring a
+type as an MPI datatype:
+
+ BOOST_IS_MPI_DATATYPE(gps_position)
+
+For composite traits, the specialization of [classref
+boost::mpi::is_mpi_datatype `is_mpi_datatype`] may depend on
+`is_mpi_datatype` itself. For instance, a `boost::array` object is
+fixed only when the type of the parameter it stores is fixed:
+
+ namespace boost { namespace mpi {
+ template <typename T, std::size_t N>
+ struct is_mpi_datatype<array<T, N> >
+ : public is_mpi_datatype<T> { };
+ } }
+
+The redundant copy elimination optimization can only be applied when
+the shape of the data type is completely fixed. Variable-length types
+(e.g., strings, linked lists) and types that store pointers cannot use
+the optimization, and Boost.MPI is unable to detect this misuse at
+compile time. Attempting to perform this optimization when it is not
+correct will likely result in segmentation faults and other strange
+program behavior.
+
+Boost.MPI can transmit any user-defined data type from one process to
+another. Built-in types can be transmitted without any extra effort;
+library-defined types require the inclusion of a serialization header;
+and user-defined types will require the addition of serialization
+code. Fixed data types can be optimized for transmission using the
+[classref boost::mpi::is_mpi_datatype `is_mpi_datatype`]
+type trait.
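+
+As a closing sketch, two ranks might exchange a `gps_position` as
+follows (assuming the class definition and trait specialization shown
+above, and a run with at least two processes):
+
+  #include <boost/mpi/environment.hpp>
+  #include <boost/mpi/communicator.hpp>
+  namespace mpi = boost::mpi;
+
+  int main()
+  {
+    mpi::environment env;
+    mpi::communicator world;
+    if (world.rank() == 0) {
+      // Sent as a single MPI datatype thanks to is_mpi_datatype.
+      world.send(1, 0, gps_position(48, 51, 29.6f));
+    } else if (world.rank() == 1) {
+      gps_position p;
+      world.recv(0, 0, p);
+    }
+    return 0;
+  }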
+
+[endsect:user_data_types]
diff --git a/libs/mpi/example/cartesian_communicator.cpp b/libs/mpi/example/cartesian_communicator.cpp
index e06f150cd2..7b011041d9 100644
--- a/libs/mpi/example/cartesian_communicator.cpp
+++ b/libs/mpi/example/cartesian_communicator.cpp
@@ -13,8 +13,6 @@
#include <boost/mpi/environment.hpp>
#include <boost/mpi/cartesian_communicator.hpp>
-#include <boost/test/minimal.hpp>
-
namespace mpi = boost::mpi;
// Curly brace init makes this useless, but
// - Need to support obsolete like g++ 4.3.x. for some reason
@@ -23,7 +21,7 @@ namespace mpi = boost::mpi;
// actually want to use bjam, which does not (make sense))
typedef mpi::cartesian_dimension cd;
-int test_main(int argc, char* argv[])
+int main(int argc, char* argv[])
{
mpi::environment env;
mpi::communicator world;
diff --git a/libs/mpi/src/broadcast.cpp b/libs/mpi/src/broadcast.cpp
index 9f89cc85ca..e171b4865e 100644
--- a/libs/mpi/src/broadcast.cpp
+++ b/libs/mpi/src/broadcast.cpp
@@ -31,19 +31,14 @@ broadcast<const packed_oarchive>(const communicator& comm,
int tag = environment::collectives_tag();
// Broadcast data to all nodes
- std::vector<MPI_Request> requests(size * 2);
- int num_requests = 0;
+ std::vector<request> requests(size-1);
+ std::vector<request>::iterator it = requests.begin();
for (int dest = 0; dest < size; ++dest) {
if (dest != root) {
- // Build up send requests for each child send.
- num_requests += detail::packed_archive_isend(comm, dest, tag, oa,
- &requests[num_requests], 2);
+ *it++ = detail::packed_archive_isend(comm, dest, tag, oa);
}
}
-
- // Complete all of the sends
- BOOST_MPI_CHECK_RESULT(MPI_Waitall,
- (num_requests, &requests[0], MPI_STATUSES_IGNORE));
+ wait_all(requests.begin(), requests.end());
}
template<>
@@ -71,20 +66,14 @@ broadcast<packed_iarchive>(const communicator& comm, packed_iarchive& ia,
detail::packed_archive_recv(comm, root, tag, ia, status);
} else {
// Broadcast data to all nodes
- std::vector<MPI_Request> requests(size * 2);
- int num_requests = 0;
+ std::vector<request> requests(size-1);
+ std::vector<request>::iterator it = requests.begin();
for (int dest = 0; dest < size; ++dest) {
if (dest != root) {
- // Build up send requests for each child send.
- num_requests += detail::packed_archive_isend(comm, dest, tag, ia,
- &requests[num_requests],
- 2);
+ *it++ = detail::packed_archive_isend(comm, dest, tag, ia);
}
}
-
- // Complete all of the sends
- BOOST_MPI_CHECK_RESULT(MPI_Waitall,
- (num_requests, &requests[0], MPI_STATUSES_IGNORE));
+ wait_all(requests.begin(), requests.end());
}
}
diff --git a/libs/mpi/src/communicator.cpp b/libs/mpi/src/communicator.cpp
index a172edd515..016c6af41a 100644
--- a/libs/mpi/src/communicator.cpp
+++ b/libs/mpi/src/communicator.cpp
@@ -217,7 +217,7 @@ void
communicator::send<packed_oarchive>(int dest, int tag,
const packed_oarchive& ar) const
{
- detail::packed_archive_send(MPI_Comm(*this), dest, tag, ar);
+ detail::packed_archive_send(*this, dest, tag, ar);
}
template<>
@@ -242,7 +242,7 @@ communicator::recv<packed_iarchive>(int source, int tag,
packed_iarchive& ar) const
{
status stat;
- detail::packed_archive_recv(MPI_Comm(*this), source, tag, ar,
+ detail::packed_archive_recv(*this, source, tag, ar,
stat.m_status);
return stat;
}
@@ -274,10 +274,7 @@ request
communicator::isend<packed_oarchive>(int dest, int tag,
const packed_oarchive& ar) const
{
- request req;
- detail::packed_archive_isend(MPI_Comm(*this), dest, tag, ar,
- &req.m_requests[0] ,2);
- return req;
+ return detail::packed_archive_isend(*this, dest, tag, ar);
}
template<>
@@ -291,20 +288,12 @@ communicator::isend<packed_skeleton_oarchive>
template<>
request communicator::isend<content>(int dest, int tag, const content& c) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (MPI_BOTTOM, 1, c.get_mpi_datatype(),
- dest, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_bottom_send(*this, dest, tag, c.get_mpi_datatype());
}
request communicator::isend(int dest, int tag) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (MPI_BOTTOM, 0, MPI_PACKED,
- dest, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_empty_send(*this, dest, tag);
}
template<>
@@ -320,27 +309,19 @@ request
communicator::irecv<const content>(int source, int tag,
const content& c) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (MPI_BOTTOM, 1, c.get_mpi_datatype(),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_bottom_recv(*this, source, tag, c.get_mpi_datatype());
}
request communicator::irecv(int source, int tag) const
{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (MPI_BOTTOM, 0, MPI_PACKED,
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
+ return request::make_empty_recv(*this, source, tag);
}
bool operator==(const communicator& comm1, const communicator& comm2)
{
int result;
BOOST_MPI_CHECK_RESULT(MPI_Comm_compare,
- ((MPI_Comm)comm1, (MPI_Comm)comm2, &result));
+ (MPI_Comm(comm1), MPI_Comm(comm2), &result));
return result == MPI_IDENT;
}
diff --git a/libs/mpi/src/environment.cpp b/libs/mpi/src/environment.cpp
index ffdfc90885..97a0a28e83 100644
--- a/libs/mpi/src/environment.cpp
+++ b/libs/mpi/src/environment.cpp
@@ -8,6 +8,7 @@
#include <boost/mpi/environment.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/mpi/detail/mpi_datatype_cache.hpp>
+#include <boost/core/uncaught_exceptions.hpp>
#include <cassert>
#include <string>
#include <exception>
@@ -137,7 +138,7 @@ environment::environment(int& argc, char** &argv, threading::level mt_level,
environment::~environment()
{
if (i_initialized) {
- if (std::uncaught_exception() && abort_on_exception) {
+ if (boost::core::uncaught_exceptions() > 0 && abort_on_exception) {
abort(-1);
} else if (!finalized()) {
detail::mpi_datatype_cache().clear();
diff --git a/libs/mpi/src/point_to_point.cpp b/libs/mpi/src/point_to_point.cpp
index 7b353f7538..6fc0ad175e 100644
--- a/libs/mpi/src/point_to_point.cpp
+++ b/libs/mpi/src/point_to_point.cpp
@@ -20,80 +20,83 @@
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/datatype.hpp>
#include <boost/mpi/exception.hpp>
+#include <boost/mpi/request.hpp>
+#include <boost/mpi/communicator.hpp>
#include <boost/mpi/detail/antiques.hpp>
#include <cassert>
namespace boost { namespace mpi { namespace detail {
void
-packed_archive_send(MPI_Comm comm, int dest, int tag,
+packed_archive_send(communicator const& comm, int dest, int tag,
const packed_oarchive& ar)
{
- std::size_t const& size = ar.size();
- BOOST_MPI_CHECK_RESULT(MPI_Send,
- (detail::unconst(&size), 1,
- get_mpi_datatype(size),
- dest, tag, comm));
- BOOST_MPI_CHECK_RESULT(MPI_Send,
- (detail::unconst(ar.address()), size,
- MPI_PACKED,
- dest, tag, comm));
+#if defined(BOOST_MPI_USE_IMPROBE)
+ {
+ void *buf = detail::unconst(ar.address());
+ BOOST_MPI_CHECK_RESULT(MPI_Send,
+ (buf, ar.size(), MPI_PACKED,
+ dest, tag, comm));
+ }
+#else
+ {
+ std::size_t const& size = ar.size();
+ BOOST_MPI_CHECK_RESULT(MPI_Send,
+ (detail::unconst(&size), 1,
+ get_mpi_datatype(size),
+ dest, tag, comm));
+ BOOST_MPI_CHECK_RESULT(MPI_Send,
+ (detail::unconst(ar.address()), size,
+ MPI_PACKED,
+ dest, tag, comm));
+ }
+#endif
}
-int
-packed_archive_isend(MPI_Comm comm, int dest, int tag,
- const packed_oarchive& ar,
- MPI_Request* out_requests, int num_out_requests)
+request
+packed_archive_isend(communicator const& comm, int dest, int tag,
+ const packed_oarchive& ar)
{
- assert(num_out_requests >= 2);
- std::size_t const& size = ar.size();
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (detail::unconst(&size), 1,
- get_mpi_datatype(size),
- dest, tag, comm, out_requests));
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (detail::unconst(ar.address()), size,
- MPI_PACKED,
- dest, tag, comm, out_requests + 1));
-
- return 2;
+ return request::make_packed_send(comm, dest, tag,
+ detail::unconst(ar.address()), ar.size());
}
-int
-packed_archive_isend(MPI_Comm comm, int dest, int tag,
- const packed_iarchive& ar,
- MPI_Request* out_requests, int num_out_requests)
+request
+packed_archive_isend(communicator const& comm, int dest, int tag,
+ const packed_iarchive& ar)
{
- assert(num_out_requests >= 2);
-
- std::size_t const& size = ar.size();
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (detail::unconst(&size), 1,
- get_mpi_datatype(size),
- dest, tag, comm, out_requests));
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (detail::unconst(ar.address()), size,
- MPI_PACKED,
- dest, tag, comm, out_requests + 1));
-
- return 2;
+ return request::make_packed_send(comm, dest, tag,
+ detail::unconst(ar.address()), ar.size());
}
void
-packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar,
+packed_archive_recv(communicator const& comm, int source, int tag, packed_iarchive& ar,
MPI_Status& status)
{
- std::size_t count;
- BOOST_MPI_CHECK_RESULT(MPI_Recv,
- (&count, 1, get_mpi_datatype(count),
- source, tag, comm, &status));
-
- // Prepare input buffer and receive the message
- ar.resize(count);
- BOOST_MPI_CHECK_RESULT(MPI_Recv,
- (ar.address(), count, MPI_PACKED,
- status.MPI_SOURCE, status.MPI_TAG,
- comm, &status));
+#if defined(BOOST_MPI_USE_IMPROBE)
+ {
+ MPI_Message msg;
+ BOOST_MPI_CHECK_RESULT(MPI_Mprobe, (source, tag, comm, &msg, &status));
+ int count;
+ BOOST_MPI_CHECK_RESULT(MPI_Get_count, (&status, MPI_PACKED, &count));
+ ar.resize(count);
+ BOOST_MPI_CHECK_RESULT(MPI_Mrecv, (ar.address(), count, MPI_PACKED, &msg, &status));
+ }
+#else
+ {
+ std::size_t count;
+ BOOST_MPI_CHECK_RESULT(MPI_Recv,
+ (&count, 1, get_mpi_datatype(count),
+ source, tag, comm, &status));
+
+ // Prepare input buffer and receive the message
+ ar.resize(count);
+ BOOST_MPI_CHECK_RESULT(MPI_Recv,
+ (ar.address(), count, MPI_PACKED,
+ status.MPI_SOURCE, status.MPI_TAG,
+ comm, &status));
+ }
+#endif
}
} } } // end namespace boost::mpi::detail
diff --git a/libs/mpi/src/request.cpp b/libs/mpi/src/request.cpp
index 9bc842f2ea..3ba5695aa7 100644
--- a/libs/mpi/src/request.cpp
+++ b/libs/mpi/src/request.cpp
@@ -5,116 +5,235 @@
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/mpi/request.hpp>
#include <boost/mpi/status.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/detail/request_handlers.hpp>
namespace boost { namespace mpi {
+request::request()
+ : m_handler() {}
+
+void
+request::preserve(boost::shared_ptr<void> d) {
+ if (!m_preserved) {
+ m_preserved = d;
+ } else {
+ boost::shared_ptr<void> cdr = m_preserved;
+ typedef std::pair<boost::shared_ptr<void>, boost::shared_ptr<void> > cons;
+ boost::shared_ptr<cons> p(new cons(d, cdr));
+ m_preserved = p;
+ }
+}
+request request::make_dynamic() { return request(new dynamic_handler()); }
+
+request
+request::make_bottom_send(communicator const& comm, int dest, int tag, MPI_Datatype tp) {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (MPI_BOTTOM, 1, tp,
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+}
+
+request
+request::make_empty_send(communicator const& comm, int dest, int tag) {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (MPI_BOTTOM, 0, MPI_PACKED,
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+}
+
+request
+request::make_bottom_recv(communicator const& comm, int dest, int tag, MPI_Datatype tp) {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (MPI_BOTTOM, 1, tp,
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+}
+
+request
+request::make_empty_recv(communicator const& comm, int dest, int tag) {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Irecv,
+ (MPI_BOTTOM, 0, MPI_PACKED,
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+}
+
+request
+request::make_packed_send(communicator const& comm, int dest, int tag, void const* buffer, std::size_t n) {
+#if defined(BOOST_MPI_USE_IMPROBE)
+ {
+ trivial_handler* handler = new trivial_handler;
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (const_cast<void*>(buffer), n, MPI_PACKED,
+ dest, tag, comm, &handler->m_request));
+ return request(handler);
+ }
+#else
+ {
+ dynamic_handler *handler = new dynamic_handler;
+ request req(handler);
+ shared_ptr<std::size_t> size(new std::size_t(n));
+ req.preserve(size);
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (size.get(), 1,
+ get_mpi_datatype(*size),
+ dest, tag, comm, handler->m_requests));
+ BOOST_MPI_CHECK_RESULT(MPI_Isend,
+ (const_cast<void*>(buffer), *size,
+ MPI_PACKED,
+ dest, tag, comm, handler->m_requests+1));
+ return req;
+ }
+#endif
+}
+
/***************************************************************************
- * request *
+ * handlers *
***************************************************************************/
-request::request()
- : m_handler(0), m_data()
+
+request::handler::~handler() {}
+
+optional<MPI_Request&>
+request::legacy_handler::trivial() {
+ return boost::none;
+}
+
+bool
+request::legacy_handler::active() const {
+ return m_requests[0] != MPI_REQUEST_NULL || m_requests[1] != MPI_REQUEST_NULL;
+}
+
+// trivial handler
+
+request::trivial_handler::trivial_handler()
+ : m_request(MPI_REQUEST_NULL) {}
+
+status
+request::trivial_handler::wait()
+{
+ status result;
+ BOOST_MPI_CHECK_RESULT(MPI_Wait, (&m_request, &result.m_status));
+ return result;
+}
+
+
+optional<status>
+request::trivial_handler::test()
+{
+ status result;
+ int flag = 0;
+ BOOST_MPI_CHECK_RESULT(MPI_Test,
+ (&m_request, &flag, &result.m_status));
+ return flag != 0? optional<status>(result) : optional<status>();
+}
+
+void
+request::trivial_handler::cancel()
+{
+ BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_request));
+}
+
+bool
+request::trivial_handler::active() const
+{
+ return m_request != MPI_REQUEST_NULL;
+}
+
+optional<MPI_Request&>
+request::trivial_handler::trivial()
+{
+ return m_request;
+}
+
+// dynamic handler
+
+request::dynamic_handler::dynamic_handler()
{
m_requests[0] = MPI_REQUEST_NULL;
m_requests[1] = MPI_REQUEST_NULL;
}
-
-status request::wait()
+
+status
+request::dynamic_handler::wait()
+{
+ // This request is a send of a serialized type, broken into two
+ // separate messages. Complete both sends at once.
+ MPI_Status stats[2];
+ int error_code = MPI_Waitall(2, m_requests, stats);
+ if (error_code == MPI_ERR_IN_STATUS) {
+ // Dig out which status structure has the error, and use that
+ // one when throwing the exception.
+ if (stats[0].MPI_ERROR == MPI_SUCCESS
+ || stats[0].MPI_ERROR == MPI_ERR_PENDING)
+ boost::throw_exception(exception("MPI_Waitall", stats[1].MPI_ERROR));
+ else
+ boost::throw_exception(exception("MPI_Waitall", stats[0].MPI_ERROR));
+ } else if (error_code != MPI_SUCCESS) {
+ // There was an error somewhere in the MPI_Waitall call; throw
+ // an exception for it.
+ boost::throw_exception(exception("MPI_Waitall", error_code));
+ }
+
+ // No errors. Returns the first status structure.
+ status result;
+ result.m_status = stats[0];
+ return result;
+}
+
+optional<status>
+request::dynamic_handler::test()
{
- if (m_handler) {
- // This request is a receive for a serialized type. Use the
- // handler to wait for completion.
- return *m_handler(this, ra_wait);
- } else if (m_requests[1] == MPI_REQUEST_NULL) {
- // This request is either a send or a receive for a type with an
- // associated MPI datatype, or a serialized datatype that has been
- // packed into a single message. Just wait on the one receive/send
- // and return the status to the user.
+ // This request is a send of a serialized type, broken into two
+ // separate messages. We only get a result if both complete.
+ MPI_Status stats[2];
+ int flag = 0;
+ int error_code = MPI_Testall(2, m_requests, &flag, stats);
+ if (error_code == MPI_ERR_IN_STATUS) {
+ // Dig out which status structure has the error, and use that
+ // one when throwing the exception.
+ if (stats[0].MPI_ERROR == MPI_SUCCESS
+ || stats[0].MPI_ERROR == MPI_ERR_PENDING)
+ boost::throw_exception(exception("MPI_Testall", stats[1].MPI_ERROR));
+ else
+ boost::throw_exception(exception("MPI_Testall", stats[0].MPI_ERROR));
+ } else if (error_code != MPI_SUCCESS) {
+ // There was an error somewhere in the MPI_Testall call; throw
+ // an exception for it.
+ boost::throw_exception(exception("MPI_Testall", error_code));
+ }
+
+ // No errors. Returns the second status structure if the send has
+ // completed.
+ if (flag != 0) {
status result;
- BOOST_MPI_CHECK_RESULT(MPI_Wait, (&m_requests[0], &result.m_status));
+ result.m_status = stats[1];
return result;
} else {
- // This request is a send of a serialized type, broken into two
- // separate messages. Complete both sends at once.
- MPI_Status stats[2];
- int error_code = MPI_Waitall(2, m_requests, stats);
- if (error_code == MPI_ERR_IN_STATUS) {
- // Dig out which status structure has the error, and use that
- // one when throwing the exception.
- if (stats[0].MPI_ERROR == MPI_SUCCESS
- || stats[0].MPI_ERROR == MPI_ERR_PENDING)
- boost::throw_exception(exception("MPI_Waitall", stats[1].MPI_ERROR));
- else
- boost::throw_exception(exception("MPI_Waitall", stats[0].MPI_ERROR));
- } else if (error_code != MPI_SUCCESS) {
- // There was an error somewhere in the MPI_Waitall call; throw
- // an exception for it.
- boost::throw_exception(exception("MPI_Waitall", error_code));
- }
-
- // No errors. Returns the first status structure.
- status result;
- result.m_status = stats[0];
- return result;
+ return optional<status>();
}
}
-optional<status> request::test()
+void
+request::dynamic_handler::cancel()
{
- if (m_handler) {
- // This request is a receive for a serialized type. Use the
- // handler to test for completion.
- return m_handler(this, ra_test);
- } else if (m_requests[1] == MPI_REQUEST_NULL) {
- // This request is either a send or a receive for a type with an
- // associated MPI datatype, or a serialized datatype that has been
- // packed into a single message. Just test the one receive/send
- // and return the status to the user if it has completed.
- status result;
- int flag = 0;
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (&m_requests[0], &flag, &result.m_status));
- return flag != 0? optional<status>(result) : optional<status>();
- } else {
- // This request is a send of a serialized type, broken into two
- // separate messages. We only get a result if both complete.
- MPI_Status stats[2];
- int flag = 0;
- int error_code = MPI_Testall(2, m_requests, &flag, stats);
- if (error_code == MPI_ERR_IN_STATUS) {
- // Dig out which status structure has the error, and use that
- // one when throwing the exception.
- if (stats[0].MPI_ERROR == MPI_SUCCESS
- || stats[0].MPI_ERROR == MPI_ERR_PENDING)
- boost::throw_exception(exception("MPI_Testall", stats[1].MPI_ERROR));
- else
- boost::throw_exception(exception("MPI_Testall", stats[0].MPI_ERROR));
- } else if (error_code != MPI_SUCCESS) {
- // There was an error somewhere in the MPI_Testall call; throw
- // an exception for it.
- boost::throw_exception(exception("MPI_Testall", error_code));
- }
-
- // No errors. Returns the second status structure if the send has
- // completed.
- if (flag != 0) {
- status result;
- result.m_status = stats[1];
- return result;
- } else {
- return optional<status>();
- }
- }
+ BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[0]));
+ BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[1]));
}
-void request::cancel()
+bool
+request::dynamic_handler::active() const
{
- if (m_handler) {
- m_handler(this, ra_cancel);
- } else {
- BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[0]));
- if (m_requests[1] != MPI_REQUEST_NULL)
- BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[1]));
- }
+ return (m_requests[0] != MPI_REQUEST_NULL
+ || m_requests[1] != MPI_REQUEST_NULL);
}
+optional<MPI_Request&>
+request::dynamic_handler::trivial() {
+ return boost::none;
+}
+
} } // end namespace boost::mpi
diff --git a/libs/mpi/test/Jamfile.v2 b/libs/mpi/test/Jamfile.v2
index 15e4971991..6fa85d8ad7 100644
--- a/libs/mpi/test/Jamfile.v2
+++ b/libs/mpi/test/Jamfile.v2
@@ -17,8 +17,8 @@ if [ mpi.configured ]
test-suite mpi
:
[ mpi-test version_test : : : 1 ]
- [ mpi-test block_nonblock_test-b2nb : block_nonblock_test.cpp : <testing.arg>"b2nb" : 2 ]
- [ mpi-test block_nonblock_test-nb2b : block_nonblock_test.cpp : <testing.arg>"nb2b" : 2 ]
+ [ mpi-test block_nonblock_test-b2nb : block_nonblock_test.cpp : : 2 ]
+ [ mpi-test block_nonblock_test-nb2b : block_nonblock_test.cpp : : 2 ]
[ mpi-test random_gather : ../example/random_gather.cpp : : 2 ]
[ mpi-test random_scatter : ../example/random_scatter.cpp : : 2 ]
[ mpi-test cartesian_communicator : ../example/cartesian_communicator.cpp : : 24 ]
@@ -31,10 +31,7 @@ test-suite mpi
[ mpi-test gather_test : : : 1 2 11 ]
[ mpi-test is_mpi_op_test : : : 1 ]
[ mpi-test mt_level_test : : : 1 ]
- [ mpi-test mt_init_test-single : mt_init_test.cpp : <testing.arg>"single" : 1 4 ]
- [ mpi-test mt_init_test-funneled : mt_init_test.cpp : <testing.arg>"funneled" : 1 4 ]
- [ mpi-test mt_init_test-serialized : mt_init_test.cpp : <testing.arg>"serialized" : 1 4 ]
- [ mpi-test mt_init_test-multiple : mt_init_test.cpp : <testing.arg>"multiple" : 1 4 ]
+ [ mpi-test mt_init_test : mt_init_test.cpp : : 1 4 ]
# Note: Microsoft MPI fails nonblocking_test on 1 processor
[ mpi-test nonblocking_test : : : 2 11 24 ]
[ mpi-test reduce_test ]
@@ -51,5 +48,7 @@ test-suite mpi
[ mpi-test groups_test ]
# tests that require -std=c++11
[ mpi-test sendrecv_vector : : : 2 ]
+  # Intel MPI 2018 and older are expected to fail:
+ [ mpi-test non_blocking_any_source : : : 2 17 ]
;
}
diff --git a/libs/mpi/test/all_gather_test.cpp b/libs/mpi/test/all_gather_test.cpp
index 1bd4da949a..c2385f61e0 100644
--- a/libs/mpi/test/all_gather_test.cpp
+++ b/libs/mpi/test/all_gather_test.cpp
@@ -12,12 +12,14 @@
#include <boost/mpi/collectives/all_gatherv.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
-#include <boost/test/minimal.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/list.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
+#define BOOST_TEST_MODULE mpi_all_gather
+#include <boost/test/included/unit_test.hpp>
+
#include "gps_position.hpp"
namespace mpi = boost::mpi;
@@ -130,18 +132,17 @@ struct string_list_generator
}
};
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(all_gather)
{
- boost::mpi::environment env(argc, argv);
+ boost::mpi::environment env;
mpi::communicator comm;
all_gather_test(comm, int_generator(), "integers");
all_gather_test(comm, gps_generator(), "GPS positions");
all_gather_test(comm, string_generator(), "string");
all_gather_test(comm, string_list_generator(), "list of strings");
-
+
all_gatherv_test(comm, int_generator(), "integers");
all_gatherv_test(comm, gps_generator(), "GPS positions");
all_gatherv_test(comm, string_generator(), "string");
all_gatherv_test(comm, string_list_generator(), "list of strings");
- return 0;
}
diff --git a/libs/mpi/test/all_reduce_test.cpp b/libs/mpi/test/all_reduce_test.cpp
index 2072bdbb7e..31e523591c 100644
--- a/libs/mpi/test/all_reduce_test.cpp
+++ b/libs/mpi/test/all_reduce_test.cpp
@@ -8,7 +8,6 @@
#include <boost/mpi/collectives/all_reduce.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <vector>
#include <algorithm>
#include <boost/serialization/string.hpp>
@@ -16,6 +15,9 @@
#include <boost/lexical_cast.hpp>
#include <numeric>
+#define BOOST_TEST_MODULE mpi_all_reduce
+#include <boost/test/included/unit_test.hpp>
+
using boost::mpi::communicator;
// A simple point class that we can build, add, compare, and
@@ -276,30 +278,25 @@ struct is_commutative<std::plus<wrapped_int>, wrapped_int>
} } // end namespace boost::mpi
-int test_main(int argc, char* argv[])
-{
+BOOST_AUTO_TEST_CASE(all_reduce)
+{
using namespace boost::mpi;
- environment env(argc, argv);
-
+ environment env;
communicator comm;
// Built-in MPI datatypes with built-in MPI operations
- all_reduce_test(comm, int_generator(), "integers", std::plus<int>(), "sum",
- 0);
- all_reduce_test(comm, int_generator(), "integers", std::multiplies<int>(),
- "product", 1);
- all_reduce_test(comm, int_generator(), "integers", maximum<int>(),
- "maximum", 0);
- all_reduce_test(comm, int_generator(), "integers", minimum<int>(),
- "minimum", 2);
+ all_reduce_test(comm, int_generator(), "integers", std::plus<int>(), "sum", 0);
+ all_reduce_test(comm, int_generator(), "integers", std::multiplies<int>(), "product", 1);
+ all_reduce_test(comm, int_generator(), "integers", maximum<int>(), "maximum", 0);
+ all_reduce_test(comm, int_generator(), "integers", minimum<int>(), "minimum", 2);
// User-defined MPI datatypes with operations that have the
// same name as built-in operations.
- all_reduce_test(comm, point_generator(point(0,0,0)), "points",
- std::plus<point>(), "sum", point());
+ all_reduce_test(comm, point_generator(point(0,0,0)), "points", std::plus<point>(),
+ "sum", point());
// Built-in MPI datatypes with user-defined operations
- all_reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(),
+ all_reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(),
"bitwise and", -1);
// Arbitrary types with user-defined, commutative operations.
@@ -309,6 +306,4 @@ int test_main(int argc, char* argv[])
// Arbitrary types with (non-commutative) user-defined operations
all_reduce_test(comm, string_generator(), "strings",
std::plus<std::string>(), "concatenation", std::string());
-
- return 0;
}
diff --git a/libs/mpi/test/all_to_all_test.cpp b/libs/mpi/test/all_to_all_test.cpp
index 4863a2cd08..d1882ece19 100644
--- a/libs/mpi/test/all_to_all_test.cpp
+++ b/libs/mpi/test/all_to_all_test.cpp
@@ -8,7 +8,6 @@
#include <boost/mpi/collectives/all_to_all.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
#include <boost/serialization/string.hpp>
@@ -16,6 +15,9 @@
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
+#define BOOST_TEST_MODULE mpi_all_to_all
+#include <boost/test/included/unit_test.hpp>
+
using boost::mpi::communicator;
using boost::mpi::packed_skeleton_iarchive;
@@ -99,15 +101,13 @@ struct string_list_generator
}
};
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(all_to_all)
{
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator comm;
+
all_to_all_test(comm, int_generator(), "integers");
all_to_all_test(comm, gps_generator(), "GPS positions");
all_to_all_test(comm, string_generator(), "string");
all_to_all_test(comm, string_list_generator(), "list of strings");
-
- return 0;
}
diff --git a/libs/mpi/test/block_nonblock_test.cpp b/libs/mpi/test/block_nonblock_test.cpp
index 1e3f14b47d..3088b65598 100644
--- a/libs/mpi/test/block_nonblock_test.cpp
+++ b/libs/mpi/test/block_nonblock_test.cpp
@@ -1,17 +1,26 @@
#include <vector>
#include <iostream>
#include <iterator>
+#include <typeinfo>
+
#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>
-#include <boost/test/minimal.hpp>
+#include <boost/core/demangle.hpp>
+
+//#include "debugger.cpp"
+
+#define BOOST_TEST_MODULE mpi_nonblocking
+#include <boost/test/included/unit_test.hpp>
namespace mpi = boost::mpi;
template<typename T>
bool test(mpi::communicator const& comm, std::vector<T> const& ref, bool iswap, bool alloc)
{
+
int rank = comm.rank();
if (rank == 0) {
+ std::cout << "Testing with type " << boost::core::demangle(typeid(T).name()) << '\n';
if (iswap) {
std::cout << "Blockin send, non blocking receive.\n";
} else {
@@ -55,11 +64,13 @@ bool test(mpi::communicator const& comm, std::vector<T> const& ref, bool iswap,
}
}
-int test_main(int argc, char **argv)
+BOOST_AUTO_TEST_CASE(non_blocking)
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
-
+
+ BOOST_TEST_REQUIRE(world.size() > 1);
+
std::vector<int> integers(13); // don't assume we're lucky
for(int i = 0; i < int(integers.size()); ++i) {
integers[i] = i;
@@ -72,33 +83,13 @@ int test_main(int argc, char **argv)
strings[i] = fmt.str();
}
- bool block_to_non_block = true;
- bool non_block_to_block = true;
- if (argc == 2) {
- if (std::string(argv[1]) == "b2nb") {
- non_block_to_block = false;
- } else if (std::string(argv[1]) == "nb2b") {
- block_to_non_block = false;
- } else {
- if (world.rank() == 0) {
- std::cerr << "Usage: " << argv[0] << " [<n2nb|nb2b]\n";
- }
- return -1;
- }
- }
- bool passed = true;
- if (block_to_non_block) {
- passed = passed && test(world, integers, true, true);
- passed = passed && test(world, integers, true, false);
- passed = passed && test(world, strings, true, true);
- passed = passed && test(world, strings, true, false);
- }
- if (non_block_to_block) {
- passed = passed && test(world, integers, false, true);
- passed = passed && test(world, integers, false, false);
- passed = passed && test(world, strings, false, true);
- passed = passed && test(world, strings, false, false);
- }
- passed = mpi::all_reduce(world, passed, std::logical_and<bool>());
- return passed ? 0 : 1;
+ BOOST_CHECK(test(world, integers, true, true));
+ BOOST_CHECK(test(world, integers, true, false));
+ BOOST_CHECK(test(world, strings, true, true));
+ BOOST_CHECK(test(world, strings, true, false));
+
+ BOOST_CHECK(test(world, integers, false, true));
+ BOOST_CHECK(test(world, integers, false, false));
+ BOOST_CHECK(test(world, strings, false, true));
+ BOOST_CHECK(test(world, strings, false, false));
}
diff --git a/libs/mpi/test/broadcast_stl_test.cpp b/libs/mpi/test/broadcast_stl_test.cpp
index d3c6635b43..705d6ccb9a 100644
--- a/libs/mpi/test/broadcast_stl_test.cpp
+++ b/libs/mpi/test/broadcast_stl_test.cpp
@@ -5,19 +5,21 @@
// http://www.boost.org/LICENSE_1_0.txt)
// A test of the broadcast() collective.
-#include <boost/mpi/collectives/broadcast.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
-
#include <algorithm>
#include <vector>
#include <map>
+#include <boost/mpi/collectives/broadcast.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+
#include <boost/serialization/string.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/map.hpp>
+#define BOOST_TEST_MODULE mpi_broadcast_stl
+#include <boost/test/included/unit_test.hpp>
+
namespace mpi = boost::mpi;
typedef std::vector<std::map<int, double> > sparse;
@@ -58,21 +60,16 @@ broadcast_test(const mpi::communicator& comm, const T& bc_value,
}
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(broadcast_stl)
{
- boost::mpi::environment env(argc, argv);
+ boost::mpi::environment env;
mpi::communicator comm;
- if (comm.size() == 1) {
- std::cerr << "ERROR: Must run the broadcast test with more than one "
- << "process." << std::endl;
- comm.abort(-1);
- }
+ BOOST_TEST_REQUIRE(comm.size() > 1);
sparse s;
s.resize(2);
s[0][12] = 0.12;
s[1][13] = 1.13;
broadcast_test(comm, s, "sparse");
- return 0;
}
diff --git a/libs/mpi/test/broadcast_test.cpp b/libs/mpi/test/broadcast_test.cpp
index ddae7a8217..5e3c6eef6b 100644
--- a/libs/mpi/test/broadcast_test.cpp
+++ b/libs/mpi/test/broadcast_test.cpp
@@ -8,7 +8,6 @@
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
#include <boost/serialization/string.hpp>
@@ -17,6 +16,9 @@
#include <boost/iterator/counting_iterator.hpp>
//#include "debugger.hpp"
+#define BOOST_TEST_MODULE mpi_broadcast
+#include <boost/test/included/unit_test.hpp>
+
using boost::mpi::communicator;
using boost::mpi::packed_skeleton_iarchive;
@@ -133,18 +135,12 @@ test_skeleton_and_content(const communicator& comm, int root = 0)
(comm.barrier)();
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(broadcast)
{
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator comm;
- if (comm.size() == 1) {
- std::cerr << "ERROR: Must run the broadcast test with more than one "
- << "process." << std::endl;
- MPI_Abort(comm, -1);
- }
- //wait_for_debugger(extract_paused_ranks(argc, argv), comm);
+ BOOST_TEST_REQUIRE(comm.size() > 1);
// Check transfer of individual objects
broadcast_test(comm, 17, "integers");
@@ -160,5 +156,4 @@ int test_main(int argc, char* argv[])
test_skeleton_and_content(comm, 0);
test_skeleton_and_content(comm, 1);
- return 0;
}
diff --git a/libs/mpi/test/cartesian_topology_init_test.cpp b/libs/mpi/test/cartesian_topology_init_test.cpp
index 9da72eaef3..6138a2e6c6 100644
--- a/libs/mpi/test/cartesian_topology_init_test.cpp
+++ b/libs/mpi/test/cartesian_topology_init_test.cpp
@@ -14,15 +14,15 @@
#include <algorithm>
#include <functional>
-#define BOOST_TEST_MODULE BoostMPI
-#include <boost/test/included/unit_test.hpp>
-
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/collectives.hpp>
#include <boost/array.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/cartesian_communicator.hpp>
+#define BOOST_TEST_MODULE mpi_cartesian_topology_init
+#include <boost/test/included/unit_test.hpp>
+
namespace mpi = boost::mpi;
BOOST_AUTO_TEST_CASE(cartesian_dimension_init)
diff --git a/libs/mpi/test/cartesian_topology_test.cpp b/libs/mpi/test/cartesian_topology_test.cpp
index fc9ffc19f4..d63fc50036 100644
--- a/libs/mpi/test/cartesian_topology_test.cpp
+++ b/libs/mpi/test/cartesian_topology_test.cpp
@@ -17,7 +17,8 @@
#include <boost/mpi/environment.hpp>
#include <boost/mpi/cartesian_communicator.hpp>
-#include <boost/test/minimal.hpp>
+#define BOOST_TEST_MODULE mpi_cartesian_topology
+#include <boost/test/included/unit_test.hpp>
namespace mpi = boost::mpi;
@@ -160,11 +161,11 @@ void test_cartesian_topology( mpi::communicator const& world, mpi::cartesian_top
}
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(cartesian_topology)
{
- mpi::environment env(argc, argv);
-
+ mpi::environment env;
mpi::communicator world;
+
int const ndim = world.size() >= 24 ? 3 : 2;
mpi::cartesian_topology topo(ndim);
typedef mpi::cartesian_dimension cd;
@@ -189,5 +190,4 @@ int test_main(int argc, char* argv[])
}
test_cartesian_topology( world, std::move(topo));
#endif
- return 0;
}
diff --git a/libs/mpi/test/debugger.cpp b/libs/mpi/test/debugger.cpp
index e730ee7b50..90093e79f6 100644
--- a/libs/mpi/test/debugger.cpp
+++ b/libs/mpi/test/debugger.cpp
@@ -18,17 +18,31 @@ std::vector<int> extract_paused_ranks(int argc, char** argv) {
void wait_for_debugger(std::vector<int> const& processes, boost::mpi::communicator const& comm)
{
int i = 1;
+ bool waiting = std::find(processes.begin(), processes.end(), comm.rank()) != processes.end();
for (int r = 0; r < comm.size(); ++r) {
if (comm.rank() == r) {
- std::cout << "Rank " << comm.rank() << " has PID " << getpid() << '\n';
+ std::cout << "Rank " << comm.rank() << " has PID " << getpid();
+ if (waiting) {
+ std::cout << " and is waiting.";
+ }
+ std::cout << std::endl;
}
comm.barrier();
}
- sleep(1);
if (std::find(processes.begin(), processes.end(), comm.rank()) != processes.end()) {
- while (i!=0) {
- sleep(2);
+ while (i != 0) {
+ sleep(5);
}
}
+ std::cout << "Rank " << comm.rank() << " will proceed.\n";
+}
+
+void wait_for_debugger(boost::mpi::communicator const& comm)
+{
+ std::vector<int> all;
+ for (int r = 0; r < comm.size(); ++r) {
+ all.push_back(r);
+ }
+ wait_for_debugger(all, comm);
}
diff --git a/libs/mpi/test/debugger.hpp b/libs/mpi/test/debugger.hpp
index 8c8899ae18..71abf51d07 100644
--- a/libs/mpi/test/debugger.hpp
+++ b/libs/mpi/test/debugger.hpp
@@ -22,3 +22,5 @@ std::vector<int> extract_paused_ranks(int argc, char** argv);
 * set the local variable 'i' to 0 to let the process restart.
*/
void wait_for_debugger(std::vector<int> const& processes, boost::mpi::communicator const& comm);
+/** @overload */
+void wait_for_debugger(boost::mpi::communicator const& comm);
diff --git a/libs/mpi/test/gather_test.cpp b/libs/mpi/test/gather_test.cpp
index faec09930a..357e31032e 100644
--- a/libs/mpi/test/gather_test.cpp
+++ b/libs/mpi/test/gather_test.cpp
@@ -9,13 +9,15 @@
#include <boost/mpi/collectives/gatherv.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include "gps_position.hpp"
#include <boost/serialization/string.hpp>
#include <boost/serialization/list.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
+#define BOOST_TEST_MODULE mpi_gather
+#include <boost/test/included/unit_test.hpp>
+
using boost::mpi::communicator;
template<typename Generator>
@@ -146,10 +148,9 @@ struct string_list_generator
}
};
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(gather)
{
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator comm;
gather_test(comm, int_generator(), "integers");
@@ -161,6 +162,4 @@ int test_main(int argc, char* argv[])
gatherv_test(comm, gps_generator(), "GPS positions");
gatherv_test(comm, string_generator(), "string");
gatherv_test(comm, string_list_generator(), "list of strings");
-
- return 0;
}
diff --git a/libs/mpi/test/graph_topology_test.cpp b/libs/mpi/test/graph_topology_test.cpp
index 2cda504e49..4d781265c3 100644
--- a/libs/mpi/test/graph_topology_test.cpp
+++ b/libs/mpi/test/graph_topology_test.cpp
@@ -17,7 +17,6 @@
#include <boost/graph/adjacency_list.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/graph/erdos_renyi_generator.hpp>
-#include <boost/test/minimal.hpp>
#include <boost/random/linear_congruential.hpp>
#include <boost/graph/iteration_macros.hpp>
#include <boost/graph/isomorphism.hpp>
@@ -26,6 +25,9 @@
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/config.hpp>
+#define BOOST_TEST_MODULE mpi_graph_topology
+#include <boost/test/included/unit_test.hpp>
+
#if defined(BOOST_NO_CXX98_RANDOM_SHUFFLE)
#include <random>
@@ -48,7 +50,7 @@ using boost::mpi::communicator;
using boost::mpi::graph_communicator;
using namespace boost;
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(graph_topology)
{
boost::function_requires< IncidenceGraphConcept<graph_communicator> >();
boost::function_requires< AdjacencyGraphConcept<graph_communicator> >();
@@ -57,8 +59,7 @@ int test_main(int argc, char* argv[])
double prob = 0.1;
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator world;
// Random number generator
@@ -137,6 +138,4 @@ int test_main(int argc, char* argv[])
if (graph_comm.rank() == 0)
std::cout << "Verifying isomorphism..." << std::endl;
BOOST_CHECK(verify_isomorphism(graph, graph_comm, graph_alt_index));
-
- return 0;
}
diff --git a/libs/mpi/test/groups_test.cpp b/libs/mpi/test/groups_test.cpp
index 64e4a811c2..ce81d182d2 100644
--- a/libs/mpi/test/groups_test.cpp
+++ b/libs/mpi/test/groups_test.cpp
@@ -9,12 +9,13 @@
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/group.hpp>
-#include <boost/test/minimal.hpp>
#include <vector>
#include <algorithm>
-namespace mpi = boost::mpi;
+#define BOOST_TEST_MODULE mpi_group_test
+#include <boost/test/included/unit_test.hpp>
+namespace mpi = boost::mpi;
template <typename T>
struct iota
@@ -50,10 +51,9 @@ void group_test(const mpi::communicator& comm)
}
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(group)
{
- mpi::environment env(argc,argv);
+ mpi::environment env;
mpi::communicator comm;
group_test(comm);
- return 0;
}
diff --git a/libs/mpi/test/is_mpi_op_test.cpp b/libs/mpi/test/is_mpi_op_test.cpp
index 01f34fc616..b0991ce68a 100644
--- a/libs/mpi/test/is_mpi_op_test.cpp
+++ b/libs/mpi/test/is_mpi_op_test.cpp
@@ -8,27 +8,35 @@
#include <boost/mpi/operations.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/type_traits/is_base_and_derived.hpp>
-#include <boost/test/minimal.hpp>
+
+#define BOOST_TEST_MODULE mpi_is_mpi_op_test
+#include <boost/test/included/unit_test.hpp>
using namespace boost::mpi;
using namespace std;
using boost::is_base_and_derived;
-int test_main(int argc, char* argv[])
+template<class Op, typename R>
+void
+test_op(int c_value)
{
- boost::mpi::environment env(argc, argv);
+ typedef is_mpi_op<Op, R> mpi_op;
+ BOOST_TEST(mpi_op::op() == c_value);
+}
- // Check each predefined MPI_Op type that we support directly.
- BOOST_CHECK((is_mpi_op<maximum<int>, int>::op() == MPI_MAX));
- BOOST_CHECK((is_mpi_op<minimum<float>, float>::op() == MPI_MIN));
- BOOST_CHECK((is_mpi_op<plus<double>, double>::op() == MPI_SUM));
- BOOST_CHECK((is_mpi_op<multiplies<long>, long>::op() == MPI_PROD));
- BOOST_CHECK((is_mpi_op<logical_and<int>, int>::op() == MPI_LAND));
- BOOST_CHECK((is_mpi_op<bitwise_and<int>, int>::op() == MPI_BAND));
- BOOST_CHECK((is_mpi_op<logical_or<int>, int>::op() == MPI_LOR));
- BOOST_CHECK((is_mpi_op<bitwise_or<int>, int>::op() == MPI_BOR));
- BOOST_CHECK((is_mpi_op<logical_xor<int>, int>::op() == MPI_LXOR));
- BOOST_CHECK((is_mpi_op<bitwise_xor<int>, int>::op() == MPI_BXOR));
+BOOST_AUTO_TEST_CASE(mpi_basic_op)
+{
+ boost::mpi::environment env;
- return 0;
+ // Check each predefined MPI_Op type that we support directly.
+ test_op<minimum<float>, float>(MPI_MIN);
+ BOOST_TEST((is_mpi_op<minimum<float>, float>::op() == MPI_MIN));
+ BOOST_TEST((is_mpi_op<plus<double>, double>::op() == MPI_SUM));
+ BOOST_TEST((is_mpi_op<multiplies<long>, long>::op() == MPI_PROD));
+ BOOST_TEST((is_mpi_op<logical_and<int>, int>::op() == MPI_LAND));
+ BOOST_TEST((is_mpi_op<bitwise_and<int>, int>::op() == MPI_BAND));
+ BOOST_TEST((is_mpi_op<logical_or<int>, int>::op() == MPI_LOR));
+ BOOST_TEST((is_mpi_op<bitwise_or<int>, int>::op() == MPI_BOR));
+ BOOST_TEST((is_mpi_op<logical_xor<int>, int>::op() == MPI_LXOR));
+ BOOST_TEST((is_mpi_op<bitwise_xor<int>, int>::op() == MPI_BXOR));
}
diff --git a/libs/mpi/test/mt_init_test.cpp b/libs/mpi/test/mt_init_test.cpp
index 970af2ee7b..543bf45a8b 100644
--- a/libs/mpi/test/mt_init_test.cpp
+++ b/libs/mpi/test/mt_init_test.cpp
@@ -6,22 +6,33 @@
// test threading::level operations
-#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
+#include <boost/mpi.hpp>
#include <iostream>
#include <sstream>
+#define BOOST_TEST_MODULE mpi_mt_init
+#include <boost/test/included/unit_test.hpp>
+
namespace mpi = boost::mpi;
-int
-test_main(int argc, char* argv[]) {
+void
+test_mt_init(std::string s)
+{
mpi::threading::level required = mpi::threading::level(-1);
- BOOST_CHECK(argc == 2);
- std::istringstream cmdline(argv[1]);
- cmdline >> required;
- BOOST_CHECK(!cmdline.bad());
- mpi::environment env(argc,argv,required);
+ std::istringstream in(s);
+ in >> required;
+ BOOST_CHECK(!in.bad());
+ mpi::environment env;
BOOST_CHECK(env.thread_level() >= mpi::threading::single);
BOOST_CHECK(env.thread_level() <= mpi::threading::multiple);
- return 0;
+}
+
+BOOST_AUTO_TEST_CASE(mt_init)
+{
+ mpi::environment env;
+ mpi::communicator comm;
+ test_mt_init("single");
+ test_mt_init("funneled");
+ test_mt_init("serialized");
+ test_mt_init("multiple");
}
diff --git a/libs/mpi/test/mt_level_test.cpp b/libs/mpi/test/mt_level_test.cpp
index abf5131576..a72e9a8a50 100644
--- a/libs/mpi/test/mt_level_test.cpp
+++ b/libs/mpi/test/mt_level_test.cpp
@@ -7,10 +7,12 @@
// test threading::level operations
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <iostream>
#include <sstream>
+#define BOOST_TEST_MODULE mpi_level_test
+#include <boost/test/included/unit_test.hpp>
+
namespace mpi = boost::mpi;
void
@@ -99,9 +101,8 @@ test_threading_level_cmp() {
BOOST_CHECK(mt::multiple <= mt::multiple);
}
-int
-test_main(int argc, char* argv[]) {
+BOOST_AUTO_TEST_CASE(mt_level)
+{
test_threading_levels_io();
test_threading_level_cmp();
- return 0;
}
diff --git a/libs/mpi/test/non_blocking_any_source.cpp b/libs/mpi/test/non_blocking_any_source.cpp
new file mode 100644
index 0000000000..009babe674
--- /dev/null
+++ b/libs/mpi/test/non_blocking_any_source.cpp
@@ -0,0 +1,60 @@
+// Copyright (C) 2018 Steffen Hirschmann
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Test any_source on serialized data
+#include <vector>
+#include <iostream>
+#include <iterator>
+#include <boost/mpi.hpp>
+#include <boost/serialization/vector.hpp>
+
+#define BOOST_TEST_MODULE mpi_non_blocking_any_source
+#include <boost/test/included/unit_test.hpp>
+
+namespace mpi = boost::mpi;
+
+std::string ok(bool b) {
+ return b ? "ok" : "ko";
+}
+
+BOOST_AUTO_TEST_CASE(non_blocking_any)
+{
+  mpi::environment env;
+ mpi::communicator world;
+ int rank = world.rank();
+ if (rank == 0) {
+#if BOOST_MPI_VERSION < 3
+ std::cout << "\nExpected failure with MPI standard < 3 ("
+ << BOOST_MPI_VERSION << "." << BOOST_MPI_SUBVERSION
+ << " detected)\n\n";
+#endif
+ std::vector<boost::mpi::request> req;
+ std::vector<std::vector<int> > data(world.size() - 1);
+ for (int i = 1; i < world.size(); ++i) {
+ req.push_back(world.irecv(mpi::any_source, 0, data[i - 1]));
+ }
+ boost::mpi::wait_all(req.begin(), req.end());
+ std::vector<bool> check(world.size()-1, false);
+ for (int i = 0; i < world.size() - 1; ++i) {
+ std::cout << "Process 0 received:" << std::endl;
+ std::copy(data[i].begin(), data[i].end(), std::ostream_iterator<int>(std::cout, " "));
+ std::cout << std::endl;
+ int idx = data[i].size();
+ BOOST_CHECK(std::equal_range(data[i].begin(), data[i].end(), idx)
+ == std::make_pair(data[i].begin(), data[i].end()));
+ check[idx-1] = true;
+ }
+ for(int i = 0; i < world.size() - 1; ++i) {
+ std::cout << "Received from " << i+1 << " is " << ok(check[i]) << '\n';
+ }
+ BOOST_CHECK(std::equal_range(check.begin(), check.end(), true)
+ == std::make_pair(check.begin(), check.end()));
+ } else {
+ std::vector<int> vec(rank, rank);
+ mpi::request req = world.isend(0, 0, vec);
+ req.wait();
+ }
+}
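
The new test relies on a compact idiom: for a sorted range, std::equal_range(first, last, x) spans the entire range exactly when every element equals x, which makes it a one-line "all elements equal" check. A self-contained sketch (not part of this patch):

    // Illustrative only -- not part of this patch. equal_range over a
    // constant vector spans the whole range iff every element equals the
    // probe value, giving a one-line "all equal" assertion.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main()
    {
      std::vector<int> v(4, 7);                        // {7, 7, 7, 7}
      assert(std::equal_range(v.begin(), v.end(), 7)
             == std::make_pair(v.begin(), v.end()));   // all equal to 7
      v.back() = 8;                                    // sorted, not constant
      assert(std::equal_range(v.begin(), v.end(), 7)
             != std::make_pair(v.begin(), v.end()));
    }
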
diff --git a/libs/mpi/test/nonblocking_test.cpp b/libs/mpi/test/nonblocking_test.cpp
index 7ff44b47bd..8e3eb099f9 100644
--- a/libs/mpi/test/nonblocking_test.cpp
+++ b/libs/mpi/test/nonblocking_test.cpp
@@ -8,13 +8,16 @@
#include <boost/mpi/nonblocking.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include "gps_position.hpp"
#include <boost/lexical_cast.hpp>
#include <boost/serialization/string.hpp>
#include <boost/serialization/list.hpp>
#include <iterator>
#include <algorithm>
+//#include "debugger.cpp"
+
+#define BOOST_TEST_MODULE mpi_non_blocking_test
+#include <boost/test/included/unit_test.hpp>
using boost::mpi::communicator;
using boost::mpi::request;
@@ -48,6 +51,7 @@ nonblocking_tests( const communicator& comm, const T* values, int num_values,
{
nonblocking_test(comm, values, num_values, kind, mk_wait_any);
nonblocking_test(comm, values, num_values, kind, mk_test_any);
+ //wait_for_debugger(comm);
nonblocking_test(comm, values, num_values, kind, mk_wait_all);
nonblocking_test(comm, values, num_values, kind, mk_wait_all_keep);
if (!composite) {
@@ -72,6 +76,9 @@ nonblocking_test(const communicator& comm, const T* values, int num_values,
using boost::mpi::wait_some;
using boost::mpi::test_some;
+ int next = (comm.rank() + 1) % comm.size();
+ int prev = (comm.rank() + comm.size() - 1) % comm.size();
+
if (comm.rank() == 0) {
std::cout << "Testing " << method_kind_names[method]
<< " with " << kind << "...";
@@ -83,12 +90,10 @@ nonblocking_test(const communicator& comm, const T* values, int num_values,
T incoming_value;
std::vector<T> incoming_values(num_values);
-
std::vector<request> reqs;
// Send/receive the first value
- reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 0, values[0]));
- reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(),
- 0, incoming_value));
+ reqs.push_back(comm.isend(next, 0, values[0]));
+ reqs.push_back(comm.irecv(prev, 0, incoming_value));
if (method != mk_wait_any && method != mk_test_any) {
#ifndef LAM_MPI
@@ -98,16 +103,13 @@ nonblocking_test(const communicator& comm, const T* values, int num_values,
// when using shared memory, not TCP.
// Send/receive an empty message
- reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 1));
- reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(),
- 1));
+ reqs.push_back(comm.isend(next, 1));
+ reqs.push_back(comm.irecv(prev, 1));
#endif
// Send/receive an array
- reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 2, values,
- num_values));
- reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(),
- 2, &incoming_values.front(), num_values));
+ reqs.push_back(comm.isend(next, 2, values, num_values));
+ reqs.push_back(comm.irecv(prev, 2, &incoming_values.front(), num_values));
}
switch (method) {
@@ -220,10 +222,9 @@ nonblocking_test(const communicator& comm, const T* values, int num_values,
values));
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(nonblocking)
{
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator comm;
int int_array[3] = {17, 42, 256};
@@ -243,6 +244,4 @@ int test_main(int argc, char* argv[])
lst_of_strings.push_back(boost::lexical_cast<std::string>(i));
nonblocking_tests(comm, &lst_of_strings, 1, "list of strings", true);
-
- return 0;
}
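
The next/prev variables factored out above are the standard ring addressing: each rank sends to (rank + 1) % size and receives from (rank + size - 1) % size. A minimal sketch (not part of this patch) of the nonblocking exchange built on that pattern:

    // Illustrative only -- not part of this patch: a one-step nonblocking
    // ring exchange using the same next/prev addressing as the test.
    #include <boost/mpi.hpp>
    #include <vector>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;
      int next = (world.rank() + 1) % world.size();
      int prev = (world.rank() + world.size() - 1) % world.size();
      int outgoing = world.rank();  // buffer must stay alive until wait_all
      int incoming = -1;
      std::vector<mpi::request> reqs;
      reqs.push_back(world.isend(next, 0, outgoing));
      reqs.push_back(world.irecv(prev, 0, incoming));
      mpi::wait_all(reqs.begin(), reqs.end());
      // incoming now holds prev's rank
    }
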
diff --git a/libs/mpi/test/pointer_test.cpp b/libs/mpi/test/pointer_test.cpp
index 848b904aec..65428e7522 100644
--- a/libs/mpi/test/pointer_test.cpp
+++ b/libs/mpi/test/pointer_test.cpp
@@ -7,9 +7,11 @@
// a test of pointer serialization
#include <boost/mpi.hpp>
-#include <boost/test/minimal.hpp>
#include <boost/serialization/shared_ptr.hpp>
+#define BOOST_TEST_MODULE mpi_pointer
+#include <boost/test/included/unit_test.hpp>
+
class A
{
public:
@@ -21,24 +23,20 @@ class A
}
};
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(pointer)
{
- boost::mpi::environment env(argc, argv);
+ boost::mpi::environment env;
boost::mpi::communicator world;
- if(world.rank() == 0)
- {
+ if (world.rank() == 0) {
boost::shared_ptr<A> p(new A);
p->i = 42;
world.send(1, 0, p);
- }
- else if(world.rank() == 1)
- {
+ } else if (world.rank() == 1) {
boost::shared_ptr<A> p;
world.recv(0, 0, p);
std::cout << p->i << std::endl;
BOOST_CHECK(p->i==42);
}
- return 0;
}
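
The hunk above elides the serialization hook of class A. For reference, a minimal serializable type of the same shape (a sketch, not copied from the test) that can travel through send/recv wrapped in a boost::shared_ptr:

    // Illustrative only -- a sketch, not the test's actual definition.
    // An intrusive serialize() member makes the type transferable,
    // including through shared_ptr (with serialization/shared_ptr.hpp).
    #include <boost/serialization/access.hpp>

    class A
    {
    public:
      int i;

    private:
      friend class boost::serialization::access;
      template<class Archive>
      void serialize(Archive& ar, const unsigned int /*version*/)
      {
        ar & i;
      }
    };
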
diff --git a/libs/mpi/test/reduce_test.cpp b/libs/mpi/test/reduce_test.cpp
index d5538b1b46..c4f9cedd70 100644
--- a/libs/mpi/test/reduce_test.cpp
+++ b/libs/mpi/test/reduce_test.cpp
@@ -8,13 +8,15 @@
#include <boost/mpi/collectives/reduce.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <algorithm>
#include <boost/serialization/string.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
#include <numeric>
+#define BOOST_TEST_MODULE mpi_reduce_test
+#include <boost/test/included/unit_test.hpp>
+
using boost::mpi::communicator;
// A simple point class that we can build, add, compare, and
@@ -200,10 +202,10 @@ struct is_commutative<std::plus<wrapped_int>, wrapped_int>
} } // end namespace boost::mpi
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(reduce)
{
using namespace boost::mpi;
- environment env(argc, argv);
+ environment env;
communicator comm;
@@ -232,6 +234,4 @@ int test_main(int argc, char* argv[])
// Arbitrary types with (non-commutative) user-defined operations
reduce_test(comm, string_generator(), "strings",
std::plus<std::string>(), "concatenation", std::string());
-
- return 0;
}
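
The is_commutative specialization closing this hunk is the mechanism by which a user-defined reduction is declared reorderable. A minimal sketch of the full pattern (wrapped_int here is a stand-in, not the test's definition):

    // Illustrative only -- not part of this patch. Specializing
    // is_commutative tells reduce()/scan() they may reorder operands
    // for this op/type pair.
    #include <boost/mpi/operations.hpp>
    #include <boost/mpl/bool.hpp>
    #include <functional>

    struct wrapped_int {
      int value;
    };

    inline wrapped_int operator+(const wrapped_int& a, const wrapped_int& b)
    {
      wrapped_int r = { a.value + b.value };
      return r;
    }

    namespace boost { namespace mpi {
      template<>
      struct is_commutative<std::plus<wrapped_int>, wrapped_int>
        : boost::mpl::true_ { };
    } }
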
diff --git a/libs/mpi/test/ring_test.cpp b/libs/mpi/test/ring_test.cpp
index cf87a8c15a..573d3d387e 100644
--- a/libs/mpi/test/ring_test.cpp
+++ b/libs/mpi/test/ring_test.cpp
@@ -10,11 +10,14 @@
// types, serializable objects, etc.)
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <algorithm>
#include "gps_position.hpp"
#include <boost/serialization/string.hpp>
#include <boost/serialization/list.hpp>
+//#include "debugger.cpp"
+
+#define BOOST_TEST_MODULE mpi_ring
+#include <boost/test/included/unit_test.hpp>
using boost::mpi::communicator;
using boost::mpi::status;
@@ -84,16 +87,12 @@ ring_array_test(const communicator& comm, const T* pass_values,
enum color_t {red, green, blue};
BOOST_IS_MPI_DATATYPE(color_t)
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(ring)
{
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator comm;
- if (comm.size() == 1) {
- std::cerr << "ERROR: Must run the ring test with more than one process."
- << std::endl;
- MPI_Abort(comm, -1);
- }
+
+ BOOST_TEST_REQUIRE(comm.size() > 1);
// Check transfer of individual objects
ring_test(comm, 17, "integers", 0);
@@ -122,6 +121,4 @@ int test_main(int argc, char* argv[])
std::string string_array[3] = { "Hello", "MPI", "World" };
ring_array_test(comm, string_array, 3, "string", 0);
ring_array_test(comm, string_array, 3, "string", 1);
-
- return 0;
}
diff --git a/libs/mpi/test/scan_test.cpp b/libs/mpi/test/scan_test.cpp
index 7ba162afb0..4fabdb9e7e 100644
--- a/libs/mpi/test/scan_test.cpp
+++ b/libs/mpi/test/scan_test.cpp
@@ -8,13 +8,16 @@
#include <boost/mpi/collectives/scan.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <algorithm>
#include <boost/serialization/string.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
#include <numeric>
+#define BOOST_TEST_MODULE mpi_scan_test
+#include <boost/test/included/unit_test.hpp>
+
+
using boost::mpi::communicator;
// A simple point class that we can build, add, compare, and
@@ -190,11 +193,10 @@ struct is_commutative<std::plus<wrapped_int>, wrapped_int>
} } // end namespace boost::mpi
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(scan)
{
using namespace boost::mpi;
- environment env(argc, argv);
-
+ environment env;
communicator comm;
// Built-in MPI datatypes with built-in MPI operations
@@ -222,6 +224,4 @@ int test_main(int argc, char* argv[])
// Arbitrary types with (non-commutative) user-defined operations
scan_test(comm, string_generator(), "strings",
std::plus<std::string>(), "concatenation");
-
- return 0;
}
diff --git a/libs/mpi/test/scatter_test.cpp b/libs/mpi/test/scatter_test.cpp
index 5e5ecd2c82..f33dcba42d 100644
--- a/libs/mpi/test/scatter_test.cpp
+++ b/libs/mpi/test/scatter_test.cpp
@@ -10,14 +10,16 @@
#include <boost/mpi/collectives/scatterv.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include "gps_position.hpp"
#include <boost/serialization/string.hpp>
#include <boost/serialization/list.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/lexical_cast.hpp>
-using boost::mpi::communicator;
+#define BOOST_TEST_MODULE mpi_scatter
+#include <boost/test/included/unit_test.hpp>
+
+using namespace boost::mpi;
template<typename Generator>
void
@@ -51,7 +53,7 @@ scatter_test(const communicator& comm, Generator generator,
BOOST_CHECK(value == generator(comm.rank()));
}
- (comm.barrier)();
+ comm.barrier();
}
@@ -150,14 +152,13 @@ scatterv_test(const communicator& comm, Generator generator,
BOOST_CHECK(myvalues[i] == generator(comm.rank()));
}
- (comm.barrier)();
+ comm.barrier();
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(simple_scatter)
{
- boost::mpi::environment env(argc, argv);
-
+ environment env;
communicator comm;
scatter_test(comm, int_generator(), "integers");
@@ -169,6 +170,4 @@ int test_main(int argc, char* argv[])
scatterv_test(comm, gps_generator(), "GPS positions");
scatterv_test(comm, string_generator(), "string");
scatterv_test(comm, string_list_generator(), "list of strings");
-
- return 0;
}
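
The switch from (comm.barrier)() to comm.barrier() drops the classic guard against function-like macros: parenthesizing the member name keeps the preprocessor from seeing the token barrier followed by '(', so a macro of that name cannot fire. A compilable sketch (not part of this patch) of the difference:

    // Illustrative only -- not part of this patch: why the parentheses
    // were there.
    #include <iostream>

    struct comm_t { void barrier() { std::cout << "real barrier\n"; } };

    #define barrier() barrier_macro_should_not_fire()

    int main()
    {
      comm_t comm;
      (comm.barrier)();  // OK: 'barrier' not followed by '(', no expansion
      // comm.barrier(); // would expand to comm.barrier_macro_should_not_fire()
    }
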
diff --git a/libs/mpi/test/sendrecv_test.cpp b/libs/mpi/test/sendrecv_test.cpp
index 8fd4233dba..801b262909 100644
--- a/libs/mpi/test/sendrecv_test.cpp
+++ b/libs/mpi/test/sendrecv_test.cpp
@@ -6,7 +6,6 @@
// A test of the sendrecv() operation.
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <vector>
#include <algorithm>
#include <boost/serialization/string.hpp>
@@ -14,6 +13,9 @@
#include <boost/lexical_cast.hpp>
#include <numeric>
+#define BOOST_TEST_MODULE mpi_sendrecv
+#include <boost/test/included/unit_test.hpp>
+
namespace mpi = boost::mpi;
struct blob {
@@ -51,11 +53,10 @@ void test_sendrecv(mpi::communicator& com) {
BOOST_CHECK(recv == T(wprev));
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(sendrecv)
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
test_sendrecv<int>(world);
test_sendrecv<blob>(world);
- return 0;
}
diff --git a/libs/mpi/test/sendrecv_vector.cpp b/libs/mpi/test/sendrecv_vector.cpp
index 29b9751e6b..dc13928589 100644
--- a/libs/mpi/test/sendrecv_vector.cpp
+++ b/libs/mpi/test/sendrecv_vector.cpp
@@ -79,8 +79,7 @@ int main(int argc, char* argv[]) {
array<int, 9>& d = b2;
d[2] = -17;
world.send(1, 0, data);
- }
- else {
+ } else {
world.recv(0, 0, data);
// check data at vector ends
blob& b1 = data[0];
diff --git a/libs/mpi/test/skeleton_content_test.cpp b/libs/mpi/test/skeleton_content_test.cpp
index 0bff1f739e..6a42fd5e04 100644
--- a/libs/mpi/test/skeleton_content_test.cpp
+++ b/libs/mpi/test/skeleton_content_test.cpp
@@ -8,7 +8,6 @@
// content for data types.
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
#include <boost/serialization/list.hpp>
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/mpi/nonblocking.hpp>
@@ -16,6 +15,9 @@
#include <boost/iterator/counting_iterator.hpp>
#include <boost/mpi/collectives/broadcast.hpp>
+#define BOOST_TEST_MODULE mpi_skeleton_content
+#include <boost/test/included/unit_test.hpp>
+
using boost::mpi::communicator;
using boost::mpi::packed_skeleton_iarchive;
@@ -183,17 +185,11 @@ test_skeleton_and_content_nonblocking(const communicator& comm, int root)
(comm.barrier)();
}
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(skeleton_content)
{
- boost::mpi::environment env(argc, argv);
-
+ boost::mpi::environment env;
communicator comm;
- if (comm.size() == 1) {
- std::cerr << "ERROR: Must run the skeleton and content test with more "
- "than one process."
- << std::endl;
- MPI_Abort(comm, -1);
- }
+ BOOST_TEST_REQUIRE(comm.size() > 1);
test_skeleton_and_content(comm, 0, true);
test_skeleton_and_content(comm, 0, false);
@@ -201,6 +197,4 @@ int test_main(int argc, char* argv[])
test_skeleton_and_content(comm, 1, false);
test_skeleton_and_content_nonblocking(comm, 0);
test_skeleton_and_content_nonblocking(comm, 1);
-
- return 0;
}
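
For readers new to the feature under test: a skeleton transmits only the structure of a container, after which get_content() moves just the values, in place. A minimal sketch (not part of this patch; assumes at least two processes):

    // Illustrative only -- not part of this patch. The skeleton carries
    // the container's shape; the content then fills the already-shaped
    // storage on the receiver.
    #include <boost/mpi.hpp>
    #include <boost/mpi/skeleton_and_content.hpp>
    #include <boost/serialization/list.hpp>
    #include <list>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;
      std::list<int> data;
      if (world.rank() == 0) {
        data.assign(10, 42);
        world.send(1, 0, mpi::skeleton(data));     // shape only
        world.send(1, 1, mpi::get_content(data));  // values only
      } else if (world.rank() == 1) {
        world.recv(0, 0, mpi::skeleton(data));     // allocates ten slots
        world.recv(0, 1, mpi::get_content(data));  // fills them in place
      }
    }
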
diff --git a/libs/mpi/test/version_test.cpp b/libs/mpi/test/version_test.cpp
index 5ebd90746e..0895b74f46 100644
--- a/libs/mpi/test/version_test.cpp
+++ b/libs/mpi/test/version_test.cpp
@@ -7,13 +7,16 @@
// test mpi version
#include <boost/mpi/environment.hpp>
-#include <boost/test/minimal.hpp>
+#include <boost/mpi/communicator.hpp>
#include <iostream>
+#define BOOST_TEST_MODULE mpi_version
+#include <boost/test/included/unit_test.hpp>
+
namespace mpi = boost::mpi;
-int
-test_main(int argc, char* argv[]) {
+void
+test_version(mpi::communicator const& comm) {
#if defined(MPI_VERSION)
int mpi_version = MPI_VERSION;
int mpi_subversion = MPI_SUBVERSION;
@@ -21,12 +24,37 @@ test_main(int argc, char* argv[]) {
int mpi_version = 0;
int mpi_subversion = 0;
#endif
-
- mpi::environment env(argc,argv);
- std::pair<int,int> version = env.version();
- std::cout << "MPI Version: " << version.first << ',' << version.second << '\n';
-
+
+ std::pair<int,int> version = mpi::environment::version();
+ if (comm.rank() == 0) {
+ std::cout << "MPI Version: " << version.first << ',' << version.second << '\n';
+ }
BOOST_CHECK(version.first == mpi_version);
BOOST_CHECK(version.second == mpi_subversion);
- return 0;
+}
+
+std::string
+yesno(bool b) {
+ return b ? std::string("yes") : std::string("no");
+}
+
+void
+report_features(mpi::communicator const& comm) {
+ if (comm.rank() == 0) {
+ std::cout << "Assuming working MPI_Improbe:" <<
+#if defined(BOOST_MPI_USE_IMPROBE)
+ "yes" << '\n';
+#else
+ "no" << '\n';
+#endif
+ }
+}
+
+BOOST_AUTO_TEST_CASE(version)
+{
+ mpi::environment env;
+ mpi::communicator world;
+
+ test_version(world);
+ report_features(world);
}
diff --git a/libs/mpi/test/wait_any_test.cpp b/libs/mpi/test/wait_any_test.cpp
index 3f21285894..e1f4c556de 100644
--- a/libs/mpi/test/wait_any_test.cpp
+++ b/libs/mpi/test/wait_any_test.cpp
@@ -13,13 +13,15 @@
#include <boost/mpi.hpp>
#include <boost/mpi/nonblocking.hpp>
#include <boost/serialization/string.hpp>
-#include <boost/test/minimal.hpp>
+
+#define BOOST_TEST_MODULE mpi_wait_any
+#include <boost/test/included/unit_test.hpp>
namespace mpi = boost::mpi;
-int test_main(int argc, char* argv[])
+BOOST_AUTO_TEST_CASE(wait_any)
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
std::vector<std::string> ss(world.size());
@@ -59,6 +61,4 @@ int test_main(int argc, char* argv[])
}
mpi::wait_all(sreqs.begin(), sreqs.end());
-
- return 0;
}
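
For background on the API under test: wait_any blocks until one request in the range completes and returns both the completing status and an iterator to the finished request, which callers typically erase before waiting again. A minimal sketch (not part of this patch; drain is a hypothetical helper, not from the test):

    // Illustrative only -- not part of this patch: draining a request set
    // one completion at a time.
    #include <boost/mpi.hpp>
    #include <boost/mpi/nonblocking.hpp>
    #include <utility>
    #include <vector>
    namespace mpi = boost::mpi;

    void drain(std::vector<mpi::request>& reqs)
    {
      while (!reqs.empty()) {
        std::pair<mpi::status, std::vector<mpi::request>::iterator>
          done = mpi::wait_any(reqs.begin(), reqs.end());
        // done.first.source() / done.first.tag() identify the message
        reqs.erase(done.second);
      }
    }
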