Diffstat (limited to 'inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi')
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/allocator.hpp  210
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives.hpp  697
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_gather.hpp  82
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_reduce.hpp  129
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_to_all.hpp  153
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/broadcast.hpp  145
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gather.hpp  152
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gatherv.hpp  164
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/reduce.hpp  376
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scan.hpp  168
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatter.hpp  161
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatterv.hpp  166
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives_fwd.hpp  23
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/communicator.hpp  1866
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/config.hpp  129
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype.hpp  374
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype_fwd.hpp  36
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_iprimitive.hpp  123
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_oprimitive.hpp  104
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/broadcast_sc.hpp  41
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/communicator_sc.hpp  96
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/computation_tree.hpp  86
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/content_oarchive.hpp  66
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_iarchive.hpp  80
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_oarchive.hpp  78
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_iprimitive.hpp  54
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_oprimitive.hpp  62
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_skeleton_oarchive.hpp  73
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_cache.hpp  99
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_oarchive.hpp  75
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_primitive.hpp  145
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_iprimitive.hpp  118
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_oprimitive.hpp  115
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/point_to_point.hpp  52
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/environment.hpp  281
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/exception.hpp  104
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/graph_communicator.hpp  575
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/group.hpp  340
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/inplace.hpp  63
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/intercommunicator.hpp  165
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/nonblocking.hpp  738
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/operations.hpp  322
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_iarchive.hpp  159
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_oarchive.hpp  147
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/request.hpp  102
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content.hpp  392
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content_fwd.hpp  31
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/status.hpp  107
-rw-r--r--  inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/timer.hpp  91
49 files changed, 0 insertions, 10115 deletions
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/allocator.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/allocator.hpp
deleted file mode 100644
index 141d96312..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/allocator.hpp
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file allocator.hpp
- *
- * This header provides an STL-compliant allocator that uses the
- * MPI-2 memory allocation facilities.
- */
-#ifndef BOOST_MPI_ALLOCATOR_HPP
-#define BOOST_MPI_ALLOCATOR_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/mpi/exception.hpp>
-#include <cstddef>
-#include <memory>
-#include <boost/limits.hpp>
-
-namespace boost { namespace mpi {
-
-#if defined(BOOST_MPI_HAS_MEMORY_ALLOCATION)
-template<typename T> class allocator;
-
-/** @brief Allocator specialization for @c void value types.
- *
- * The @c void specialization of @c allocator is useful only for
- * rebinding to another, different value type.
- */
-template<>
-class BOOST_MPI_DECL allocator<void>
-{
-public:
- typedef void* pointer;
- typedef const void* const_pointer;
- typedef void value_type;
-
- template <class U>
- struct rebind
- {
- typedef allocator<U> other;
- };
-};
-
-/** @brief Standard Library-compliant allocator for the MPI-2 memory
- * allocation routines.
- *
- * This allocator provides a standard C++ interface to the @c
- * MPI_Alloc_mem and @c MPI_Free_mem routines of MPI-2. It is
- * intended to be used with the containers in the Standard Library
- * (@c vector, in particular) in cases where the contents of the
- * container will be directly transmitted via MPI. This allocator is
- * also used internally by the library for character buffers that
- * will be used in the transmission of data.
- *
- * The @c allocator class template only provides MPI memory
- * allocation when the underlying MPI implementation is either MPI-2
- * compliant or is known to provide @c MPI_Alloc_mem and @c
- * MPI_Free_mem as extensions. When the MPI memory allocation
- * routines are not available, @c allocator is brought in directly
- * from namespace @c std, so that standard allocators are used
- * throughout. The macro @c BOOST_MPI_HAS_MEMORY_ALLOCATION will be
- * defined when the MPI-2 memory allocation facilities are available.
- */
-template<typename T>
-class BOOST_MPI_DECL allocator
-{
-public:
- /// Holds the size of objects
- typedef std::size_t size_type;
-
- /// Holds the number of elements between two pointers
- typedef std::ptrdiff_t difference_type;
-
- /// A pointer to an object of type @c T
- typedef T* pointer;
-
- /// A pointer to a constant object of type @c T
- typedef const T* const_pointer;
-
- /// A reference to an object of type @c T
- typedef T& reference;
-
- /// A reference to a constant object of type @c T
- typedef const T& const_reference;
-
- /// The type of memory allocated by this allocator
- typedef T value_type;
-
- /** @brief Retrieve the type of an allocator similar to this
- * allocator but for a different value type.
- */
- template <typename U>
- struct rebind
- {
- typedef allocator<U> other;
- };
-
- /** Default-construct an allocator. */
- allocator() throw() { }
-
- /** Copy-construct an allocator. */
- allocator(const allocator&) throw() { }
-
- /**
- * Copy-construct an allocator from another allocator for a
- * different value type.
- */
- template <typename U>
- allocator(const allocator<U>&) throw() { }
-
- /** Destroy an allocator. */
- ~allocator() throw() { }
-
- /** Returns the address of object @p x. */
- pointer address(reference x) const
- {
- return &x;
- }
-
- /** Returns the address of object @p x. */
- const_pointer address(const_reference x) const
- {
- return &x;
- }
-
- /**
- * Allocate enough memory for @p n elements of type @c T.
- *
- * @param n The number of elements for which memory should be
- * allocated.
- *
- * @return a pointer to the newly-allocated memory
- */
- pointer allocate(size_type n, allocator<void>::const_pointer /*hint*/ = 0)
- {
- pointer result;
- BOOST_MPI_CHECK_RESULT(MPI_Alloc_mem,
- (static_cast<MPI_Aint>(n * sizeof(T)),
- MPI_INFO_NULL,
- &result));
- return result;
- }
-
- /**
- * Deallocate memory referred to by the pointer @c p.
- *
- * @param p The pointer whose memory should be deallocated. This
- * pointer shall have been returned from the @c allocate() function
- * and not have already been freed.
- */
- void deallocate(pointer p, size_type /*n*/)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Free_mem, (p));
- }
-
- /**
- * Returns the maximum number of elements that can be allocated
- * with @c allocate().
- */
- size_type max_size() const throw()
- {
- return (std::numeric_limits<std::size_t>::max)() / sizeof(T);
- }
-
- /** Construct a copy of @p val at the location referenced by @c p. */
- void construct(pointer p, const T& val)
- {
- new ((void *)p) T(val);
- }
-
- /** Destroy the object referenced by @c p. */
- void destroy(pointer p)
- {
- ((T*)p)->~T();
- }
-};
-
-/** @brief Compare two allocators for equality.
- *
- * Since MPI allocators have no state, all MPI allocators are equal.
- *
- * @returns @c true
- */
-template<typename T1, typename T2>
-inline bool operator==(const allocator<T1>&, const allocator<T2>&) throw()
-{
- return true;
-}
-
-/** @brief Compare two allocators for inequality.
- *
- * Since MPI allocators have no state, all MPI allocators are equal.
- *
- * @returns @c false
- */
-template<typename T1, typename T2>
-inline bool operator!=(const allocator<T1>&, const allocator<T2>&) throw()
-{
- return false;
-}
-#else
-// Bring in the default allocator from namespace std.
-using std::allocator;
-#endif
-
-} } /// end namespace boost::mpi
-
-#endif // BOOST_MPI_ALLOCATOR_HPP
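
For context, the allocator deleted above is a drop-in STL allocator backed by MPI_Alloc_mem/MPI_Free_mem. A minimal, illustrative usage sketch (standalone code, not part of this repository, assuming an MPI-2 implementation so that BOOST_MPI_HAS_MEMORY_ALLOCATION is defined):

    #include <boost/mpi.hpp>
    #include <boost/mpi/allocator.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;

      // Buffer whose storage comes from MPI_Alloc_mem; suitable for data
      // that will be transmitted directly via MPI.
      std::vector<double, boost::mpi::allocator<double> > buffer(1024, world.rank());

      return 0;
    }
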
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives.hpp
deleted file mode 100644
index 72c429460..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives.hpp
+++ /dev/null
@@ -1,697 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4. MPI Collectives
-
-/** @file collectives.hpp
- *
- * This header contains MPI collective operations, which implement
- * various parallel algorithms that require the coordination of all
- * processes within a communicator. The header @c collectives_fwd.hpp
- * provides forward declarations for each of these operations. To
- * include only specific collective algorithms, use the headers @c
- * boost/mpi/collectives/algorithm_name.hpp.
- */
-#ifndef BOOST_MPI_COLLECTIVES_HPP
-#define BOOST_MPI_COLLECTIVES_HPP
-
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/inplace.hpp>
-#include <vector>
-
-namespace boost { namespace mpi {
-/**
- * @brief Gather the values stored at every process into vectors of
- * values from each process.
- *
- * @c all_gather is a collective algorithm that collects the values
- * stored at each process into a vector of values indexed by the
- * process number they came from. The type @c T of the values may be
- * any type that is serializable or has an associated MPI data type.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Allgather to gather the values.
- *
- * @param comm The communicator over which the all-gather will
- * occur.
- *
- * @param in_value The value to be transmitted by each process. To
- * gather an array of values, @c in_values points to the @c n local
- * values to be transmitted.
- *
- * @param out_values A vector or pointer to storage that will be
- * populated with the values from each process, indexed by the
- * process ID number. If it is a vector, the vector will be resized
- * accordingly.
- */
-template<typename T>
-void
-all_gather(const communicator& comm, const T& in_value,
- std::vector<T>& out_values);
-
-/**
- * \overload
- */
-template<typename T>
-void
-all_gather(const communicator& comm, const T& in_value, T* out_values);
-
-/**
- * \overload
- */
-template<typename T>
-void
-all_gather(const communicator& comm, const T* in_values, int n,
- std::vector<T>& out_values);
-
-/**
- * \overload
- */
-template<typename T>
-void
-all_gather(const communicator& comm, const T* in_values, int n, T* out_values);
-
-/**
- * @brief Combine the values stored by each process into a single
- * value available to all processes.
- *
- * @c all_reduce is a collective algorithm that combines the values
- * stored by each process into a single value available to all
- * processes. The values are combined in a user-defined way,
- * specified via a function object. The type @c T of the values may
- * be any type that is serializable or has an associated MPI data
- * type. One can think of this operation as an @c all_gather, followed
- * by an @c std::accumulate() over the gathered values using the
- * operation @c op.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Allreduce to perform the reduction. If possible,
- * built-in MPI operations will be used; otherwise, @c all_reduce()
- * will create a custom MPI_Op for the call to MPI_Allreduce.
- *
- * @param comm The communicator over which the reduction will
- * occur.
- * @param value The local value to be combined with the local
- * values of every other process. For reducing arrays, @c in_values
- * is a pointer to the local values to be reduced and @c n is the
- * number of values to reduce. See @c reduce for more information.
- *
- * If wrapped in an @c inplace_t object, the parameter serves as both
- * input and @c out_value, and the local value will be overwritten
- * (a convenience function @c inplace is provided for the wrapping).
- *
- * @param out_value Will receive the result of the reduction
- * operation. If this parameter is omitted, the outgoing value will
- * instead be returned.
- *
- * @param op The binary operation that combines two values of type
- * @c T and returns a third value of type @c T. For types @c T that have
- * associated MPI data types, @c op will either be translated into
- * an @c MPI_Op (via @c MPI_Op_create) or, if possible, mapped
- * directly to a built-in MPI operation. See @c is_mpi_op in the @c
- * operations.hpp header for more details on this mapping. For any
- * non-built-in operation, commutativity will be determined by the
- * @c is_commutative trait (also in @c operations.hpp): users are
- * encouraged to mark commutative operations as such, because it
- * gives the implementation additional latitude to optimize the
- * reduction operation.
- *
- * @param n Indicates the size of the buffers of array type.
- * @returns If no @p out_value parameter is supplied, returns the
- * result of the reduction operation.
- */
-template<typename T, typename Op>
-void
-all_reduce(const communicator& comm, const T* value, int n, T* out_value,
- Op op);
-/**
- * \overload
- */
-template<typename T, typename Op>
-void
-all_reduce(const communicator& comm, const T& value, T& out_value, Op op);
-/**
- * \overload
- */
-template<typename T, typename Op>
-T all_reduce(const communicator& comm, const T& value, Op op);
-
-/**
- * \overload
- */
-template<typename T, typename Op>
-void
-all_reduce(const communicator& comm, inplace_t<T*> value, int n,
- Op op);
-/**
- * \overload
- */
-template<typename T, typename Op>
-void
-all_reduce(const communicator& comm, inplace_t<T> value, Op op);
-
-/**
- * @brief Send data from every process to every other process.
- *
- * @c all_to_all is a collective algorithm that transmits @c p values
- * from every process to every other process. On process i, the jth
- * value of the @p in_values vector is sent to process j and placed in the
- * ith position of the @p out_values vector in process @p j. The type
- * @c T of the values may be any type that is serializable or has an
- * associated MPI data type. If @c n is provided, then arrays of @p n
- * values will be transferred from one process to another.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Alltoall to scatter the values.
- *
- * @param comm The communicator over which the all-to-all
- * communication will occur.
- *
- * @param in_values A vector or pointer to storage that contains
- * the values to send to each process, indexed by the process ID
- * number.
- *
- * @param out_values A vector or pointer to storage that will be
- * updated to contain the values received from other processes. The
- * jth value in @p out_values will come from the process with rank j.
- */
-template<typename T>
-void
-all_to_all(const communicator& comm, const std::vector<T>& in_values,
- std::vector<T>& out_values);
-
-/**
- * \overload
- */
-template<typename T>
-void all_to_all(const communicator& comm, const T* in_values, T* out_values);
-
-/**
- * \overload
- */
-template<typename T>
-void
-all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
- std::vector<T>& out_values);
-
-/**
- * \overload
- */
-template<typename T>
-void
-all_to_all(const communicator& comm, const T* in_values, int n, T* out_values);
-
-/**
- * @brief Broadcast a value from a root process to all other
- * processes.
- *
- * @c broadcast is a collective algorithm that transfers a value from
- * an arbitrary @p root process to every other process that is part of
- * the given communicator. The @c broadcast algorithm can transmit any
- * Serializable value, values that have associated MPI data types,
- * packed archives, skeletons, and the content of skeletons; see the
- * @c send primitive for communicators for a complete list. The type
- * @c T shall be the same for all processes that are a part of the
- * communicator @p comm, unless packed archives are being transferred:
- * with packed archives, the root sends a @c packed_oarchive or @c
- * packed_skeleton_oarchive whereas the other processes receive a
- * @c packed_iarchive or @c packed_skeleton_iarchive, respectively.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Bcast to perform the broadcast.
- *
- * @param comm The communicator over which the broadcast will
- * occur.
- *
- * @param value The value (or values, if @p n is provided) to be
- * transmitted (if the rank of @p comm is equal to @p root) or
- * received (if the rank of @p comm is not equal to @p root). When
- * the @p value is a @c skeleton_proxy, only the skeleton of the
- * object will be broadcast. In this case, the @p root will build a
- * skeleton from the object held in the proxy and all of the
- * non-roots will reshape the objects held in their proxies based on
- * the skeleton sent from the root.
- *
- * @param n When supplied, the number of values that the pointer @p
- * values points to, for broadcasting an array of values. The value
- * of @p n must be the same for all processes in @p comm.
- *
- * @param root The rank/process ID of the process that will be
- * transmitting the value.
- */
-template<typename T>
-void broadcast(const communicator& comm, T& value, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void broadcast(const communicator& comm, T* values, int n, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void broadcast(const communicator& comm, skeleton_proxy<T>& value, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-broadcast(const communicator& comm, const skeleton_proxy<T>& value, int root);
-
-/**
- * @brief Gather the values stored at every process into a vector at
- * the root process.
- *
- * @c gather is a collective algorithm that collects the values
- * stored at each process into a vector of values at the @p root
- * process. This vector is indexed by the process number that the
- * value came from. The type @c T of the values may be any type that
- * is serializable or has an associated MPI data type.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Gather to gather the values.
- *
- * @param comm The communicator over which the gather will occur.
- *
- * @param in_value The value to be transmitted by each process. For
- * gathering arrays of values, @c in_values points to the @c n local
- * values to be transmitted.
- *
- * @param out_values A vector or pointer to storage that will be
- * populated with the values from each process, indexed by the
- * process ID number. If it is a vector, it will be resized
- * accordingly. For non-root processes, this parameter may be
- * omitted. If it is still provided, however, it will be unchanged.
- *
- * @param root The process ID number that will collect the
- * values. This value must be the same on all processes.
- */
-template<typename T>
-void
-gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
- int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-gather(const communicator& comm, const T& in_value, T* out_values, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void gather(const communicator& comm, const T& in_value, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-gather(const communicator& comm, const T* in_values, int n,
- std::vector<T>& out_values, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-gather(const communicator& comm, const T* in_values, int n, T* out_values,
- int root);
-
-/**
- * \overload
- */
-template<typename T>
-void gather(const communicator& comm, const T* in_values, int n, int root);
-
-/**
- * @brief Similar to boost::mpi::gather with the difference that the number
- * of values to be sent by non-root processes can vary.
- *
- * @param comm The communicator over which the gather will occur.
- *
- * @param in_values The array of values to be transmitted by each process.
- *
- * @param in_size For each non-root process this specifies the size
- * of @p in_values.
- *
- * @param out_values A pointer to storage that will be populated with
- * the values from each process. For non-root processes, this parameter
- * may be omitted. If it is still provided, however, it will be unchanged.
- *
- * @param sizes A vector containing the number of elements each non-root
- * process will send.
- *
- * @param displs A vector such that the i-th entry specifies the
- * displacement (relative to @p out_values) from which to take the ingoing
- * data at the @p root process. Overloaded versions for which @p displs is
- * omitted assume that the data is to be placed contiguously at the root process.
- *
- * @param root The process ID number that will collect the
- * values. This value must be the same on all processes.
- */
-template<typename T>
-void
-gatherv(const communicator& comm, const std::vector<T>& in_values,
- T* out_values, const std::vector<int>& sizes, const std::vector<int>& displs,
- int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-gatherv(const communicator& comm, const T* in_values, int in_size,
- T* out_values, const std::vector<int>& sizes, const std::vector<int>& displs,
- int root);
-
-/**
- * \overload
- */
-template<typename T>
-void gatherv(const communicator& comm, const std::vector<T>& in_values, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void gatherv(const communicator& comm, const T* in_values, int in_size, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-gatherv(const communicator& comm, const T* in_values, int in_size,
- T* out_values, const std::vector<int>& sizes, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-gatherv(const communicator& comm, const std::vector<T>& in_values,
- T* out_values, const std::vector<int>& sizes, int root);
-
-/**
- * @brief Scatter the values stored at the root to all processes
- * within the communicator.
- *
- * @c scatter is a collective algorithm that scatters the values
- * stored in the @p root process (inside a vector) to all of the
- * processes in the communicator. The vector @p out_values (only
- * significant at the @p root) is indexed by the process number to
- * which the corresponding value will be sent. The type @c T of the
- * values may be any type that is serializable or has an associated
- * MPI data type.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Scatter to scatter the values.
- *
- * @param comm The communicator over which the scatter will occur.
- *
- * @param in_values A vector or pointer to storage that will contain
- * the values to send to each process, indexed by the process rank.
- * For non-root processes, this parameter may be omitted. If it is
- * still provided, however, it will be unchanged.
- *
- * @param out_value The value received by each process. When
- * scattering an array of values, @p out_values points to the @p n
- * values that will be received by each process.
- *
- * @param root The process ID number that will scatter the
- * values. This value must be the same on all processes.
- */
-template<typename T>
-void
-scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
- int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-scatter(const communicator& comm, const T* in_values, T& out_value, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void scatter(const communicator& comm, T& out_value, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-scatter(const communicator& comm, const std::vector<T>& in_values,
- T* out_values, int n, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-scatter(const communicator& comm, const T* in_values, T* out_values, int n,
- int root);
-
-/**
- * \overload
- */
-template<typename T>
-void scatter(const communicator& comm, T* out_values, int n, int root);
-
-/**
- * @brief Similar to boost::mpi::scatter with the difference that the number
- * of values stored at the root process does not need to be a multiple of
- * the communicator's size.
- *
- * @param comm The communicator over which the scatter will occur.
- *
- * @param in_values A vector or pointer to storage that will contain
- * the values to send to each process, indexed by the process rank.
- * For non-root processes, this parameter may be omitted. If it is
- * still provided, however, it will be unchanged.
- *
- * @param sizes A vector containing the number of elements each non-root
- * process will receive.
- *
- * @param displs A vector such that the i-th entry specifies the
- * displacement (relative to @p in_values) from which to take the outgoing
- * data to process i. Overloaded versions for which @p displs is omitted
- * assume that the data is contiguous at the @p root process.
- *
- * @param out_values The array of values received by each process.
- *
- * @param out_size For each non-root process this will contain the size
- * of @p out_values.
- *
- * @param root The process ID number that will scatter the
- * values. This value must be the same on all processes.
- */
-template<typename T>
-void
-scatterv(const communicator& comm, const std::vector<T>& in_values,
- const std::vector<int>& sizes, const std::vector<int>& displs,
- T* out_values, int out_size, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-scatterv(const communicator& comm, const T* in_values,
- const std::vector<int>& sizes, const std::vector<int>& displs,
- T* out_values, int out_size, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void scatterv(const communicator& comm, T* out_values, int out_size, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-scatterv(const communicator& comm, const T* in_values,
- const std::vector<int>& sizes, T* out_values, int root);
-
-/**
- * \overload
- */
-template<typename T>
-void
-scatterv(const communicator& comm, const std::vector<T>& in_values,
- const std::vector<int>& sizes, T* out_values, int root);
-
-/**
- * @brief Combine the values stored by each process into a single
- * value at the root.
- *
- * @c reduce is a collective algorithm that combines the values
- * stored by each process into a single value at the @c root. The
- * values can be combined arbitrarily, specified via a function
- * object. The type @c T of the values may be any type that is
- * serializable or has an associated MPI data type. One can think of
- * this operation as a @c gather to the @p root, followed by an @c
- * std::accumulate() over the gathered values and using the operation
- * @c op.
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Reduce to perform the reduction. If possible,
- * built-in MPI operations will be used; otherwise, @c reduce() will
- * create a custom MPI_Op for the call to MPI_Reduce.
- *
- * @param comm The communicator over which the reduction will
- * occur.
- *
- * @param in_value The local value to be combined with the local
- * values of every other process. For reducing arrays, @c in_values
- * contains a pointer to the local values. In this case, @c n is
- * the number of values that will be reduced. Reduction occurs
- * independently for each of the @p n values referenced by @p
- * in_values, e.g., calling reduce on an array of @p n values is
- * like calling @c reduce @p n separate times, one for each
- * location in @p in_values and @p out_values.
- *
- * @param out_value Will receive the result of the reduction
- * operation, but only for the @p root process. Non-root processes
- * may omit this parameter; if they choose to supply the parameter,
- * it will be unchanged. For reducing arrays, @c out_values
- * contains a pointer to the storage for the output values.
- *
- * @param op The binary operation that combines two values of type
- * @c T into a third value of type @c T. For types @c T that have
- * associated MPI data types, @c op will either be translated into
- * an @c MPI_Op (via @c MPI_Op_create) or, if possible, mapped
- * directly to a built-in MPI operation. See @c is_mpi_op in the @c
- * operations.hpp header for more details on this mapping. For any
- * non-built-in operation, commutativity will be determined by the
- * @c is_commutative trait (also in @c operations.hpp): users are
- * encouraged to mark commutative operations as such, because it
- * gives the implementation additional latitude to optimize the
- * reduction operation.
- *
- * @param root The process ID number that will receive the final,
- * combined value. This value must be the same on all processes.
- */
-template<typename T, typename Op>
-void
-reduce(const communicator& comm, const T& in_value, T& out_value, Op op,
- int root);
-
-/**
- * \overload
- */
-template<typename T, typename Op>
-void reduce(const communicator& comm, const T& in_value, Op op, int root);
-
-/**
- * \overload
- */
-template<typename T, typename Op>
-void
-reduce(const communicator& comm, const T* in_values, int n, T* out_values,
- Op op, int root);
-
-/**
- * \overload
- */
-template<typename T, typename Op>
-void
-reduce(const communicator& comm, const T* in_values, int n, Op op, int root);
-
-/**
- * @brief Compute a prefix reduction of values from all processes in
- * the communicator.
- *
- * @c scan is a collective algorithm that combines the values stored
- * by each process with the values of all processes with a smaller
- * rank. The values can be arbitrarily combined, specified via a
- * function object @p op. The type @c T of the values may be any type
- * that is serializable or has an associated MPI data type. One can
- * think of this operation as a @c gather to some process, followed
- * by an @c std::prefix_sum() over the gathered values using the
- * operation @c op. The ith process returns the ith value emitted by
- * @c std::prefix_sum().
- *
- * When the type @c T has an associated MPI data type, this routine
- * invokes @c MPI_Scan to perform the reduction. If possible,
- * built-in MPI operations will be used; otherwise, @c scan() will
- * create a custom @c MPI_Op for the call to MPI_Scan.
- *
- * @param comm The communicator over which the prefix reduction
- * will occur.
- *
- * @param in_value The local value to be combined with the local
- * values of other processes. For the array variant, the @c
- * in_values parameter points to the @c n local values that will be
- * combined.
- *
- * @param out_value If provided, the ith process will receive the
- * value @c op(in_value[0], op(in_value[1], op(..., in_value[i])
- * ... )). For the array variant, @c out_values contains a pointer
- * to storage for the @c n output values. The prefix reduction
- * occurs independently for each of the @p n values referenced by
- * @p in_values, e.g., calling scan on an array of @p n values is
- * like calling @c scan @p n separate times, one for each location
- * in @p in_values and @p out_values.
- *
- * @param op The binary operation that combines two values of type
- * @c T into a third value of type @c T. For types @c T that have
- * associated MPI data types, @c op will either be translated into
- * an @c MPI_Op (via @c MPI_Op_create) or, if possible, mapped
- * directly to a built-in MPI operation. See @c is_mpi_op in the @c
- * operations.hpp header for more details on this mapping. For any
- * non-built-in operation, commutativity will be determined by the
- * @c is_commutative trait (also in @c operations.hpp).
- *
- * @returns If no @p out_value parameter is provided, returns the
- * result of prefix reduction.
- */
-template<typename T, typename Op>
-void
-scan(const communicator& comm, const T& in_value, T& out_value, Op op);
-
-/**
- * \overload
- */
-template<typename T, typename Op>
-T
-scan(const communicator& comm, const T& in_value, Op op);
-
-/**
- * \overload
- */
-template<typename T, typename Op>
-void
-scan(const communicator& comm, const T* in_values, int n, T* out_values, Op op);
-
-} } // end namespace boost::mpi
-#endif // BOOST_MPI_COLLECTIVES_HPP
-
-#ifndef BOOST_MPI_COLLECTIVES_FORWARD_ONLY
-// Include implementations of each of the collectives
-# include <boost/mpi/collectives/all_gather.hpp>
-# include <boost/mpi/collectives/all_reduce.hpp>
-# include <boost/mpi/collectives/all_to_all.hpp>
-# include <boost/mpi/collectives/broadcast.hpp>
-# include <boost/mpi/collectives/gather.hpp>
-# include <boost/mpi/collectives/gatherv.hpp>
-# include <boost/mpi/collectives/scatter.hpp>
-# include <boost/mpi/collectives/scatterv.hpp>
-# include <boost/mpi/collectives/reduce.hpp>
-# include <boost/mpi/collectives/scan.hpp>
-#endif
-
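
The declarations above cover the whole collectives interface. As a minimal, illustrative sketch of the root-based reduce documented above (standalone code, not part of this repository):

    #include <boost/mpi.hpp>
    #include <functional>
    #include <iostream>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;

      int sum = 0;
      // Combine every rank's value at rank 0; std::plus<int> maps to MPI_SUM.
      boost::mpi::reduce(world, world.rank(), sum, std::plus<int>(), 0);
      if (world.rank() == 0)
        std::cout << "sum of ranks: " << sum << std::endl;
      return 0;
    }
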
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_gather.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_gather.hpp
deleted file mode 100644
index da73186c6..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_gather.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.7. Gather-to-all
-#ifndef BOOST_MPI_ALL_GATHER_HPP
-#define BOOST_MPI_ALL_GATHER_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/serialization/vector.hpp>
-
-// all_gather falls back to gather+broadcast in some cases
-#include <boost/mpi/collectives/broadcast.hpp>
-#include <boost/mpi/collectives/gather.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // We're all-gathering for a type that has an associated MPI
- // datatype, so we'll use MPI_Allgather to do all of the work.
- template<typename T>
- void
- all_gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, mpl::true_)
- {
- MPI_Datatype type = boost::mpi::get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Allgather,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, comm));
- }
-
- // We're all-gathering for a type that has no associated MPI
- // type. So, we'll do a manual gather followed by a broadcast.
- template<typename T>
- void
- all_gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, mpl::false_)
- {
- gather(comm, in_values, n, out_values, 0);
- broadcast(comm, out_values, comm.size() * n, 0);
- }
-} // end namespace detail
-
-template<typename T>
-inline void
-all_gather(const communicator& comm, const T& in_value, T* out_values)
-{
- detail::all_gather_impl(comm, &in_value, 1, out_values, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-all_gather(const communicator& comm, const T& in_value,
- std::vector<T>& out_values)
-{
- out_values.resize(comm.size());
- ::boost::mpi::all_gather(comm, &in_value, 1, &out_values[0]);
-}
-
-template<typename T>
-inline void
-all_gather(const communicator& comm, const T* in_values, int n, T* out_values)
-{
- detail::all_gather_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-all_gather(const communicator& comm, const T* in_values, int n,
- std::vector<T>& out_values)
-{
- out_values.resize(comm.size() * n);
- ::boost::mpi::all_gather(comm, in_values, n, &out_values[0]);
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_ALL_GATHER_HPP
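
The all_gather overloads above resize the output vector automatically; a minimal, illustrative usage sketch (standalone code, not part of this repository):

    #include <boost/mpi.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;

      std::vector<int> ranks;
      // int has an associated MPI datatype, so this dispatches to MPI_Allgather;
      // afterwards ranks[i] == i on every process.
      boost::mpi::all_gather(world, world.rank(), ranks);
      return 0;
    }
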
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_reduce.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_reduce.hpp
deleted file mode 100644
index 06e116a65..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_reduce.hpp
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>
-// Copyright (C) 2004 The Trustees of Indiana University
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
-#ifndef BOOST_MPI_ALL_REDUCE_HPP
-#define BOOST_MPI_ALL_REDUCE_HPP
-
-#include <vector>
-
-#include <boost/mpi/inplace.hpp>
-
-// All-reduce falls back to reduce() + broadcast() in some cases.
-#include <boost/mpi/collectives/broadcast.hpp>
-#include <boost/mpi/collectives/reduce.hpp>
-
-namespace boost { namespace mpi {
-namespace detail {
- /**********************************************************************
- * Simple reduction with MPI_Allreduce *
- **********************************************************************/
- // We are reducing for a type that has an associated MPI
- // datatype and operation, so we'll use MPI_Allreduce directly.
- template<typename T, typename Op>
- void
- all_reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op /*op*/, mpl::true_ /*is_mpi_op*/,
- mpl::true_ /*is_mpi_datatype*/)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
- (const_cast<T*>(in_values), out_values, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- (is_mpi_op<Op, T>::op()), comm));
- }
-
- /**********************************************************************
- * User-defined reduction with MPI_Allreduce *
- **********************************************************************/
- // We are reducing for a type that has an associated MPI
- // datatype but with a custom operation. We'll use MPI_Allreduce
- // directly, but we'll need to create an MPI_Op manually.
- template<typename T, typename Op>
- void
- all_reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
- mpl::true_ /*is_mpi_datatype*/)
- {
- user_op<Op, T> mpi_op(op);
- BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
- (const_cast<T*>(in_values), out_values, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- mpi_op.get_mpi_op(), comm));
- }
-
- /**********************************************************************
- * User-defined, tree-based reduction for non-MPI data types *
- **********************************************************************/
- // We are reducing for a type that has no associated MPI
- // datatype and operation, so we fall back to a reduce() to rank 0
- // followed by a broadcast().
- template<typename T, typename Op>
- void
- all_reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
- mpl::false_ /*is_mpi_datatype*/)
- {
- if (in_values == MPI_IN_PLACE) {
- // if in_values matches the in place tag, then the output
- // buffer actually contains the input data.
- // But we can just go back to the out of place
- // implementation in this case.
- // it's not clear how/if we can avoid the copy.
- std::vector<T> tmp_in( out_values, out_values + n);
- reduce(comm, &(tmp_in[0]), n, out_values, op, 0);
- } else {
- reduce(comm, in_values, n, out_values, op, 0);
- }
- broadcast(comm, out_values, n, 0);
- }
-} // end namespace detail
-
-template<typename T, typename Op>
-inline void
-all_reduce(const communicator& comm, const T* in_values, int n, T* out_values,
- Op op)
-{
- detail::all_reduce_impl(comm, in_values, n, out_values, op,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-inline void
-all_reduce(const communicator& comm, inplace_t<T*> inout_values, int n, Op op)
-{
- all_reduce(comm, static_cast<const T*>(MPI_IN_PLACE), n, inout_values.buffer, op);
-}
-
-template<typename T, typename Op>
-inline void
-all_reduce(const communicator& comm, inplace_t<T> inout_values, Op op)
-{
- all_reduce(comm, static_cast<const T*>(MPI_IN_PLACE), 1, &(inout_values.buffer), op);
-}
-
-template<typename T, typename Op>
-inline void
-all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op)
-{
- detail::all_reduce_impl(comm, &in_value, 1, &out_value, op,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-T all_reduce(const communicator& comm, const T& in_value, Op op)
-{
- T result;
- ::boost::mpi::all_reduce(comm, in_value, result, op);
- return result;
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_ALL_REDUCE_HPP
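
The all_reduce overloads above include a value-returning form and an in-place form; a minimal, illustrative sketch (standalone code, not part of this repository):

    #include <boost/mpi.hpp>
    #include <functional>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;

      // Value-returning overload: every rank receives the global maximum.
      int max_rank = boost::mpi::all_reduce(world, world.rank(), boost::mpi::maximum<int>());

      // In-place overload: the local value is both input and output.
      int sum = world.rank();
      boost::mpi::all_reduce(world, boost::mpi::inplace(sum), std::plus<int>());

      (void)max_rank;
      return 0;
    }
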
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_to_all.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_to_all.hpp
deleted file mode 100644
index 8c33c2a16..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/all_to_all.hpp
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright (C) 2005, 2006 Douglas Gregor.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.8. All-to-all
-#ifndef BOOST_MPI_ALL_TO_ALL_HPP
-#define BOOST_MPI_ALL_TO_ALL_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/assert.hpp>
-#include <boost/mpi/collectives_fwd.hpp>
-#include <boost/mpi/allocator.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // We're performing an all-to-all with a type that has an
- // associated MPI datatype, so we'll use MPI_Alltoall to do all of
- // the work.
- template<typename T>
- void
- all_to_all_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Alltoall,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, comm));
- }
-
- // We're performing an all-to-all with a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Alltoall, so
- // we'll just have to send individual messages to the other
- // processes.
- template<typename T>
- void
- all_to_all_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, mpl::false_)
- {
- int size = comm.size();
- int rank = comm.rank();
-
- // The amount of data to be sent to each process
- std::vector<int> send_sizes(size);
-
- // The displacements for each outgoing value.
- std::vector<int> send_disps(size);
-
- // The buffer that will store all of the outgoing values
- std::vector<char, allocator<char> > outgoing;
-
- // Pack the buffer with all of the outgoing values.
- for (int dest = 0; dest < size; ++dest) {
- // Keep track of the displacements
- send_disps[dest] = outgoing.size();
-
- // Our own value will never be transmitted, so don't pack it.
- if (dest != rank) {
- packed_oarchive oa(comm, outgoing);
- for (int i = 0; i < n; ++i)
- oa << in_values[dest * n + i];
- }
-
- // Keep track of the sizes
- send_sizes[dest] = outgoing.size() - send_disps[dest];
- }
-
- // Determine how much data each process will receive.
- std::vector<int> recv_sizes(size);
- all_to_all(comm, send_sizes, recv_sizes);
-
- // Prepare a buffer to receive the incoming data.
- std::vector<int> recv_disps(size);
- int sum = 0;
- for (int src = 0; src < size; ++src) {
- recv_disps[src] = sum;
- sum += recv_sizes[src];
- }
- std::vector<char, allocator<char> > incoming(sum > 0? sum : 1);
-
- // Make sure we don't try to reference an empty vector
- if (outgoing.empty())
- outgoing.push_back(0);
-
- // Transmit the actual data
- BOOST_MPI_CHECK_RESULT(MPI_Alltoallv,
- (&outgoing[0], &send_sizes[0],
- &send_disps[0], MPI_PACKED,
- &incoming[0], &recv_sizes[0],
- &recv_disps[0], MPI_PACKED,
- comm));
-
- // Deserialize data from the iarchive
- for (int src = 0; src < size; ++src) {
- if (src == rank)
- std::copy(in_values + src * n, in_values + (src + 1) * n,
- out_values + src * n);
- else {
- packed_iarchive ia(comm, incoming, boost::archive::no_header,
- recv_disps[src]);
- for (int i = 0; i < n; ++i)
- ia >> out_values[src * n + i];
- }
- }
- }
-} // end namespace detail
-
-template<typename T>
-inline void
-all_to_all(const communicator& comm, const T* in_values, T* out_values)
-{
- detail::all_to_all_impl(comm, in_values, 1, out_values, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-all_to_all(const communicator& comm, const std::vector<T>& in_values,
- std::vector<T>& out_values)
-{
- BOOST_ASSERT((int)in_values.size() == comm.size());
- out_values.resize(comm.size());
- ::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
-}
-
-template<typename T>
-inline void
-all_to_all(const communicator& comm, const T* in_values, int n, T* out_values)
-{
- detail::all_to_all_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
- std::vector<T>& out_values)
-{
- BOOST_ASSERT((int)in_values.size() == comm.size() * n);
- out_values.resize(comm.size() * n);
- ::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_ALL_TO_ALL_HPP
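
The all_to_all variants above exchange one (or n) values between every pair of ranks; a minimal, illustrative sketch (standalone code, not part of this repository):

    #include <boost/mpi.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;

      int p = world.size();
      std::vector<int> out(p), in(p);
      for (int dest = 0; dest < p; ++dest)
        out[dest] = world.rank() * 100 + dest;   // value destined for rank 'dest'

      // Afterwards, in[src] holds the value rank 'src' sent to this rank:
      // src * 100 + world.rank().
      boost::mpi::all_to_all(world, out, in);
      return 0;
    }
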
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/broadcast.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/broadcast.hpp
deleted file mode 100644
index d5160cff7..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/broadcast.hpp
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.4. Broadcast
-#ifndef BOOST_MPI_BROADCAST_HPP
-#define BOOST_MPI_BROADCAST_HPP
-
-#include <boost/mpi/collectives_fwd.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/mpi/communicator.hpp>
-
-namespace boost { namespace mpi {
-
-/************************************************************************
- * Specializations *
- ************************************************************************/
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-broadcast<const packed_oarchive>(const communicator& comm,
- const packed_oarchive& oa,
- int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-broadcast<packed_oarchive>(const communicator& comm, packed_oarchive& oa,
- int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-broadcast<packed_iarchive>(const communicator& comm, packed_iarchive& ia,
- int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-broadcast<const packed_skeleton_oarchive>(const communicator& comm,
- const packed_skeleton_oarchive& oa,
- int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-void
-broadcast<packed_skeleton_oarchive>(const communicator& comm,
- packed_skeleton_oarchive& oa, int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-void
-broadcast<packed_skeleton_iarchive>(const communicator& comm,
- packed_skeleton_iarchive& ia, int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-void broadcast<content>(const communicator& comm, content& c, int root);
-
-/**
- * INTERNAL ONLY
- */
-template<>
-void broadcast<const content>(const communicator& comm, const content& c,
- int root);
-
-/************************************************************************
- * broadcast() implementation *
- ************************************************************************/
-namespace detail {
- // We're sending a type that has an associated MPI datatype, so
- // we'll use MPI_Bcast to do all of the work.
- template<typename T>
- void
- broadcast_impl(const communicator& comm, T* values, int n, int root,
- mpl::true_)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Bcast,
- (values, n,
- boost::mpi::get_mpi_datatype<T>(*values),
- root, MPI_Comm(comm)));
- }
-
- // We're sending a type that does not have an associated MPI
- // datatype, so we'll need to serialize it. Unfortunately, this
- // means that we cannot use MPI_Bcast, so we'll just send from the
- // root to everyone else.
- template<typename T>
- void
- broadcast_impl(const communicator& comm, T* values, int n, int root,
- mpl::false_)
- {
- if (comm.rank() == root) {
- packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
- oa << values[i];
- broadcast(comm, oa, root);
- } else {
- packed_iarchive ia(comm);
- broadcast(comm, ia, root);
- for (int i = 0; i < n; ++i)
- ia >> values[i];
- }
- }
-} // end namespace detail
-
-template<typename T>
-void broadcast(const communicator& comm, T& value, int root)
-{
- detail::broadcast_impl(comm, &value, 1, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void broadcast(const communicator& comm, T* values, int n, int root)
-{
- detail::broadcast_impl(comm, values, n, root, is_mpi_datatype<T>());
-}
-
-} } // end namespace boost::mpi
-
-// If the user has already included skeleton_and_content.hpp, include
-// the code to broadcast skeletons and content.
-#ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
-# include <boost/mpi/detail/broadcast_sc.hpp>
-#endif
-
-#endif // BOOST_MPI_BROADCAST_HPP
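
The broadcast implementation above dispatches on whether the type has an MPI datatype; a minimal, illustrative sketch using the serialized path (standalone code, not part of this repository):

    #include <boost/mpi.hpp>
    #include <boost/serialization/string.hpp>
    #include <string>

    int main(int argc, char* argv[])
    {
      boost::mpi::environment env(argc, argv);
      boost::mpi::communicator world;

      std::string message;
      if (world.rank() == 0)
        message = "hello from root";

      // std::string has no MPI datatype, so it is serialized and sent from
      // rank 0 to every other rank; built-in types would go through MPI_Bcast.
      boost::mpi::broadcast(world, message, 0);
      return 0;
    }
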
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gather.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gather.hpp
deleted file mode 100644
index 70dfd6531..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gather.hpp
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright (C) 2005, 2006 Douglas Gregor.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.5. Gather
-#ifndef BOOST_MPI_GATHER_HPP
-#define BOOST_MPI_GATHER_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/detail/point_to_point.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // We're gathering at the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Gather to do all of the work.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Gather,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, root, comm));
- }
-
- // We're gathering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Gather to do all of the work.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Gather,
- (const_cast<T*>(in_values), n, type,
- 0, n, type, root, comm));
- }
-
- // We're gathering at the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gather, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int size = comm.size();
-
- for (int src = 0; src < size; ++src) {
- if (src == root)
- std::copy(in_values, in_values + n, out_values + n * src);
- else
- comm.recv(src, tag, out_values + n * src, n);
- }
- }
-
- // We're gathering at a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gather, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- gather_impl(const communicator& comm, const T* in_values, int n, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
- comm.send(root, tag, in_values, n);
- }
-} // end namespace detail
-
-template<typename T>
-void
-gather(const communicator& comm, const T& in_value, T* out_values, int root)
-{
- if (comm.rank() == root)
- detail::gather_impl(comm, &in_value, 1, out_values, root,
- is_mpi_datatype<T>());
- else
- detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void gather(const communicator& comm, const T& in_value, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::gather_impl(comm, &in_value, 1, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-gather(const communicator& comm, const T& in_value, std::vector<T>& out_values,
- int root)
-{
- if (comm.rank() == root) {
- out_values.resize(comm.size());
- ::boost::mpi::gather(comm, in_value, &out_values[0], root);
- } else {
- ::boost::mpi::gather(comm, in_value, root);
- }
-}
-
-template<typename T>
-void
-gather(const communicator& comm, const T* in_values, int n, T* out_values,
- int root)
-{
- if (comm.rank() == root)
- detail::gather_impl(comm, in_values, n, out_values, root,
- is_mpi_datatype<T>());
- else
- detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-gather(const communicator& comm, const T* in_values, int n,
- std::vector<T>& out_values, int root)
-{
- if (comm.rank() == root) {
- out_values.resize(comm.size() * n);
- ::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
- }
- else
- ::boost::mpi::gather(comm, in_values, n, root);
-}
-
-template<typename T>
-void gather(const communicator& comm, const T* in_values, int n, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::gather_impl(comm, in_values, n, root, is_mpi_datatype<T>());
-}
-
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_GATHER_HPP
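A short usage sketch of the gather() overloads removed above (illustrative only, not part of the header): every rank contributes one value, and only the root receives the full set.

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  int my_value = world.rank() * world.rank();
  std::vector<int> all_values;                 // filled only on the root

  // The vector overload branches internally on rank() == root.
  boost::mpi::gather(world, my_value, all_values, 0);

  if (world.rank() == 0)
    for (std::size_t i = 0; i < all_values.size(); ++i)
      std::cout << "rank " << i << " sent " << all_values[i] << "\n";
  return 0;
}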
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gatherv.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gatherv.hpp
deleted file mode 100644
index eb5f9c16d..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/gatherv.hpp
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (C) 2011 Júlio Hoffimann.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.5. Gatherv
-#ifndef BOOST_MPI_GATHERV_HPP
-#define BOOST_MPI_GATHERV_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/detail/point_to_point.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // We're gathering at the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Gatherv to do all of the work.
- template<typename T>
- void
- gatherv_impl(const communicator& comm, const T* in_values, int in_size,
- T* out_values, const int* sizes, const int* displs, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Gatherv,
- (const_cast<T*>(in_values), in_size, type,
- out_values, const_cast<int*>(sizes), const_cast<int*>(displs),
- type, root, comm));
- }
-
- // We're gathering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Gatherv to do all of the work.
- template<typename T>
- void
- gatherv_impl(const communicator& comm, const T* in_values, int in_size, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Gatherv,
- (const_cast<T*>(in_values), in_size, type,
- 0, 0, 0, type, root, comm));
- }
-
- // We're gathering at the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gatherv, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- gatherv_impl(const communicator& comm, const T* in_values, int in_size,
- T* out_values, const int* sizes, const int* displs, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int nprocs = comm.size();
-
- for (int src = 0; src < nprocs; ++src) {
- if (src == root)
- // Our own values will never be transmitted: just copy them.
- std::copy(in_values, in_values + in_size, out_values + displs[src]);
- else {
-// comm.recv(src, tag, out_values + displs[src], sizes[src]);
- // Receive archive
- packed_iarchive ia(comm);
- MPI_Status status;
- detail::packed_archive_recv(comm, src, tag, ia, status);
- for (int i = 0; i < sizes[src]; ++i)
- ia >> out_values[ displs[src] + i ];
- }
- }
- }
-
- // We're gathering at a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Gatherv, so
- // we'll just have all of the non-root nodes send individual
- // messages to the root.
- template<typename T>
- void
- gatherv_impl(const communicator& comm, const T* in_values, int in_size, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
-// comm.send(root, tag, in_values, in_size);
- packed_oarchive oa(comm);
- for (int i = 0; i < in_size; ++i)
- oa << in_values[i];
- detail::packed_archive_send(comm, root, tag, oa);
- }
-} // end namespace detail
-
-template<typename T>
-void
-gatherv(const communicator& comm, const T* in_values, int in_size,
- T* out_values, const std::vector<int>& sizes, const std::vector<int>& displs,
- int root)
-{
- if (comm.rank() == root)
- detail::gatherv_impl(comm, in_values, in_size,
- out_values, &sizes[0], &displs[0],
- root, is_mpi_datatype<T>());
- else
- detail::gatherv_impl(comm, in_values, in_size, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-gatherv(const communicator& comm, const std::vector<T>& in_values,
- T* out_values, const std::vector<int>& sizes, const std::vector<int>& displs,
- int root)
-{
- ::boost::mpi::gatherv(comm, &in_values[0], in_values.size(), out_values, sizes, displs, root);
-}
-
-template<typename T>
-void gatherv(const communicator& comm, const T* in_values, int in_size, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::gatherv_impl(comm, in_values, in_size, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void gatherv(const communicator& comm, const std::vector<T>& in_values, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::gatherv_impl(comm, &in_values[0], in_values.size(), root, is_mpi_datatype<T>());
-}
-
-///////////////////////
-// common use versions
-///////////////////////
-template<typename T>
-void
-gatherv(const communicator& comm, const T* in_values, int in_size,
- T* out_values, const std::vector<int>& sizes, int root)
-{
- int nprocs = comm.size();
-
- std::vector<int> displs( nprocs );
- for (int rank = 0, aux = 0; rank < nprocs; ++rank) {
- displs[rank] = aux;
- aux += sizes[rank];
- }
- ::boost::mpi::gatherv(comm, in_values, in_size, out_values, sizes, displs, root);
-}
-
-template<typename T>
-void
-gatherv(const communicator& comm, const std::vector<T>& in_values,
- T* out_values, const std::vector<int>& sizes, int root)
-{
- ::boost::mpi::gatherv(comm, &in_values[0], in_values.size(), out_values, sizes, root);
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_GATHERV_HPP
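An illustrative sketch of the gatherv() overloads deleted above (not part of the header), using the "common use" variant on the root so that displacements are derived from the per-rank sizes.

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Rank r contributes r + 1 copies of its own rank.
  std::vector<int> mine(world.rank() + 1, world.rank());

  if (world.rank() == 0) {
    std::vector<int> sizes(world.size());
    for (int r = 0; r < world.size(); ++r)
      sizes[r] = r + 1;

    std::vector<int> all((world.size() * (world.size() + 1)) / 2);
    // "Common use" overload: displacements are computed from 'sizes'.
    boost::mpi::gatherv(world, mine, &all[0], sizes, 0);
    std::cout << "root gathered " << all.size() << " values" << std::endl;
  } else {
    // Non-root ranks only contribute their local values.
    boost::mpi::gatherv(world, mine, 0);
  }
  return 0;
}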
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/reduce.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/reduce.hpp
deleted file mode 100644
index 1e2722ee6..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/reduce.hpp
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>.
-// Copyright (C) 2004 The Trustees of Indiana University
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-// Message Passing Interface 1.1 -- Section 4.9.1. Reduce
-#ifndef BOOST_MPI_REDUCE_HPP
-#define BOOST_MPI_REDUCE_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-
-// For (de-)serializing sends and receives
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-
-// For packed_[io]archive sends and receives
-#include <boost/mpi/detail/point_to_point.hpp>
-
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/mpi/detail/computation_tree.hpp>
-#include <boost/mpi/operations.hpp>
-#include <algorithm>
-#include <exception>
-#include <boost/assert.hpp>
-#include <boost/scoped_array.hpp>
-
-namespace boost { namespace mpi {
-
-
-/************************************************************************
- * Implementation details *
- ************************************************************************/
-namespace detail {
- /**********************************************************************
- * Simple reduction with MPI_Reduce *
- **********************************************************************/
- // We are reducing at the root for a type that has an associated MPI
- // datatype and operation, so we'll use MPI_Reduce directly.
- template<typename T, typename Op>
- void
- reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op /*op*/, int root, mpl::true_ /*is_mpi_op*/,
- mpl::true_/*is_mpi_datatype*/)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Reduce,
- (const_cast<T*>(in_values), out_values, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- (is_mpi_op<Op, T>::op()), root, comm));
- }
-
- // We are reducing to the root for a type that has an associated MPI
- // datatype and operation, so we'll use MPI_Reduce directly.
- template<typename T, typename Op>
- void
- reduce_impl(const communicator& comm, const T* in_values, int n, Op /*op*/,
- int root, mpl::true_ /*is_mpi_op*/, mpl::true_/*is_mpi_datatype*/)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Reduce,
- (const_cast<T*>(in_values), 0, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- (is_mpi_op<Op, T>::op()), root, comm));
- }
-
- /**********************************************************************
- * User-defined reduction with MPI_Reduce *
- **********************************************************************/
-
- // We are reducing at the root for a type that has an associated MPI
- // datatype but with a custom operation. We'll use MPI_Reduce
- // directly, but we'll need to create an MPI_Op manually.
- template<typename T, typename Op>
- void
- reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op op, int root, mpl::false_ /*is_mpi_op*/,
- mpl::true_/*is_mpi_datatype*/)
- {
- user_op<Op, T> mpi_op(op);
- BOOST_MPI_CHECK_RESULT(MPI_Reduce,
- (const_cast<T*>(in_values), out_values, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- mpi_op.get_mpi_op(), root, comm));
- }
-
- // We are reducing to the root for a type that has an associated MPI
- // datatype but with a custom operation. We'll use MPI_Reduce
- // directly, but we'll need to create an MPI_Op manually.
- template<typename T, typename Op>
- void
- reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
- int root, mpl::false_/*is_mpi_op*/, mpl::true_/*is_mpi_datatype*/)
- {
- user_op<Op, T> mpi_op(op);
- BOOST_MPI_CHECK_RESULT(MPI_Reduce,
- (const_cast<T*>(in_values), 0, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- mpi_op.get_mpi_op(), root, comm));
- }
-
- /**********************************************************************
- * User-defined, tree-based reduction for non-MPI data types *
- **********************************************************************/
-
- // Commutative reduction
- template<typename T, typename Op>
- void
- tree_reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op op, int root,
- mpl::true_ /*is_commutative*/)
- {
- std::copy(in_values, in_values + n, out_values);
-
- int size = comm.size();
- int rank = comm.rank();
-
- // The computation tree we will use.
- detail::computation_tree tree(rank, size, root);
-
- int tag = environment::collectives_tag();
-
- MPI_Status status;
- int children = 0;
- for (int child = tree.child_begin();
- children < tree.branching_factor() && child != root;
- ++children, child = (child + 1) % size) {
- // Receive archive
- packed_iarchive ia(comm);
- detail::packed_archive_recv(comm, child, tag, ia, status);
-
- T incoming;
- for (int i = 0; i < n; ++i) {
- ia >> incoming;
- out_values[i] = op(out_values[i], incoming);
- }
- }
-
- // For non-roots, send the result to the parent.
- if (tree.parent() != rank) {
- packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
- oa << out_values[i];
- detail::packed_archive_send(comm, tree.parent(), tag, oa);
- }
- }
-
- // Commutative reduction from a non-root.
- template<typename T, typename Op>
- void
- tree_reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
- int root, mpl::true_ /*is_commutative*/)
- {
- scoped_array<T> results(new T[n]);
- detail::tree_reduce_impl(comm, in_values, n, results.get(), op, root,
- mpl::true_());
- }
-
- // Non-commutative reduction
- template<typename T, typename Op>
- void
- tree_reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op op, int root,
- mpl::false_ /*is_commutative*/)
- {
- int tag = environment::collectives_tag();
-
- int left_child = root / 2;
- int right_child = (root + comm.size()) / 2;
-
- MPI_Status status;
- if (left_child != root) {
- // Receive value from the left child and merge it with the value
- // we had incoming.
- packed_iarchive ia(comm);
- detail::packed_archive_recv(comm, left_child, tag, ia, status);
- T incoming;
- for (int i = 0; i < n; ++i) {
- ia >> incoming;
- out_values[i] = op(incoming, in_values[i]);
- }
- } else {
- // There was no left value, so copy our incoming value.
- std::copy(in_values, in_values + n, out_values);
- }
-
- if (right_child != root) {
- // Receive value from the right child and merge it with the
- // value we had incoming.
- packed_iarchive ia(comm);
- detail::packed_archive_recv(comm, right_child, tag, ia, status);
- T incoming;
- for (int i = 0; i < n; ++i) {
- ia >> incoming;
- out_values[i] = op(out_values[i], incoming);
- }
- }
- }
-
- // Non-commutative reduction from a non-root.
- template<typename T, typename Op>
- void
- tree_reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
- int root, mpl::false_ /*is_commutative*/)
- {
- int size = comm.size();
- int rank = comm.rank();
-
- int tag = environment::collectives_tag();
-
- // Determine our parents and children in the commutative binary
- // computation tree.
- int grandparent = root;
- int parent = root;
- int left_bound = 0;
- int right_bound = size;
- int left_child, right_child;
- do {
- left_child = (left_bound + parent) / 2;
- right_child = (parent + right_bound) / 2;
-
- if (rank < parent) {
- // Go left.
- grandparent = parent;
- right_bound = parent;
- parent = left_child;
- } else if (rank > parent) {
- // Go right.
- grandparent = parent;
- left_bound = parent + 1;
- parent = right_child;
- } else {
- // We've found the parent
- break;
- }
- } while (true);
-
- // Our parent is the grandparent of our children. This is a slight
- // abuse of notation, but it makes the send-to-parent below make
- // more sense.
- parent = grandparent;
-
- MPI_Status status;
- scoped_array<T> out_values(new T[n]);
- if (left_child != rank) {
- // Receive value from the left child and merge it with the value
- // we had incoming.
- packed_iarchive ia(comm);
- detail::packed_archive_recv(comm, left_child, tag, ia, status);
- T incoming;
- for (int i = 0; i < n; ++i) {
- ia >> incoming;
- out_values[i] = op(incoming, in_values[i]);
- }
- } else {
- // There was no left value, so copy our incoming value.
- std::copy(in_values, in_values + n, out_values.get());
- }
-
- if (right_child != rank) {
- // Receive value from the right child and merge it with the
- // value we had incoming.
- packed_iarchive ia(comm);
- detail::packed_archive_recv(comm, right_child, tag, ia, status);
- T incoming;
- for (int i = 0; i < n; ++i) {
- ia >> incoming;
- out_values[i] = op(out_values[i], incoming);
- }
- }
-
- // Send the combined value to our parent.
- packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
- oa << out_values[i];
- detail::packed_archive_send(comm, parent, tag, oa);
- }
-
- // We are reducing at the root for a type that has no associated MPI
- // datatype and operation, so we'll use a simple tree-based
- // algorithm.
- template<typename T, typename Op>
- void
- reduce_impl(const communicator& comm, const T* in_values, int n,
- T* out_values, Op op, int root, mpl::false_ /*is_mpi_op*/,
- mpl::false_ /*is_mpi_datatype*/)
- {
- detail::tree_reduce_impl(comm, in_values, n, out_values, op, root,
- is_commutative<Op, T>());
- }
-
- // We are reducing to the root for a type that has no associated MPI
- // datatype and operation, so we'll use a simple tree-based
- // algorithm.
- template<typename T, typename Op>
- void
- reduce_impl(const communicator& comm, const T* in_values, int n, Op op,
- int root, mpl::false_ /*is_mpi_op*/,
- mpl::false_ /*is_mpi_datatype*/)
- {
- detail::tree_reduce_impl(comm, in_values, n, op, root,
- is_commutative<Op, T>());
- }
-} // end namespace detail
-
-template<typename T, typename Op>
-void
-reduce(const communicator& comm, const T* in_values, int n, T* out_values,
- Op op, int root)
-{
- if (comm.rank() == root)
- detail::reduce_impl(comm, in_values, n, out_values, op, root,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
- else
- detail::reduce_impl(comm, in_values, n, op, root,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-void
-reduce(const communicator& comm, const T* in_values, int n, Op op, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
-
- detail::reduce_impl(comm, in_values, n, op, root,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-void
-reduce(const communicator & comm, std::vector<T> const & in_values, Op op,
- int root)
-{
- reduce(comm, &in_values.front(), in_values.size(), op, root);
-}
-
-template<typename T, typename Op>
-void
-reduce(const communicator & comm, std::vector<T> const & in_values,
- std::vector<T> & out_values, Op op, int root)
-{
- if (root == comm.rank()) out_values.resize(in_values.size());
- reduce(comm, &in_values.front(), in_values.size(), &out_values.front(), op,
- root);
-}
-
-
-template<typename T, typename Op>
-void
-reduce(const communicator& comm, const T& in_value, T& out_value, Op op,
- int root)
-{
- if (comm.rank() == root)
- detail::reduce_impl(comm, &in_value, 1, &out_value, op, root,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
- else
- detail::reduce_impl(comm, &in_value, 1, op, root,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-void reduce(const communicator& comm, const T& in_value, Op op, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
-
- detail::reduce_impl(comm, &in_value, 1, op, root,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_REDUCE_HPP
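A minimal sketch of the reduce() overloads removed above (not part of the header). With an MPI datatype and std::plus, the call maps onto a single MPI_Reduce with MPI_SUM.

#include <boost/mpi.hpp>
#include <iostream>
#include <functional>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  int my_value = world.rank() + 1;
  int sum = 0;                       // meaningful only on the root

  boost::mpi::reduce(world, my_value, sum, std::plus<int>(), 0);

  if (world.rank() == 0)
    std::cout << "sum of 1..size = " << sum << std::endl;
  return 0;
}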
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scan.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scan.hpp
deleted file mode 100644
index 9264838ae..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scan.hpp
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com>.
-// Copyright (C) 2004 The Trustees of Indiana University
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-// Message Passing Interface 1.1 -- Section 4.9.1. Scan
-#ifndef BOOST_MPI_SCAN_HPP
-#define BOOST_MPI_SCAN_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-
-// For (de-)serializing sends and receives
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-
-// For packed_[io]archive sends and receives
-#include <boost/mpi/detail/point_to_point.hpp>
-
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/mpi/detail/computation_tree.hpp>
-#include <boost/mpi/operations.hpp>
-#include <algorithm>
-#include <exception>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-
-/************************************************************************
- * Implementation details *
- ************************************************************************/
-namespace detail {
- /**********************************************************************
- * Simple prefix reduction with MPI_Scan *
- **********************************************************************/
-
- // We are performing prefix reduction for a type that has an
- // associated MPI datatype and operation, so we'll use MPI_Scan
- // directly.
- template<typename T, typename Op>
- void
- scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
- Op /*op*/, mpl::true_ /*is_mpi_op*/, mpl::true_ /*is_mpi_datatype*/)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Scan,
- (const_cast<T*>(in_values), out_values, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- (is_mpi_op<Op, T>::op()), comm));
- }
-
- /**********************************************************************
- * User-defined prefix reduction with MPI_Scan *
- **********************************************************************/
-
- // We are performing prefix reduction for a type that has an
- // associated MPI datatype but with a custom operation. We'll use
- // MPI_Scan directly, but we'll need to create an MPI_Op manually.
- template<typename T, typename Op>
- void
- scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
- Op op, mpl::false_ /*is_mpi_op*/, mpl::true_ /*is_mpi_datatype*/)
- {
- user_op<Op, T> mpi_op(op);
- BOOST_MPI_CHECK_RESULT(MPI_Scan,
- (const_cast<T*>(in_values), out_values, n,
- boost::mpi::get_mpi_datatype<T>(*in_values),
- mpi_op.get_mpi_op(), comm));
- }
-
- /**********************************************************************
- * User-defined, tree-based reduction for non-MPI data types *
- **********************************************************************/
-
- template<typename T, typename Op>
- void
- upper_lower_scan(const communicator& comm, const T* in_values, int n,
- T* out_values, Op& op, int lower, int upper)
- {
- int tag = environment::collectives_tag();
- int rank = comm.rank();
-
- if (lower + 1 == upper) {
- std::copy(in_values, in_values + n, out_values);
- } else {
- int middle = (lower + upper) / 2;
-
- if (rank < middle) {
- // Lower half
- upper_lower_scan(comm, in_values, n, out_values, op, lower, middle);
-
- // If we're the last process in the lower half, send our values
- // to everyone in the upper half.
- if (rank == middle - 1) {
- packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
- oa << out_values[i];
-
- for (int p = middle; p < upper; ++p)
- comm.send(p, tag, oa);
- }
- } else {
- // Upper half
- upper_lower_scan(comm, in_values, n, out_values, op, middle, upper);
-
- // Receive value from the last process in the lower half.
- packed_iarchive ia(comm);
- comm.recv(middle - 1, tag, ia);
-
- // Combine value that came from the left with our value
- T left_value;
- for (int i = 0; i < n; ++i)
- {
- ia >> left_value;
- out_values[i] = op(left_value, out_values[i]);
- }
- }
- }
- }
-
- // We are performing prefix reduction for a type that has no
- // associated MPI datatype and operation, so we'll use a simple
- // upper/lower algorithm.
- template<typename T, typename Op>
- inline void
- scan_impl(const communicator& comm, const T* in_values, int n, T* out_values,
- Op op, mpl::false_ /*is_mpi_op*/, mpl::false_/*is_mpi_datatype*/)
- {
- upper_lower_scan(comm, in_values, n, out_values, op, 0, comm.size());
- }
-} // end namespace detail
-
-
-template<typename T, typename Op>
-inline void
-scan(const communicator& comm, const T& in_value, T& out_value, Op op)
-{
- detail::scan_impl(comm, &in_value, 1, &out_value, op,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-inline void
-scan(const communicator& comm, const T* in_values, int n, T* out_values, Op op)
-{
- detail::scan_impl(comm, in_values, n, out_values, op,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
-}
-
-template<typename T, typename Op>
-inline T
-scan(const communicator& comm, const T& in_value, Op op)
-{
- T out_value;
- detail::scan_impl(comm, &in_value, 1, &out_value, op,
- is_mpi_op<Op, T>(), is_mpi_datatype<T>());
- return out_value;
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_SCAN_HPP
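An illustrative sketch of the scan() interface deleted above (not part of the header): an inclusive prefix reduction, here a prefix sum, so rank r receives 1 + 2 + ... + (r + 1).

#include <boost/mpi.hpp>
#include <iostream>
#include <functional>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  int prefix = boost::mpi::scan(world, world.rank() + 1, std::plus<int>());

  std::cout << "rank " << world.rank()
            << ": prefix sum = " << prefix << std::endl;
  return 0;
}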
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatter.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatter.hpp
deleted file mode 100644
index 196682dd5..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatter.hpp
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (C) 2005, 2006 Douglas Gregor.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.6. Scatter
-#ifndef BOOST_MPI_SCATTER_HPP
-#define BOOST_MPI_SCATTER_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/detail/point_to_point.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // We're scattering from the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatter to do all of the work.
- template<typename T>
- void
- scatter_impl(const communicator& comm, const T* in_values, T* out_values,
- int n, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatter,
- (const_cast<T*>(in_values), n, type,
- out_values, n, type, root, comm));
- }
-
- // We're scattering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatter to do all of the work.
- template<typename T>
- void
- scatter_impl(const communicator& comm, T* out_values, int n, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*out_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatter,
- (0, n, type,
- out_values, n, type,
- root, comm));
- }
-
- // We're scattering from the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Scatter, so
- // we'll just have the root send individual messages to the other
- // processes.
- template<typename T>
- void
- scatter_impl(const communicator& comm, const T* in_values, T* out_values,
- int n, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int size = comm.size();
-
- for (int dest = 0; dest < size; ++dest) {
- if (dest == root) {
- // Our own values will never be transmitted: just copy them.
- std::copy(in_values + dest * n, in_values + (dest + 1) * n, out_values);
- } else {
- // Send archive
- packed_oarchive oa(comm);
- for (int i = 0; i < n; ++i)
- oa << in_values[dest * n + i];
- detail::packed_archive_send(comm, dest, tag, oa);
- }
- }
- }
-
- // We're scattering to a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to de-serialize
-  // it. Unfortunately, this means that we cannot use MPI_Scatter, so
-  // we'll just receive the individual message that the root sends to
-  // this process.
- template<typename T>
- void
- scatter_impl(const communicator& comm, T* out_values, int n, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
-
- packed_iarchive ia(comm);
- MPI_Status status;
- detail::packed_archive_recv(comm, root, tag, ia, status);
- for (int i = 0; i < n; ++i)
- ia >> out_values[i];
- }
-} // end namespace detail
-
-template<typename T>
-void
-scatter(const communicator& comm, const T* in_values, T& out_value, int root)
-{
- if (comm.rank() == root)
- detail::scatter_impl(comm, in_values, &out_value, 1, root,
- is_mpi_datatype<T>());
- else
- detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-scatter(const communicator& comm, const std::vector<T>& in_values, T& out_value,
- int root)
-{
- if (comm.rank() == root)
- ::boost::mpi::scatter<T>(comm, &in_values[0], out_value, root);
- else
- ::boost::mpi::scatter<T>(comm, static_cast<const T*>(0), out_value,
- root);
-}
-
-template<typename T>
-void scatter(const communicator& comm, T& out_value, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::scatter_impl(comm, &out_value, 1, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-scatter(const communicator& comm, const T* in_values, T* out_values, int n,
- int root)
-{
- if (comm.rank() == root)
- detail::scatter_impl(comm, in_values, out_values, n, root,
- is_mpi_datatype<T>());
- else
- detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-scatter(const communicator& comm, const std::vector<T>& in_values,
- T* out_values, int n, int root)
-{
- if (comm.rank() == root)
- ::boost::mpi::scatter(comm, &in_values[0], out_values, n, root);
- else
- ::boost::mpi::scatter(comm, static_cast<const T*>(0), out_values,
- n, root);
-}
-
-template<typename T>
-void scatter(const communicator& comm, T* out_values, int n, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::scatter_impl(comm, out_values, n, root, is_mpi_datatype<T>());
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_SCATTER_HPP
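A short sketch of the scatter() overloads removed above (illustrative only): the root distributes one value to each rank; the input vector is read only on the root.

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> all;
  if (world.rank() == 0) {
    all.resize(world.size());
    for (int r = 0; r < world.size(); ++r)
      all[r] = 100 + r;                     // one value destined for each rank
  }

  int mine = 0;
  boost::mpi::scatter(world, all, mine, 0); // vector overload

  std::cout << "rank " << world.rank() << " received " << mine << std::endl;
  return 0;
}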
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatterv.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatterv.hpp
deleted file mode 100644
index 6e6f27002..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives/scatterv.hpp
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (C) 2011 Júlio Hoffimann.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4.6. Scatterv
-#ifndef BOOST_MPI_SCATTERV_HPP
-#define BOOST_MPI_SCATTERV_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <vector>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/detail/point_to_point.hpp>
-#include <boost/mpi/communicator.hpp>
-#include <boost/mpi/environment.hpp>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // We're scattering from the root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatterv to do all of the work.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, const T* in_values, const int* sizes,
- const int* displs, T* out_values, int out_size, int root, mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*in_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatterv,
- (const_cast<T*>(in_values), const_cast<int*>(sizes),
- const_cast<int*>(displs), type,
- out_values, out_size, type, root, comm));
- }
-
- // We're scattering from a non-root for a type that has an associated MPI
- // datatype, so we'll use MPI_Scatterv to do all of the work.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, T* out_values, int out_size, int root,
- mpl::true_)
- {
- MPI_Datatype type = get_mpi_datatype<T>(*out_values);
- BOOST_MPI_CHECK_RESULT(MPI_Scatterv,
- (0, 0, 0, type,
- out_values, out_size, type,
- root, comm));
- }
-
- // We're scattering from the root for a type that does not have an
- // associated MPI datatype, so we'll need to serialize
- // it. Unfortunately, this means that we cannot use MPI_Scatterv, so
- // we'll just have the root send individual messages to the other
- // processes.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, const T* in_values, const int* sizes,
- const int* displs, T* out_values, int out_size, int root, mpl::false_)
- {
- int tag = environment::collectives_tag();
- int nprocs = comm.size();
-
- for (int dest = 0; dest < nprocs; ++dest) {
- if (dest == root) {
- // Our own values will never be transmitted: just copy them.
- std::copy(in_values + displs[dest],
- in_values + displs[dest] + out_size, out_values);
- } else {
- // Send archive
- packed_oarchive oa(comm);
- for (int i = 0; i < sizes[dest]; ++i)
- oa << in_values[ displs[dest] + i ];
- detail::packed_archive_send(comm, dest, tag, oa);
- }
- }
- }
-
- // We're scattering to a non-root for a type that does not have an
- // associated MPI datatype, so we'll need to de-serialize
-  // it. Unfortunately, this means that we cannot use MPI_Scatterv, so
-  // we'll just receive the individual message that the root sends to
-  // this process.
- template<typename T>
- void
- scatterv_impl(const communicator& comm, T* out_values, int out_size, int root,
- mpl::false_)
- {
- int tag = environment::collectives_tag();
-
- packed_iarchive ia(comm);
- MPI_Status status;
- detail::packed_archive_recv(comm, root, tag, ia, status);
- for (int i = 0; i < out_size; ++i)
- ia >> out_values[i];
- }
-} // end namespace detail
-
-template<typename T>
-void
-scatterv(const communicator& comm, const T* in_values,
- const std::vector<int>& sizes, const std::vector<int>& displs,
- T* out_values, int out_size, int root)
-{
- int rank = comm.rank();
- if (rank == root)
- detail::scatterv_impl(comm, in_values, &sizes[0], &displs[0],
- out_values, out_size, root, is_mpi_datatype<T>());
- else
- detail::scatterv_impl(comm, out_values, out_size, root,
- is_mpi_datatype<T>());
-}
-
-template<typename T>
-void
-scatterv(const communicator& comm, const std::vector<T>& in_values,
- const std::vector<int>& sizes, const std::vector<int>& displs,
- T* out_values, int out_size, int root)
-{
- if (comm.rank() == root)
- ::boost::mpi::scatterv(comm, &in_values[0], sizes, displs,
- out_values, out_size, root);
- else
- ::boost::mpi::scatterv(comm, static_cast<const T*>(0), sizes, displs,
- out_values, out_size, root);
-}
-
-template<typename T>
-void scatterv(const communicator& comm, T* out_values, int out_size, int root)
-{
- BOOST_ASSERT(comm.rank() != root);
- detail::scatterv_impl(comm, out_values, out_size, root, is_mpi_datatype<T>());
-}
-
-///////////////////////
-// common use versions
-///////////////////////
-template<typename T>
-void
-scatterv(const communicator& comm, const T* in_values,
- const std::vector<int>& sizes, T* out_values, int root)
-{
- int nprocs = comm.size();
- int myrank = comm.rank();
-
- std::vector<int> displs(nprocs);
- for (int rank = 0, aux = 0; rank < nprocs; ++rank) {
- displs[rank] = aux;
- aux += sizes[rank];
- }
- ::boost::mpi::scatterv(comm, in_values, sizes, displs, out_values,
- sizes[myrank], root);
-}
-
-template<typename T>
-void
-scatterv(const communicator& comm, const std::vector<T>& in_values,
- const std::vector<int>& sizes, T* out_values, int root)
-{
- ::boost::mpi::scatterv(comm, &in_values[0], sizes, out_values, root);
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_SCATTERV_HPP
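An illustrative sketch of the scatterv() overloads deleted above (not part of the header), with the root using the "common use" variant and non-root ranks using the receive-only overload.

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<int> mine(world.rank() + 1);   // rank r expects r + 1 values

  if (world.rank() == 0) {
    std::vector<int> sizes(world.size());
    int total = 0;
    for (int r = 0; r < world.size(); ++r) {
      sizes[r] = r + 1;
      total += sizes[r];
    }
    std::vector<int> all(total);
    for (int i = 0; i < total; ++i)
      all[i] = i;

    // "Common use" overload: displacements are computed from 'sizes'.
    boost::mpi::scatterv(world, all, sizes, &mine[0], 0);
  } else {
    // Non-root ranks only receive their slice.
    boost::mpi::scatterv(world, &mine[0], static_cast<int>(mine.size()), 0);
  }

  std::cout << "rank " << world.rank() << " received " << mine.size()
            << " values" << std::endl;
  return 0;
}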
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives_fwd.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives_fwd.hpp
deleted file mode 100644
index e65392470..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/collectives_fwd.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4. MPI Collectives
-
-/** @file collectives_fwd.hpp
- *
- * This header provides forward declarations for all of the
- * collective operations contained in the header @c collectives.hpp.
- */
-#ifndef BOOST_MPI_COLLECTIVES_FWD_HPP
-#define BOOST_MPI_COLLECTIVES_FWD_HPP
-
-/// INTERNAL ONLY
-#define BOOST_MPI_COLLECTIVES_FORWARD_ONLY
-#include <boost/mpi/collectives.hpp>
-#undef BOOST_MPI_COLLECTIVES_FORWARD_ONLY
-
-#endif // BOOST_MPI_COLLECTIVES_FWD_HPP
-
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/communicator.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/communicator.hpp
deleted file mode 100644
index a491086ad..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/communicator.hpp
+++ /dev/null
@@ -1,1866 +0,0 @@
-// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-// Copyright (C) 2016 K. Noel Belcourt <kbelco -at- sandia.gov>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file communicator.hpp
- *
- * This header defines the @c communicator class, which is the basis
- * of all communication within Boost.MPI, and provides point-to-point
- * communication operations.
- */
-#ifndef BOOST_MPI_COMMUNICATOR_HPP
-#define BOOST_MPI_COMMUNICATOR_HPP
-
-#include <boost/assert.hpp>
-#include <boost/mpi/config.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/optional.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/mpi/nonblocking.hpp>
-#include <utility>
-#include <iterator>
-#include <stdexcept> // for std::range_error
-#include <vector>
-
-// For (de-)serializing sends and receives
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-
-// For (de-)serializing skeletons and content
-#include <boost/mpi/skeleton_and_content_fwd.hpp>
-
-// For (de-)serializing arrays
-#include <boost/serialization/array.hpp>
-
-#include <boost/mpi/detail/point_to_point.hpp>
-#include <boost/mpi/status.hpp>
-#include <boost/mpi/request.hpp>
-
-#ifdef BOOST_MSVC
-# pragma warning(push)
-# pragma warning(disable : 4800) // forcing to bool 'true' or 'false'
-#endif
-
-namespace boost { namespace mpi {
-
-/**
- * @brief A constant representing "any process."
- *
- * This constant may be used for the @c source parameter of @c receive
- * operations to indicate that a message may be received from any
- * source.
- */
-const int any_source = MPI_ANY_SOURCE;
-
-/**
- * @brief A constant representing "any tag."
- *
- * This constant may be used for the @c tag parameter of @c receive
- * operations to indicate that a @c send with any tag will be matched
- * by the receive.
- */
-const int any_tag = MPI_ANY_TAG;
-
-/**
- * @brief Enumeration used to describe how to adopt a C @c MPI_Comm into
- * a Boost.MPI communicator.
- *
- * The values for this enumeration determine how a Boost.MPI
- * communicator will behave when constructed with an MPI
- * communicator. The options are:
- *
- * - @c comm_duplicate: Duplicate the MPI_Comm communicator to
- * create a new communicator (e.g., with MPI_Comm_dup). This new
- * MPI_Comm communicator will be automatically freed when the
- * Boost.MPI communicator (and all copies of it) is destroyed.
- *
- * - @c comm_take_ownership: Take ownership of the communicator. It
- * will be freed automatically when all of the Boost.MPI
- * communicators go out of scope. This option must not be used with
- * MPI_COMM_WORLD.
- *
- * - @c comm_attach: The Boost.MPI communicator will reference the
- * existing MPI communicator but will not free it when the Boost.MPI
- * communicator goes out of scope. This option should only be used
- * when the communicator is managed by the user or MPI library
- * (e.g., MPI_COMM_WORLD).
- */
-enum comm_create_kind { comm_duplicate, comm_take_ownership, comm_attach };
-
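A brief sketch of the adoption modes described above (not part of the header): attaching to an externally managed communicator versus duplicating it. The static_cast relies on communicator's conversion to MPI_Comm, which this class provides.

#include <boost/mpi.hpp>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);

  // comm_attach: reference MPI_COMM_WORLD without ever freeing it.
  boost::mpi::communicator world(MPI_COMM_WORLD, boost::mpi::comm_attach);

  // comm_duplicate: make a duplicate (MPI_Comm_dup) that is freed
  // automatically when the last Boost.MPI copy of it is destroyed.
  boost::mpi::communicator dup(static_cast<MPI_Comm>(world),
                               boost::mpi::comm_duplicate);

  return 0;
}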
-/**
- * INTERNAL ONLY
- *
- * Forward declaration of @c group needed for the @c group
- * constructor and accessor.
- */
-class group;
-
-/**
- * INTERNAL ONLY
- *
- * Forward declaration of @c intercommunicator needed for the "cast"
- * from a communicator to an intercommunicator.
- */
-class intercommunicator;
-
-/**
- * INTERNAL ONLY
- *
- * Forward declaration of @c graph_communicator needed for the "cast"
- * from a communicator to a graph communicator.
- */
-class graph_communicator;
-
-/**
- * @brief A communicator that permits communication and
- * synchronization among a set of processes.
- *
- * The @c communicator class abstracts a set of communicating
- * processes in MPI. All of the processes that belong to a certain
- * communicator can determine the size of the communicator, their rank
- * within the communicator, and communicate with any other processes
- * in the communicator.
- */
-class BOOST_MPI_DECL communicator
-{
- public:
- /**
- * Build a new Boost.MPI communicator for @c MPI_COMM_WORLD.
- *
- * Constructs a Boost.MPI communicator that attaches to @c
- * MPI_COMM_WORLD. This is the equivalent of constructing with
- * @c (MPI_COMM_WORLD, comm_attach).
- */
- communicator();
-
- /**
- * Build a new Boost.MPI communicator based on the MPI communicator
- * @p comm.
- *
- * @p comm may be any valid MPI communicator. If @p comm is
- * MPI_COMM_NULL, an empty communicator (that cannot be used for
- * communication) is created and the @p kind parameter is
- * ignored. Otherwise, the @p kind parameters determines how the
- * Boost.MPI communicator will be related to @p comm:
- *
- * - If @p kind is @c comm_duplicate, duplicate @c comm to create
- * a new communicator. This new communicator will be freed when
- * the Boost.MPI communicator (and all copies of it) is destroyed.
- * This option is only permitted if @p comm is a valid MPI
- * intracommunicator or if the underlying MPI implementation
- * supports MPI 2.0 (which supports duplication of
- * intercommunicators).
- *
- * - If @p kind is @c comm_take_ownership, take ownership of @c
- * comm. It will be freed automatically when all of the Boost.MPI
- * communicators go out of scope. This option must not be used
- * when @c comm is MPI_COMM_WORLD.
- *
- * - If @p kind is @c comm_attach, this Boost.MPI communicator
- * will reference the existing MPI communicator @p comm but will
- * not free @p comm when the Boost.MPI communicator goes out of
- * scope. This option should only be used when the communicator is
- * managed by the user or MPI library (e.g., MPI_COMM_WORLD).
- */
- communicator(const MPI_Comm& comm, comm_create_kind kind);
-
- /**
- * Build a new Boost.MPI communicator based on a subgroup of another
- * MPI communicator.
- *
- * This routine will construct a new communicator containing all of
- * the processes from communicator @c comm that are listed within
- * the group @c subgroup. Equivalent to @c MPI_Comm_create.
- *
- * @param comm An MPI communicator.
- *
- * @param subgroup A subgroup of the MPI communicator, @p comm, for
- * which we will construct a new communicator.
- */
- communicator(const communicator& comm, const boost::mpi::group& subgroup);
-
- /**
- * @brief Determine the rank of the executing process in a
- * communicator.
- *
- * This routine is equivalent to @c MPI_Comm_rank.
- *
- * @returns The rank of the process in the communicator, which
- * will be a value in [0, size())
- */
- int rank() const;
-
- /**
- * @brief Determine the number of processes in a communicator.
- *
- * This routine is equivalent to @c MPI_Comm_size.
- *
- * @returns The number of processes in the communicator.
- */
- int size() const;
-
- /**
- * This routine constructs a new group whose members are the
- * processes within this communicator. Equivalent to
- * calling @c MPI_Comm_group.
- */
- boost::mpi::group group() const;
-
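For illustration, a sketch of building a sub-communicator from a subgroup using the constructor and group() accessor declared above. It assumes the group::include(first, last) member from boost/mpi/group.hpp, which is not part of this diff.

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // Collect the even ranks into a subgroup.
  std::vector<int> even;
  for (int r = 0; r < world.size(); r += 2)
    even.push_back(r);

  boost::mpi::group even_group = world.group().include(even.begin(), even.end());
  boost::mpi::communicator even_comm(world, even_group);

  if (even_comm)   // valid only on the ranks listed in the subgroup
    std::cout << "world rank " << world.rank() << " is rank "
              << even_comm.rank() << " of " << even_comm.size()
              << " in even_comm" << std::endl;
  return 0;
}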
- // ----------------------------------------------------------------
- // Point-to-point communication
- // ----------------------------------------------------------------
-
- /**
- * @brief Send data to another process.
- *
- * This routine executes a potentially blocking send with tag @p tag
- * to the process with rank @p dest. It can be received by the
- * destination process with a matching @c recv call.
- *
- * The given @p value must be suitable for transmission over
- * MPI. There are several classes of types that meet these
- * requirements:
- *
- * - Types with mappings to MPI data types: If @c
- * is_mpi_datatype<T> is convertible to @c mpl::true_, then @p
- * value will be transmitted using the MPI data type
- * @c get_mpi_datatype<T>(). All primitive C++ data types that have
- * MPI equivalents, e.g., @c int, @c float, @c char, @c double,
- * etc., have built-in mappings to MPI data types. You may turn a
- * Serializable type with fixed structure into an MPI data type by
- * specializing @c is_mpi_datatype for your type.
- *
- * - Serializable types: Any type that provides the @c serialize()
- * functionality required by the Boost.Serialization library can be
- * transmitted and received.
- *
- * - Packed archives and skeletons: Data that has been packed into
- * an @c mpi::packed_oarchive or the skeletons of data that have
-   * been packed into an @c mpi::packed_skeleton_oarchive can be
- * transmitted, but will be received as @c mpi::packed_iarchive and
- * @c mpi::packed_skeleton_iarchive, respectively, to allow the
- * values (or skeletons) to be extracted by the destination process.
- *
- * - Content: Content associated with a previously-transmitted
- * skeleton can be transmitted by @c send and received by @c
- * recv. The receiving process may only receive content into the
- * content of a value that has been constructed with the matching
- * skeleton.
- *
- * For types that have mappings to an MPI data type (including the
-   * content of a type), an invocation of this routine will result in
- * a single MPI_Send call. For variable-length data, e.g.,
- * serialized types and packed archives, two messages will be sent
- * via MPI_Send: one containing the length of the data and the
- * second containing the data itself. Note that the transmission
- * mode for variable-length data is an implementation detail that
- * is subject to change.
- *
- * @param dest The rank of the remote process to which the data
- * will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- * @param value The value that will be transmitted to the
- * receiver. The type @c T of this value must meet the aforementioned
- * criteria for transmission.
- */
- template<typename T>
- void send(int dest, int tag, const T& value) const;
-
- template<typename T, typename A>
- void send(int dest, int tag, const std::vector<T,A>& value) const;
-
- template<typename T, typename A>
- void send_vector(int dest, int tag, const std::vector<T,A>& value,
- mpl::true_) const;
-
- template<typename T, typename A>
- void send_vector(int dest, int tag, const std::vector<T,A>& value,
- mpl::false_) const;
-
- /**
- * @brief Send the skeleton of an object.
- *
- * This routine executes a potentially blocking send with tag @p
- * tag to the process with rank @p dest. It can be received by the
- * destination process with a matching @c recv call. This variation
- * on @c send will be used when a send of a skeleton is explicitly
- * requested via code such as:
- *
- * @code
- * comm.send(dest, tag, skeleton(object));
- * @endcode
- *
- * The semantics of this routine are equivalent to that of sending
- * a @c packed_skeleton_oarchive storing the skeleton of the @c
- * object.
- *
- * @param dest The rank of the remote process to which the skeleton
- * will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- * @param proxy The @c skeleton_proxy containing a reference to the
- * object whose skeleton will be transmitted.
- *
- */
- template<typename T>
- void send(int dest, int tag, const skeleton_proxy<T>& proxy) const;
-
- /**
- * @brief Send an array of values to another process.
- *
- * This routine executes a potentially blocking send of an array of
- * data with tag @p tag to the process with rank @p dest. It can be
- * received by the destination process with a matching array @c
- * recv call.
- *
- * If @c T is an MPI datatype, an invocation of this routine will
- * be mapped to a single call to MPI_Send, using the datatype @c
- * get_mpi_datatype<T>().
- *
- * @param dest The process rank of the remote process to which
- * the data will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- * @param values The array of values that will be transmitted to the
- * receiver. The type @c T of these values must be mapped to an MPI
- * data type.
- *
- * @param n The number of values stored in the array. The destination
- * process must call receive with at least this many elements to
- * correctly receive the message.
- */
- template<typename T>
- void send(int dest, int tag, const T* values, int n) const;
-
- /**
- * @brief Send a message to another process without any data.
- *
- * This routine executes a potentially blocking send of a message
- * to another process. The message contains no extra data, and can
- * therefore only be received by a matching call to @c recv().
- *
- * @param dest The process rank of the remote process to which
- * the message will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- */
- void send(int dest, int tag) const;
-
- /**
- * @brief Receive data from a remote process.
- *
- * This routine blocks until it receives a message from the process @p
- * source with the given @p tag. The type @c T of the @p value must be
- * suitable for transmission over MPI, which includes serializable
- * types, types that can be mapped to MPI data types (including most
- * built-in C++ types), packed MPI archives, skeletons, and content
- * associated with skeletons; see the documentation of @c send for a
- * complete description.
- *
- * @param source The process that will be sending data. This will
- * either be a process rank within the communicator or the
- * constant @c any_source, indicating that we can receive the
- * message from any process.
- *
- * @param tag The tag that matches a particular kind of message sent
- * by the source process. This may be any tag value permitted by @c
- * send. Alternatively, the argument may be the constant @c any_tag,
- * indicating that this receive matches a message with any tag.
- *
- * @param value Will contain the value of the message after a
- * successful receive. The type of this value must match the value
- * transmitted by the sender, unless the sender transmitted a packed
- * archive or skeleton: in these cases, the sender transmits a @c
- * packed_oarchive or @c packed_skeleton_oarchive and the
- * destination receives a @c packed_iarchive or @c
- * packed_skeleton_iarchive, respectively.
- *
- * @returns Information about the received message.
- */
- template<typename T>
- status recv(int source, int tag, T& value) const;
-
- template<typename T, typename A>
- status recv(int source, int tag, std::vector<T,A>& value) const;
-
- template<typename T, typename A>
- status recv_vector(int source, int tag, std::vector<T,A>& value,
- mpl::true_) const;
-
- template<typename T, typename A>
- status recv_vector(int source, int tag, std::vector<T,A>& value,
- mpl::false_) const;
-
- /**
- * @brief Receive a skeleton from a remote process.
- *
- * This routine blocks until it receives a message from the process @p
- * source with the given @p tag containing a skeleton.
- *
- * @param source The process that will be sending data. This will
- * either be a process rank within the communicator or the constant
- * @c any_source, indicating that we can receive the message from
- * any process.
- *
- * @param tag The tag that matches a particular kind of message
- * sent by the source process. This may be any tag value permitted
- * by @c send. Alternatively, the argument may be the constant @c
- * any_tag, indicating that this receive matches a message with any
- * tag.
- *
- * @param proxy The @c skeleton_proxy containing a reference to the
- * object that will be reshaped to match the received skeleton.
- *
- * @returns Information about the received message.
- */
- template<typename T>
- status recv(int source, int tag, const skeleton_proxy<T>& proxy) const;
-
- /**
- * @brief Receive a skeleton from a remote process.
- *
- * This routine blocks until it receives a message from the process @p
- * source with the given @p tag containing a skeleton.
- *
- * @param source The process that will be sending data. This will
- * either be a process rank within the communicator or the constant
- * @c any_source, indicating that we can receive the message from
- * any process.
- *
- * @param tag The tag that matches a particular kind of message
- * sent by the source process. This may be any tag value permitted
- * by @c send. Alternatively, the argument may be the constant @c
- * any_tag, indicating that this receive matches a message with any
- * tag.
- *
- * @param proxy The @c skeleton_proxy containing a reference to the
- * object that will be reshaped to match the received skeleton.
- *
- * @returns Information about the received message.
- */
- template<typename T>
- status recv(int source, int tag, skeleton_proxy<T>& proxy) const;
-
- /**
- * @brief Receive an array of values from a remote process.
- *
- * This routine blocks until it receives an array of values from the
-   * process @p source with the given @p tag. If the type @c T has an
-   * associated MPI data type, this call maps to a single @c MPI_Recv
-   * using the datatype @c get_mpi_datatype<T>().
-   *
- * @param source The process that will be sending data. This will
- * either be a process rank within the communicator or the
- * constant @c any_source, indicating that we can receive the
- * message from any process.
- *
- * @param tag The tag that matches a particular kind of message sent
- * by the source process. This may be any tag value permitted by @c
- * send. Alternatively, the argument may be the constant @c any_tag,
- * indicating that this receive matches a message with any tag.
- *
- * @param values Will contain the values in the message after a
- * successful receive. The type of these elements must match the
- * type of the elements transmitted by the sender.
- *
- * @param n The number of values that can be stored into the @p
- * values array. This shall not be smaller than the number of
- * elements transmitted by the sender.
- *
- * @throws std::range_error if the message to be received contains
- * more than @p n values.
- *
- * @returns Information about the received message.
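- *
- * A minimal usage sketch (illustrative only; @c world is assumed to be
- * a @c boost::mpi::communicator, and the buffer size and tag value are
- * arbitrary):
- *
- * @code
- * double buf[16];
- * boost::mpi::status s = world.recv(boost::mpi::any_source, 0, buf, 16);
- * if (boost::optional<int> cnt = s.count<double>())
- *   std::cout << "received " << *cnt << " values" << std::endl;
- * @endcode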
- */
- template<typename T>
- status recv(int source, int tag, T* values, int n) const;
-
- /**
- * @brief Receive a message from a remote process without any data.
- *
- * This routine blocks until it receives a message from the process
- * @p source with the given @p tag.
- *
- * @param source The process that will be sending the message. This
- * will either be a process rank within the communicator or the
- * constant @c any_source, indicating that we can receive the
- * message from any process.
- *
- * @param tag The tag that matches a particular kind of message
- * sent by the source process. This may be any tag value permitted
- * by @c send. Alternatively, the argument may be the constant @c
- * any_tag, indicating that this receive matches a message with any
- * tag.
- *
- * @returns Information about the received message.
- */
- status recv(int source, int tag) const;
-
- /** @brief Send a message to a remote process and receive another
- * message from another process.
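- *
- * For types with an associated MPI data type this maps to a single
- * @c MPI_Sendrecv call; other types are exchanged with a non-blocking
- * send/receive pair. A minimal sketch of a ring exchange follows
- * (illustrative only; @c world is assumed to be a
- * @c boost::mpi::communicator and the tag value 0 is arbitrary):
- *
- * @code
- * int right = (world.rank() + 1) % world.size();
- * int left  = (world.rank() + world.size() - 1) % world.size();
- * int in = 0, out = world.rank();
- * world.sendrecv(right, 0, out, left, 0, in);
- * @endcode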
- */
- template<typename T>
- status sendrecv(int dest, int stag, const T& sval, int src, int rtag, T& rval) const;
-
- /**
- * @brief Send a message to a remote process without blocking.
- *
- * The @c isend method is functionally identical to the @c send
- * method and transmits data in the same way, except that @c isend
- * will not block while waiting for the data to be
- * transmitted. Instead, a request object will be immediately
- * returned, allowing one to query the status of the communication
- * or wait until it has completed.
- *
- * @param dest The rank of the remote process to which the data
- * will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- * @param value The value that will be transmitted to the
- * receiver. The type @c T of this value must meet the aforementioned
- * criteria for transmission.
- *
- * @returns a @c request object that describes this communication.
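- *
- * A minimal sketch (illustrative only; @c world is assumed to be a
- * @c boost::mpi::communicator, and the destination rank 1 and tag 0
- * are arbitrary). It is safest to keep the transmitted value alive
- * until the request completes:
- *
- * @code
- * std::string payload("hello");
- * boost::mpi::request req = world.isend(1, 0, payload);
- * // ... overlap other work with the communication ...
- * req.wait();
- * @endcode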
- */
- template<typename T>
- request isend(int dest, int tag, const T& value) const;
-
- /**
- * @brief Send the skeleton of an object without blocking.
- *
- * This routine is functionally identical to the @c send method for
- * @c skeleton_proxy objects except that @c isend will not block
- * while waiting for the data to be transmitted. Instead, a request
- * object will be immediately returned, allowing one to query the
- * status of the communication or wait until it has completed.
- *
- * The semantics of this routine are equivalent to a non-blocking
- * send of a @c packed_skeleton_oarchive storing the skeleton of
- * the @c object.
- *
- * @param dest The rank of the remote process to which the skeleton
- * will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- * @param proxy The @c skeleton_proxy containing a reference to the
- * object whose skeleton will be transmitted.
- *
- * @returns a @c request object that describes this communication.
- */
- template<typename T>
- request isend(int dest, int tag, const skeleton_proxy<T>& proxy) const;
-
- /**
- * @brief Send an array of values to another process without
- * blocking.
- *
- * This routine is functionally identical to the @c send method for
- * arrays except that @c isend will not block while waiting for the
- * data to be transmitted. Instead, a request object will be
- * immediately returned, allowing one to query the status of the
- * communication or wait until it has completed.
- *
- * @param dest The process rank of the remote process to which
- * the data will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- * @param values The array of values that will be transmitted to the
- * receiver. The type @c T of these values must be mapped to an MPI
- * data type.
- *
- * @param n The number of values stored in the array. The destination
- * process must call receive with at least this many elements to
- * correctly receive the message.
- *
- * @returns a @c request object that describes this communication.
- */
- template<typename T>
- request isend(int dest, int tag, const T* values, int n) const;
-
- /**
- * @brief Send a message to another process without any data
- * without blocking.
- *
- * This routine is functionally identical to the @c send method for
- * sends with no data, except that @c isend will not block while
- * waiting for the message to be transmitted. Instead, a request
- * object will be immediately returned, allowing one to query the
- * status of the communication or wait until it has completed.
- *
- * @param dest The process rank of the remote process to which
- * the message will be sent.
- *
- * @param tag The tag that will be associated with this message. Tags
- * may be any integer between zero and an implementation-defined
- * upper limit. This limit is accessible via @c environment::max_tag().
- *
- *
- * @returns a @c request object that describes this communication.
- */
- request isend(int dest, int tag) const;
-
- /**
- * @brief Prepare to receive a message from a remote process.
- *
- * The @c irecv method is functionally identical to the @c recv
- * method and receives data in the same way, except that @c irecv
- * will not block while waiting for data to be
- * transmitted. Instead, it immediately returns a request object
- * that allows one to query the status of the receive or wait until
- * it has completed.
- *
- * @param source The process that will be sending data. This will
- * either be a process rank within the communicator or the
- * constant @c any_source, indicating that we can receive the
- * message from any process.
- *
- * @param tag The tag that matches a particular kind of message sent
- * by the source process. This may be any tag value permitted by @c
- * send. Alternatively, the argument may be the constant @c any_tag,
- * indicating that this receive matches a message with any tag.
- *
- * @param value Will contain the value of the message after a
- * successful receive. The type of this value must match the value
- * transmitted by the sender, unless the sender transmitted a packed
- * archive or skeleton: in these cases, the sender transmits a @c
- * packed_oarchive or @c packed_skeleton_oarchive and the
- * destination receives a @c packed_iarchive or @c
- * packed_skeleton_iarchive, respectively.
- *
- * @returns a @c request object that describes this communication.
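- *
- * A minimal sketch (illustrative only; @c world is assumed to be a
- * @c boost::mpi::communicator and the tag value 0 is arbitrary):
- *
- * @code
- * int incoming = 0;
- * boost::mpi::request req = world.irecv(boost::mpi::any_source, 0, incoming);
- * boost::mpi::status s = req.wait(); // blocks until the value has arrived
- * @endcode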
- */
- template<typename T>
- request irecv(int source, int tag, T& value) const;
-
- /**
- * @brief Initiate receipt of an array of values from a remote process.
- *
- * This routine initiates a receive operation for an array of values
- * transmitted by process @p source with the given @p tag.
- *
- * @param source The process that will be sending data. This will
- * either be a process rank within the communicator or the
- * constant @c any_source, indicating that we can receive the
- * message from any process.
- *
- * @param tag The tag that matches a particular kind of message sent
- * by the source process. This may be any tag value permitted by @c
- * send. Alternatively, the argument may be the constant @c any_tag,
- * indicating that this receive matches a message with any tag.
- *
- * @param values Will contain the values in the message after a
- * successful receive. The type of these elements must match the
- * type of the elements transmitted by the sender.
- *
- * @param n The number of values that can be stored into the @p
- * values array. This shall not be smaller than the number of
- * elements transmitted by the sender.
- *
- * @returns a @c request object that describes this communication.
- */
- template<typename T>
- request irecv(int source, int tag, T* values, int n) const;
-
- /**
- * @brief Initiate receipt of a message from a remote process that
- * carries no data.
- *
- * This routine initiates a receive operation for a message from
- * process @p source with the given @p tag that carries no data.
- *
- * @param source The process that will be sending the message. This
- * will either be a process rank within the communicator or the
- * constant @c any_source, indicating that we can receive the
- * message from any process.
- *
- * @param tag The tag that matches a particular kind of message
- * sent by the source process. This may be any tag value permitted
- * by @c send. Alternatively, the argument may be the constant @c
- * any_tag, indicating that this receive matches a message with any
- * tag.
- *
- * @returns a @c request object that describes this communication.
- */
- request irecv(int source, int tag) const;
-
- /**
- * @brief Waits until a message is available to be received.
- *
- * This operation waits until a message matching (@p source, @p tag)
- * is available to be received. It then returns information about
- * that message. The functionality is equivalent to @c MPI_Probe. To
- * check if a message is available without blocking, use @c iprobe.
- *
- * @param source Determine if there is a message available from
- * this rank. If @c any_source, then the message returned may come
- * from any source.
- *
- * @param tag Determine if there is a message available with the
- * given tag. If @c any_tag, then the message returned may have any
- * tag.
- *
- * @returns Returns information about the first message that
- * matches the given criteria.
- */
- status probe(int source = any_source, int tag = any_tag) const;
-
- /**
- * @brief Determine if a message is available to be received.
- *
- * This operation determines if a message matching (@p source, @p
- * tag) is available to be received. If so, it returns information
- * about that message; otherwise, it returns immediately with an
- * empty optional. The functionality is equivalent to @c
- * MPI_Iprobe. To wait until a message is available, use @c probe.
- *
- * @param source Determine if there is a message available from
- * this rank. If @c any_source, then the message returned may come
- * from any source.
- *
- * @param tag Determine if there is a message available with the
- * given tag. If @c any_tag, then the message returned may have any
- * tag.
- *
- * @returns If a matching message is available, returns
- * information about that message. Otherwise, returns an empty
- * @c boost::optional.
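- *
- * A minimal polling sketch (illustrative only; @c world is assumed to
- * be a @c boost::mpi::communicator, the tag value 0 is arbitrary, and
- * the pending messages are assumed to carry @c std::string payloads):
- *
- * @code
- * while (boost::optional<boost::mpi::status> s =
- *          world.iprobe(boost::mpi::any_source, 0)) {
- *   std::string msg;
- *   world.recv(s->source(), s->tag(), msg);
- * }
- * @endcode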
- */
- optional<status>
- iprobe(int source = any_source, int tag = any_tag) const;
-
-#ifdef barrier
- // Linux defines a function-like macro named "barrier". So, we need
- // to avoid expanding the macro when we define our barrier()
- // function. However, some C++ parsers (Doxygen, for instance) can't
- // handle this syntax, so we only use it when necessary.
- void (barrier)() const;
-#else
- /**
- * @brief Wait for all processes within a communicator to reach the
- * barrier.
- *
- * This routine is a collective operation that blocks each process
- * until all processes have entered it, then releases all of the
- * processes "simultaneously". It is equivalent to @c MPI_Barrier.
- */
- void barrier() const;
-#endif
-
- /** @brief Determine if this communicator is valid for
- * communication.
- *
- * Evaluates @c true in a boolean context if this communicator is
- * valid for communication, i.e., does not represent
- * MPI_COMM_NULL. Otherwise, evaluates @c false.
- */
- operator bool() const { return (bool)comm_ptr; }
-
- /**
- * @brief Access the MPI communicator associated with a Boost.MPI
- * communicator.
- *
- * This routine permits the implicit conversion from a Boost.MPI
- * communicator to an MPI communicator.
- *
- * @returns The associated MPI communicator.
- */
- operator MPI_Comm() const;
-
- /**
- * Split the communicator into multiple, disjoint communicators
- * each of which is based on a particular color. This is a
- * collective operation that returns a new communicator that is a
- * subgroup of @p this. This routine is functionally equivalent to
- * @c MPI_Comm_split.
- *
- * @param color The color of this process. All processes with the
- * same @p color value will be placed into the same group.
- *
- * @returns A new communicator containing all of the processes in
- * @p this that have the same @p color.
- */
- communicator split(int color) const;
-
- /**
- * Split the communicator into multiple, disjoint communicators
- * each of which is based on a particular color. This is a
- * collective operation that returns a new communicator that is a
- * subgroup of @p this. This routine is functionally equivalent to
- * @c MPI_Comm_split.
- *
- * @param color The color of this process. All processes with the
- * same @p color value will be placed into the same group.
- *
- * @param key A key value that will be used to determine the
- * ordering of processes with the same color in the resulting
- * communicator. If omitted, the rank of the processes in @p this
- * will determine the ordering of processes in the resulting
- * group.
- *
- * @returns A new communicator containing all of the processes in
- * @p this that have the same @p color.
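- *
- * A minimal sketch (illustrative only; @c world is assumed to be a
- * @c boost::mpi::communicator):
- *
- * @code
- * // Separate even- and odd-ranked processes, preserving rank order
- * // within each resulting communicator.
- * boost::mpi::communicator half = world.split(world.rank() % 2, world.rank());
- * @endcode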
- */
- communicator split(int color, int key) const;
-
- /**
- * Determine if the communicator is in fact an intercommunicator
- * and, if so, return that intercommunicator.
- *
- * @returns an @c optional containing the intercommunicator, if this
- * communicator is in fact an intercommunicator. Otherwise, returns
- * an empty @c optional.
- */
- optional<intercommunicator> as_intercommunicator() const;
-
- /**
- * Determine if the communicator has a graph topology and, if so,
- * return that @c graph_communicator. Even though the communicators
- * have different types, they refer to the same underlying
- * communication space and can be used interchangeably for
- * communication.
- *
- * @returns an @c optional containing the graph communicator, if this
- * communicator does in fact have a graph topology. Otherwise, returns
- * an empty @c optional.
- */
- optional<graph_communicator> as_graph_communicator() const;
-
- /**
- * Determines whether this communicator has a Cartesian topology.
- */
- bool has_cartesian_topology() const;
-
-#if 0
- template<typename Extents>
- communicator
- with_cartesian_topology(const Extents& extents,
- bool periodic = false,
- bool reorder = false) const;
-
- template<typename DimInputIterator, typename PeriodicInputIterator>
- communicator
- with_cartesian_topology(DimInputIterator first_dim,
- DimInputIterator last_dim,
- PeriodicInputIterator first_periodic,
- bool reorder = false);
-
- template<typename Allocator, std::size_t NumDims>
- communicator
- with_cartesian_topology(const multi_array<bool, NumDims, Allocator>& periods,
- bool reorder = false);
-#endif
-
- /** Abort all tasks in the group of this communicator.
- *
- * Makes a "best attempt" to abort all of the tasks in the group of
- * this communicator. Depending on the underlying MPI
- * implementation, this may either abort the entire program (and
- * possibly return @p errcode to the environment) or only abort
- * some processes, allowing the others to continue. Consult the
- * documentation for your MPI implementation. This is equivalent to
- * a call to @c MPI_Abort.
- *
- * @param errcode The error code to return from aborted processes.
- * @returns Will not return.
- */
- void abort(int errcode) const;
-
- protected:
-
- /**
- * INTERNAL ONLY
- *
- * Implementation of sendrecv for types with an associated MPI data type.
- */
- template<typename T>
- status sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
- mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * Implementation of sendrecv for types without an associated MPI data type,
- * which must be passed as packed archives.
- */
- template<typename T>
- status sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
- mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * Function object that frees an MPI communicator and deletes the
- * memory associated with it. Intended to be used as a deleter with
- * shared_ptr.
- */
- struct comm_free
- {
- void operator()(MPI_Comm* comm) const
- {
- BOOST_ASSERT( comm != 0 );
- BOOST_ASSERT(*comm != MPI_COMM_NULL);
- int finalized;
- BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&finalized));
- if (!finalized)
- BOOST_MPI_CHECK_RESULT(MPI_Comm_free, (comm));
- delete comm;
- }
- };
-
-
- /**
- * INTERNAL ONLY
- *
- * We're sending a type that has an associated MPI datatype, so we
- * map directly to that datatype.
- */
- template<typename T>
- void send_impl(int dest, int tag, const T& value, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending a type that does not have an associated MPI
- * datatype, so it must be serialized then sent as MPI_PACKED data,
- * to be deserialized on the receiver side.
- */
- template<typename T>
- void send_impl(int dest, int tag, const T& value, mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending an array of a type that has an associated MPI
- * datatype, so we map directly to that datatype.
- */
- template<typename T>
- void
- array_send_impl(int dest, int tag, const T* values, int n, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending an array of a type that does not have an associated
- * MPI datatype, so it must be serialized then sent as MPI_PACKED
- * data, to be deserialized on the receiver side.
- */
- template<typename T>
- void
- array_send_impl(int dest, int tag, const T* values, int n,
- mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending a type that has an associated MPI datatype, so we
- * map directly to that datatype.
- */
- template<typename T>
- request isend_impl(int dest, int tag, const T& value, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending a type that does not have an associated MPI
- * datatype, so it must be serialized then sent as MPI_PACKED data,
- * to be deserialized on the receiver side.
- */
- template<typename T>
- request isend_impl(int dest, int tag, const T& value, mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending an array of a type that has an associated MPI
- * datatype, so we map directly to that datatype.
- */
- template<typename T>
- request
- array_isend_impl(int dest, int tag, const T* values, int n,
- mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're sending an array of a type that does not have an associated
- * MPI datatype, so it must be serialized then sent as MPI_PACKED
- * data, to be deserialized on the receiver side.
- */
- template<typename T>
- request
- array_isend_impl(int dest, int tag, const T* values, int n,
- mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that has an associated MPI datatype, so we
- * map directly to that datatype.
- */
- template<typename T>
- status recv_impl(int source, int tag, T& value, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that does not have an associated MPI
- * datatype, so it must have been serialized then sent as
- * MPI_PACKED. We'll receive it and then deserialize.
- */
- template<typename T>
- status recv_impl(int source, int tag, T& value, mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving an array of a type that has an associated MPI
- * datatype, so we map directly to that datatype.
- */
- template<typename T>
- status
- array_recv_impl(int source, int tag, T* values, int n, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that does not have an associated MPI
- * datatype, so it must have been serialized then sent as
- * MPI_PACKED. We'll receive it and then deserialize.
- */
- template<typename T>
- status
- array_recv_impl(int source, int tag, T* values, int n, mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that has an associated MPI datatype, so we
- * map directly to that datatype.
- */
- template<typename T>
- request irecv_impl(int source, int tag, T& value, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that does not have an associated MPI
- * datatype, so it must have been serialized then sent as
- * MPI_PACKED. We'll receive it and then deserialize.
- */
- template<typename T>
- request irecv_impl(int source, int tag, T& value, mpl::false_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that has an associated MPI datatype, so we
- * map directly to that datatype.
- */
- template<typename T>
- request
- array_irecv_impl(int source, int tag, T* values, int n, mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- *
- * We're receiving a type that does not have an associated MPI
- * datatype, so it must have been serialized then sent as
- * MPI_PACKED. We'll receive it and then deserialize.
- */
- template<typename T>
- request
- array_irecv_impl(int source, int tag, T* values, int n, mpl::false_) const;
-
- shared_ptr<MPI_Comm> comm_ptr;
-};
-
-/**
- * @brief Determines whether two communicators are identical.
- *
- * Equivalent to calling @c MPI_Comm_compare and checking whether the
- * result is @c MPI_IDENT.
- *
- * @returns True when the two communicators refer to the same
- * underlying MPI communicator.
- */
-BOOST_MPI_DECL bool operator==(const communicator& comm1, const communicator& comm2);
-
-/**
- * @brief Determines whether two communicators are different.
- *
- * @returns @c !(comm1 == comm2)
- */
-inline bool operator!=(const communicator& comm1, const communicator& comm2)
-{
- return !(comm1 == comm2);
-}
-
-
-/************************************************************************
- * Implementation details *
- ************************************************************************/
-// Count elements in a message
-template<typename T>
-inline optional<int> status::count() const
-{
- return count_impl<T>(is_mpi_datatype<T>());
-}
-
-template<typename T>
-optional<int> status::count_impl(mpl::true_) const
-{
- if (m_count != -1)
- return m_count;
-
- int return_value;
- BOOST_MPI_CHECK_RESULT(MPI_Get_count,
- (&m_status, get_mpi_datatype<T>(T()), &return_value));
- if (return_value == MPI_UNDEFINED)
- return optional<int>();
- else
- /* Cache the result. */
- return m_count = return_value;
-}
-
-template<typename T>
-inline optional<int> status::count_impl(mpl::false_) const
-{
- if (m_count == -1)
- return optional<int>();
- else
- return m_count;
-}
-
-// We're sending a type that has an associated MPI datatype, so we
-// map directly to that datatype.
-template<typename T>
-void
-communicator::send_impl(int dest, int tag, const T& value, mpl::true_) const
-{
- BOOST_MPI_CHECK_RESULT(MPI_Send,
- (const_cast<T*>(&value), 1, get_mpi_datatype<T>(value),
- dest, tag, MPI_Comm(*this)));
-}
-
-// We're sending a type that does not have an associated MPI
-// datatype, so it must be serialized then sent as MPI_PACKED data,
-// to be deserialized on the receiver side.
-template<typename T>
-void
-communicator::send_impl(int dest, int tag, const T& value, mpl::false_) const
-{
- packed_oarchive oa(*this);
- oa << value;
- send(dest, tag, oa);
-}
-
-// Single-element send may either send the element directly or
-// serialize it via a buffer.
-template<typename T>
-void communicator::send(int dest, int tag, const T& value) const
-{
- this->send_impl(dest, tag, value, is_mpi_datatype<T>());
-}
-
-// We're sending an array of a type that has an associated MPI
-// datatype, so we map directly to that datatype.
-template<typename T>
-void
-communicator::array_send_impl(int dest, int tag, const T* values, int n,
- mpl::true_) const
-{
- BOOST_MPI_CHECK_RESULT(MPI_Send,
- (const_cast<T*>(values), n,
- get_mpi_datatype<T>(*values),
- dest, tag, MPI_Comm(*this)));
-}
-
-// We're sending an array of a type that does not have an associated
-// MPI datatype, so it must be serialized then sent as MPI_PACKED
-// data, to be deserialized on the receiver side.
-template<typename T>
-void
-communicator::array_send_impl(int dest, int tag, const T* values, int n,
- mpl::false_) const
-{
- packed_oarchive oa(*this);
- oa << n << boost::serialization::make_array(values, n);
- send(dest, tag, oa);
-}
-
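-// Vectors of MPI data types are sent as two messages: the vector size,
-// followed by the contiguous element data.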
-template<typename T, typename A>
-void communicator::send_vector(int dest, int tag,
- const std::vector<T,A>& value, mpl::true_ true_type) const
-{
- // send the vector size
- typename std::vector<T,A>::size_type size = value.size();
- send(dest, tag, size);
- // send the data
- this->array_send_impl(dest, tag, value.data(), size, true_type);
-}
-
-template<typename T, typename A>
-void communicator::send_vector(int dest, int tag,
- const std::vector<T,A>& value, mpl::false_ false_type) const
-{
- this->send_impl(dest, tag, value, false_type);
-}
-
-template<typename T, typename A>
-void communicator::send(int dest, int tag, const std::vector<T,A>& value) const
-{
- send_vector(dest, tag, value, is_mpi_datatype<T>());
-}
-
-// Array send must send the elements directly
-template<typename T>
-void communicator::send(int dest, int tag, const T* values, int n) const
-{
- this->array_send_impl(dest, tag, values, n, is_mpi_datatype<T>());
-}
-
-// We're receiving a type that has an associated MPI datatype, so we
-// map directly to that datatype.
-template<typename T>
-status communicator::recv_impl(int source, int tag, T& value, mpl::true_) const
-{
- status stat;
-
- BOOST_MPI_CHECK_RESULT(MPI_Recv,
- (const_cast<T*>(&value), 1,
- get_mpi_datatype<T>(value),
- source, tag, MPI_Comm(*this), &stat.m_status));
- return stat;
-}
-
-template<typename T>
-status
-communicator::recv_impl(int source, int tag, T& value, mpl::false_) const
-{
- // Receive the message
- packed_iarchive ia(*this);
- status stat = recv(source, tag, ia);
-
- // Deserialize the data in the message
- ia >> value;
-
- return stat;
-}
-
-// Single-element receive may either receive the element directly or
-// deserialize it from a buffer.
-template<typename T>
-status communicator::recv(int source, int tag, T& value) const
-{
- return this->recv_impl(source, tag, value, is_mpi_datatype<T>());
-}
-
-template<typename T>
-status
-communicator::array_recv_impl(int source, int tag, T* values, int n,
- mpl::true_) const
-{
- status stat;
- BOOST_MPI_CHECK_RESULT(MPI_Recv,
- (const_cast<T*>(values), n,
- get_mpi_datatype<T>(*values),
- source, tag, MPI_Comm(*this), &stat.m_status));
- return stat;
-}
-
-template<typename T>
-status
-communicator::array_recv_impl(int source, int tag, T* values, int n,
- mpl::false_) const
-{
- // Receive the message
- packed_iarchive ia(*this);
- status stat = recv(source, tag, ia);
-
- // Determine how much data we are going to receive
- int count;
- ia >> count;
-
- // Deserialize the data in the message
- boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
- ia >> arr;
-
- if (count > n) {
- boost::throw_exception(
- std::range_error("communicator::recv: message receive overflow"));
- }
-
- stat.m_count = count;
- return stat;
-}
-
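-// Vectors of MPI data types are received as two messages: the vector size
-// (used to resize the destination vector), followed by the element data.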
-template<typename T, typename A>
-status communicator::recv_vector(int source, int tag,
- std::vector<T,A>& value, mpl::true_ true_type) const
-{
- // receive the vector size
- typename std::vector<T,A>::size_type size = 0;
- recv(source, tag, size);
- // size the vector
- value.resize(size);
- // receive the data
- return this->array_recv_impl(source, tag, value.data(), size, true_type);
-}
-
-template<typename T, typename A>
-status communicator::recv_vector(int source, int tag,
- std::vector<T,A>& value, mpl::false_ false_type) const
-{
- return this->recv_impl(source, tag, value, false_type);
-}
-
-template<typename T, typename A>
-status communicator::recv(int source, int tag, std::vector<T,A>& value) const
-{
- return recv_vector(source, tag, value, is_mpi_datatype<T>());
-}
-
-// Array receive must receive the elements directly into a buffer.
-template<typename T>
-status communicator::recv(int source, int tag, T* values, int n) const
-{
- return this->array_recv_impl(source, tag, values, n, is_mpi_datatype<T>());
-}
-
-
-template<typename T>
-status communicator::sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
- mpl::true_) const
-{
- status stat;
- BOOST_MPI_CHECK_RESULT(MPI_Sendrecv,
- (const_cast<T*>(&sval), 1,
- get_mpi_datatype<T>(sval),
- dest, stag,
- &rval, 1,
- get_mpi_datatype<T>(rval),
- src, rtag,
- MPI_Comm(*this), &stat.m_status));
- return stat;
-}
-
-template<typename T>
-status communicator::sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
- mpl::false_) const
-{
- int const SEND = 0;
- int const RECV = 1;
- request srrequests[2];
- srrequests[SEND] = this->isend_impl(dest, stag, sval, mpl::false_());
- srrequests[RECV] = this->irecv_impl(src, rtag, rval, mpl::false_());
- status srstatuses[2];
- wait_all(srrequests, srrequests + 2, srstatuses);
- return srstatuses[RECV];
-}
-
-template<typename T>
-status communicator::sendrecv(int dest, int stag, const T& sval, int src, int rtag, T& rval) const
-{
- return this->sendrecv_impl(dest, stag, sval, src, rtag, rval, is_mpi_datatype<T>());
-}
-
-
-// We're sending a type that has an associated MPI datatype, so we
-// map directly to that datatype.
-template<typename T>
-request
-communicator::isend_impl(int dest, int tag, const T& value, mpl::true_) const
-{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (const_cast<T*>(&value), 1,
- get_mpi_datatype<T>(value),
- dest, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
-}
-
-// We're sending a type that does not have an associated MPI
-// datatype, so it must be serialized then sent as MPI_PACKED data,
-// to be deserialized on the receiver side.
-template<typename T>
-request
-communicator::isend_impl(int dest, int tag, const T& value, mpl::false_) const
-{
- shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
- *archive << value;
- request result = isend(dest, tag, *archive);
- result.m_data = archive;
- return result;
-}
-
-// Single-element non-blocking send may either send the element directly
-// or serialize it via a buffer.
-template<typename T>
-request communicator::isend(int dest, int tag, const T& value) const
-{
- return this->isend_impl(dest, tag, value, is_mpi_datatype<T>());
-}
-
-template<typename T>
-request
-communicator::array_isend_impl(int dest, int tag, const T* values, int n,
- mpl::true_) const
-{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Isend,
- (const_cast<T*>(values), n,
- get_mpi_datatype<T>(*values),
- dest, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
-}
-
-template<typename T>
-request
-communicator::array_isend_impl(int dest, int tag, const T* values, int n,
- mpl::false_) const
-{
- shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
- *archive << n << boost::serialization::make_array(values, n);
- request result = isend(dest, tag, *archive);
- result.m_data = archive;
- return result;
-}
-
-
-// Array isend must send the elements directly
-template<typename T>
-request communicator::isend(int dest, int tag, const T* values, int n) const
-{
- return array_isend_impl(dest, tag, values, n, is_mpi_datatype<T>());
-}
-
-namespace detail {
- /**
- * Internal data structure that stores everything required to manage
- * the receipt of serialized data via a request object.
- */
- template<typename T>
- struct serialized_irecv_data
- {
- serialized_irecv_data(const communicator& comm, int source, int tag,
- T& value)
- : comm(comm), source(source), tag(tag), ia(comm), value(value)
- {
- }
-
- void deserialize(status& stat)
- {
- ia >> value;
- stat.m_count = 1;
- }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_iarchive ia;
- T& value;
- };
-
- template<>
- struct serialized_irecv_data<packed_iarchive>
- {
- serialized_irecv_data(const communicator& comm, int source, int tag,
- packed_iarchive& ia)
- : comm(comm), source(source), tag(tag), ia(ia) { }
-
- void deserialize(status&) { /* Do nothing. */ }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_iarchive& ia;
- };
-
- /**
- * Internal data structure that stores everything required to manage
- * the receipt of an array of serialized data via a request object.
- */
- template<typename T>
- struct serialized_array_irecv_data
- {
- serialized_array_irecv_data(const communicator& comm, int source, int tag,
- T* values, int n)
- : comm(comm), source(source), tag(tag), ia(comm), values(values), n(n)
- {
- }
-
- void deserialize(status& stat);
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_iarchive ia;
- T* values;
- int n;
- };
-
- template<typename T>
- void serialized_array_irecv_data<T>::deserialize(status& stat)
- {
- // Determine how much data we are going to receive
- int count;
- ia >> count;
-
- // Deserialize the data in the message
- boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
- ia >> arr;
-
- if (count > n) {
- boost::throw_exception(
- std::range_error("communicator::recv: message receive overflow"));
- }
-
- stat.m_count = count;
- }
-}
-
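-// Completes a non-blocking receive of serialized data. The first request
-// carries the packed size; once it finishes, the archive buffer is resized
-// and a second MPI_Irecv is posted for the packed payload, which is then
-// deserialized into the user's value.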
-template<typename T>
-optional<status>
-request::handle_serialized_irecv(request* self, request_action action)
-{
- typedef detail::serialized_irecv_data<T> data_t;
- shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
-
- if (action == ra_wait) {
- status stat;
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Wait for the count message to complete
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests, &stat.m_status));
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(), MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- }
-
- // Wait until we have received the entire message
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests + 1, &stat.m_status));
-
- data->deserialize(stat);
- return stat;
- } else if (action == ra_test) {
- status stat;
- int flag = 0;
-
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Check if the count message has completed
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests, &flag, &stat.m_status));
- if (flag) {
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(),MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- } else
- return optional<status>(); // We have not finished yet
- }
-
- // Check if we have received the message data
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests + 1, &flag, &stat.m_status));
- if (flag) {
- data->deserialize(stat);
- return stat;
- } else
- return optional<status>();
- } else {
- return optional<status>();
- }
-}
-
-template<typename T>
-optional<status>
-request::handle_serialized_array_irecv(request* self, request_action action)
-{
- typedef detail::serialized_array_irecv_data<T> data_t;
- shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
-
- if (action == ra_wait) {
- status stat;
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Wait for the count message to complete
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests, &stat.m_status));
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(), MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- }
-
- // Wait until we have received the entire message
- BOOST_MPI_CHECK_RESULT(MPI_Wait,
- (self->m_requests + 1, &stat.m_status));
-
- data->deserialize(stat);
- return stat;
- } else if (action == ra_test) {
- status stat;
- int flag = 0;
-
- if (self->m_requests[1] == MPI_REQUEST_NULL) {
- // Check if the count message has completed
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests, &flag, &stat.m_status));
- if (flag) {
- // Resize our buffer and get ready to receive its data
- data->ia.resize(data->count);
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (data->ia.address(), data->ia.size(),MPI_PACKED,
- stat.source(), stat.tag(),
- MPI_Comm(data->comm), self->m_requests + 1));
- } else
- return optional<status>(); // We have not finished yet
- }
-
- // Check if we have received the message data
- BOOST_MPI_CHECK_RESULT(MPI_Test,
- (self->m_requests + 1, &flag, &stat.m_status));
- if (flag) {
- data->deserialize(stat);
- return stat;
- } else
- return optional<status>();
- } else {
- return optional<status>();
- }
-}
-
-// We're receiving a type that has an associated MPI datatype, so we
-// map directly to that datatype.
-template<typename T>
-request
-communicator::irecv_impl(int source, int tag, T& value, mpl::true_) const
-{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (const_cast<T*>(&value), 1,
- get_mpi_datatype<T>(value),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
-}
-
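-// Non-blocking receive of a serialized value: post a receive for the packed
-// size first; the request's handler later receives and unpacks the payload.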
-template<typename T>
-request
-communicator::irecv_impl(int source, int tag, T& value, mpl::false_) const
-{
- typedef detail::serialized_irecv_data<T> data_t;
- shared_ptr<data_t> data(new data_t(*this, source, tag, value));
- request req;
- req.m_data = data;
- req.m_handler = request::handle_serialized_irecv<T>;
-
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&data->count, 1,
- get_mpi_datatype<std::size_t>(data->count),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
-
- return req;
-}
-
-template<typename T>
-request
-communicator::irecv(int source, int tag, T& value) const
-{
- return this->irecv_impl(source, tag, value, is_mpi_datatype<T>());
-}
-
-template<typename T>
-request
-communicator::array_irecv_impl(int source, int tag, T* values, int n,
- mpl::true_) const
-{
- request req;
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (const_cast<T*>(values), n,
- get_mpi_datatype<T>(*values),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
- return req;
-}
-
-template<typename T>
-request
-communicator::array_irecv_impl(int source, int tag, T* values, int n,
- mpl::false_) const
-{
- typedef detail::serialized_array_irecv_data<T> data_t;
- shared_ptr<data_t> data(new data_t(*this, source, tag, values, n));
- request req;
- req.m_data = data;
- req.m_handler = request::handle_serialized_array_irecv<T>;
-
- BOOST_MPI_CHECK_RESULT(MPI_Irecv,
- (&data->count, 1,
- get_mpi_datatype<std::size_t>(data->count),
- source, tag, MPI_Comm(*this), &req.m_requests[0]));
-
- return req;
-}
-
-
-// Array irecv must receive the elements directly into a buffer.
-template<typename T>
-request communicator::irecv(int source, int tag, T* values, int n) const
-{
- return this->array_irecv_impl(source, tag, values, n, is_mpi_datatype<T>());
-}
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-communicator::send<packed_oarchive>(int dest, int tag,
- const packed_oarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-communicator::send<packed_skeleton_oarchive>
- (int dest, int tag, const packed_skeleton_oarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL void
-communicator::send<content>(int dest, int tag, const content& c) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL status
-communicator::recv<packed_iarchive>(int source, int tag,
- packed_iarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL status
-communicator::recv<packed_skeleton_iarchive>
- (int source, int tag, packed_skeleton_iarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL status
-communicator::recv<const content>(int source, int tag,
- const content& c) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-inline status
-communicator::recv<content>(int source, int tag,
- content& c) const
-{
- return recv<const content>(source,tag,c);
-}
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL request
-communicator::isend<packed_oarchive>(int dest, int tag,
- const packed_oarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL request
-communicator::isend<packed_skeleton_oarchive>
- (int dest, int tag, const packed_skeleton_oarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL request
-communicator::isend<content>(int dest, int tag, const content& c) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL request
-communicator::irecv<packed_skeleton_iarchive>
- (int source, int tag, packed_skeleton_iarchive& ar) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-BOOST_MPI_DECL request
-communicator::irecv<const content>(int source, int tag,
- const content& c) const;
-
-/**
- * INTERNAL ONLY
- */
-template<>
-inline request
-communicator::irecv<content>(int source, int tag,
- content& c) const
-{
- return irecv<const content>(source, tag, c);
-}
-
-
-} } // end namespace boost::mpi
-
-// If the user has already included skeleton_and_content.hpp, include
-// the code to send/receive skeletons and content.
-#ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
-# include <boost/mpi/detail/communicator_sc.hpp>
-#endif
-
-#ifdef BOOST_MSVC
-# pragma warning(pop)
-#endif
-
-#endif // BOOST_MPI_COMMUNICATOR_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/config.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/config.hpp
deleted file mode 100644
index c83277f66..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/config.hpp
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file config.hpp
- *
- * This header provides MPI configuration details that expose the
- * capabilities of the underlying MPI implementation, and provides
- * auto-linking support on Windows.
- */
-#ifndef BOOST_MPI_CONFIG_HPP
-#define BOOST_MPI_CONFIG_HPP
-
-/* Force MPICH not to define SEEK_SET, SEEK_CUR, and SEEK_END, which
- conflict with the versions in <stdio.h> and <cstdio>. */
-#define MPICH_IGNORE_CXX_SEEK 1
-
-#include <mpi.h>
-#include <boost/config.hpp>
-
-/** @brief Comment this macro out if you are running in a heterogeneous environment.
- *
- * When this flag is enabled, we assume that simple, POD-like types can be
- * transmitted without paying the cost of portable serialization.
- *
- * Comment this out if your platform is not homogeneous and portable
- * serialization/deserialization must be performed.
- *
- * If you do so, check that your MPI implementation supports that kind of environment.
- */
-#define BOOST_MPI_HOMOGENEOUS
-
-// If this is an MPI-2 implementation, define configuration macros for
-// the features we are interested in.
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
-/** @brief Determine if the MPI implementation has support for memory
- * allocation.
- *
- * This macro will be defined when the underlying MPI implementation
- * has support for the MPI-2 memory allocation routines @c
- * MPI_Alloc_mem and @c MPI_Free_mem. When defined, the @c allocator
- * class template will provide Standard Library-compliant access to
- * these memory-allocation routines.
- */
-# define BOOST_MPI_HAS_MEMORY_ALLOCATION
-
-/** @brief Determine if the MPI implementation supports initialization
- * without command-line arguments.
- *
- * This macro will be defined when the underlying implementation
- * supports initialization of MPI without passing along command-line
- * arguments, e.g., @c MPI_Init(NULL, NULL). When defined, the @c
- * environment class will provide a default constructor. This macro is
- * always defined for MPI-2 implementations. */
-# define BOOST_MPI_HAS_NOARG_INITIALIZATION
-#else
-// If this is an MPI-1.x implementation, no-argument initialization of
-// the MPI environment may still be available, but it is not mandatory.
-// Uncomment the following if no-argument initialization is available:
-//# define BOOST_MPI_HAS_NOARG_INITIALIZATION
-#endif
-
-#if defined(MPIAPI)
-# define BOOST_MPI_CALLING_CONVENTION MPIAPI
-#else
-/** @brief Specifies the calling convention that will be used for callbacks
- * from the underlying C MPI.
- *
- * This is a Windows-specific macro, which will be used internally to state
- * the calling convention of any function that is to be used as a callback
- * from MPI. For example, the internally-defined functions that are used in
- * a call to @c MPI_Op_create. This macro is likely only to be useful to
- * users that wish to bypass Boost.MPI, registering their own callbacks in
- * certain cases, e.g., through @c MPI_Op_create.
- */
-# define BOOST_MPI_CALLING_CONVENTION
-#endif
-
-/** @brief Indicates that MPI_Bcast supports MPI_BOTTOM.
- *
- * Some implementations have a broken MPI_Bcast with respect to MPI_BOTTOM.
- * BullX MPI and LAM seem to be among them, at least for some versions.
- * The `broadcast_test.cpp` test `test_skeleton_and_content` can be used to
- * detect this.
- */
-#define BOOST_MPI_BCAST_BOTTOM_WORKS_FINE
-
-#if defined(LAM_MPI)
-// Configuration for LAM/MPI
-# define BOOST_MPI_HAS_MEMORY_ALLOCATION
-# define BOOST_MPI_HAS_NOARG_INITIALIZATION
-# undef BOOST_MPI_BCAST_BOTTOM_WORKS_FINE
-#elif defined(MPICH_NAME)
-// Configuration for MPICH
-#endif
-
-/*****************************************************************************
- * *
- * DLL import/export options *
- * *
- *****************************************************************************/
-
-#if (defined(BOOST_MPI_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) && !defined(BOOST_MPI_STATIC_LINK)
-# if defined(BOOST_MPI_SOURCE)
-# define BOOST_MPI_DECL BOOST_SYMBOL_EXPORT
-# define BOOST_MPI_BUILD_DLL
-# else
-# define BOOST_MPI_DECL BOOST_SYMBOL_IMPORT
-# endif
-#endif
-
-#ifndef BOOST_MPI_DECL
-# define BOOST_MPI_DECL
-#endif
-
-#if !defined(BOOST_MPI_NO_LIB) && !defined(BOOST_MPI_SOURCE) && !defined(BOOST_ALL_NO_LIB) && defined(__cplusplus)
-# define BOOST_LIB_NAME boost_mpi
-# if defined(BOOST_MPI_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)
-# define BOOST_DYN_LINK
-# endif
-# ifdef BOOST_MPI_DIAG
-# define BOOST_LIB_DIAGNOSTIC
-# endif
-# include <boost/config/auto_link.hpp>
-#endif
-
-#endif // BOOST_MPI_CONFIG_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype.hpp
deleted file mode 100644
index 1f069977d..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype.hpp
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2004 The Trustees of Indiana University.
-// Copyright 2005 Matthias Troyer.
-// Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-// Matthias Troyer
-
-/** @file datatype.hpp
- *
- * This header provides the mapping from C++ types to MPI data types.
- */
-#ifndef BOOST_MPI_DATATYPE_HPP
-#define BOOST_MPI_DATATYPE_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/mpi/datatype_fwd.hpp>
-#include <mpi.h>
-#include <boost/config.hpp>
-#include <boost/mpl/bool.hpp>
-#include <boost/mpl/or.hpp>
-#include <boost/mpl/and.hpp>
-#include <boost/mpi/detail/mpi_datatype_cache.hpp>
-#include <boost/mpl/assert.hpp>
-#include <boost/archive/basic_archive.hpp>
-#include <boost/serialization/item_version_type.hpp>
-#include <utility> // for std::pair
-
-#if defined(__cplusplus) && (201103L <= __cplusplus)
-#include <array>
-#endif
-
-namespace boost { namespace mpi {
-
-/**
- * @brief Type trait that determines if there exists a built-in
- * integer MPI data type for a given C++ type.
- *
- * This type trait determines when there is a direct mapping from a
- * C++ type to an MPI data type that is classified as an integer data
- * type. See @c is_mpi_builtin_datatype for general information about
- * built-in MPI data types.
- */
-template<typename T>
-struct is_mpi_integer_datatype
- : public boost::mpl::false_ { };
-
-/**
- * @brief Type trait that determines if there exists a built-in
- * floating point MPI data type for a given C++ type.
- *
- * This type trait determines when there is a direct mapping from a
- * C++ type to an MPI data type that is classified as a floating
- * point data type. See @c is_mpi_builtin_datatype for general
- * information about built-in MPI data types.
- */
-template<typename T>
-struct is_mpi_floating_point_datatype
- : public boost::mpl::false_ { };
-
-/**
- * @brief Type trait that determines if there exists a built-in
- * logical MPI data type for a given C++ type.
- *
- * This type trait determines when there is a direct mapping from a
- * C++ type to an MPI data type that is classified as an logical data
- * type. See @c is_mpi_builtin_datatype for general information about
- * built-in MPI data types.
- */
-template<typename T>
-struct is_mpi_logical_datatype
- : public boost::mpl::false_ { };
-
-/**
- * @brief Type trait that determines if there exists a built-in
- * complex MPI data type for a given C++ type.
- *
- * This type trait determines when there is a direct mapping from a
- * C++ type to an MPI data type that is classified as an complex data
- * type. See @c is_mpi_builtin_datatype for general information about
- * built-in MPI data types.
- */
-template<typename T>
-struct is_mpi_complex_datatype
- : public boost::mpl::false_ { };
-
-/**
- * @brief Type trait that determines if there exists a built-in
- * byte MPI data type for a given C++ type.
- *
- * This type trait determines when there is a direct mapping from a
- * C++ type to an MPI data type that is classified as an byte data
- * type. See @c is_mpi_builtin_datatype for general information about
- * built-in MPI data types.
- */
-template<typename T>
-struct is_mpi_byte_datatype
- : public boost::mpl::false_ { };
-
-/** @brief Type trait that determines if there exists a built-in MPI
- * data type for a given C++ type.
- *
- * This type trait determines when there is a direct mapping from a
- * C++ type to an MPI type. For instance, the C++ @c int type maps
- * directly to the MPI type @c MPI_INT. When there is a direct
- * mapping from the type @c T to an MPI type, @c
- * is_mpi_builtin_datatype will derive from @c mpl::true_ and the MPI
- * data type will be accessible via @c get_mpi_datatype.
- *
- * In general, users should not need to specialize this
- * trait. However, if you have an additional C++ type that can map
- * directly to one of MPI's built-in types, specialize either this
- * trait or one of the traits corresponding to categories of MPI data
- * types (@c is_mpi_integer_datatype, @c
- * is_mpi_floating_point_datatype, @c is_mpi_logical_datatype, @c
- * is_mpi_complex_datatype, or @c is_mpi_byte_datatype). @c
- * is_mpi_builtin_datatype derives from @c mpl::true_ if any of the traits
- * corresponding to MPI data type categories derives from @c mpl::true_.
- */
-template<typename T>
-struct is_mpi_builtin_datatype
- : boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_floating_point_datatype<T>,
- is_mpi_logical_datatype<T>,
- is_mpi_complex_datatype<T>,
- is_mpi_byte_datatype<T> >
-{
-};
-
-/** @brief Type trait that determines if a C++ type can be mapped to
- * an MPI data type.
- *
- * This type trait determines if it is possible to build an MPI data
- * type that represents a C++ data type. When this is the case, @c
- * is_mpi_datatype derives @c mpl::true_ and the MPI data type will
- * be accessible via @c get_mpi_datatype.
- *
- * For any C++ type that maps to a built-in MPI data type (see @c
- * is_mpi_builtin_datatype), @c is_mpi_datatype is trivially
- * true. However, any POD ("Plain Old Data") type containing types
- * that themselves can be represented by MPI data types can itself be
- * represented as an MPI data type. For instance, a @c point3d class
- * containing three @c double values can be represented as an MPI
- * data type. To do so, first make the data type Serializable (using
- * the Boost.Serialization library); then, specialize the @c
- * is_mpi_datatype trait for the point type so that it will derive @c
- * mpl::true_:
- *
- * @code
- * namespace boost { namespace mpi {
- * template<> struct is_mpi_datatype<point>
- * : public mpl::true_ { };
- * } }
- * @endcode
- */
-template<typename T>
-struct is_mpi_datatype
- : public is_mpi_builtin_datatype<T>
-{
-};
-
-/** @brief Returns an MPI data type for a C++ type.
- *
- * The function creates an MPI data type for the given object @c
- * x. The first time it is called for a class @c T, the MPI data type
- * is created and cached. Subsequent calls for objects of the same
- * type @c T return the cached MPI data type. The type @c T must
- * allow creation of an MPI data type. That is, it must be
- * Serializable and @c is_mpi_datatype<T> must derive @c mpl::true_.
- *
- * For fundamental MPI types, a copy of the MPI data type of the MPI
- * library is returned.
- *
- * Note that since the data types are cached, the caller should never
- * call @c MPI_Type_free() for the MPI data type returned by this
- * call.
- *
- * @param x for an optimized call, a constructed object of the type
- * should be passed; otherwise, an object will be
- * default-constructed.
- *
- * @returns The MPI data type corresponding to type @c T.
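- *
- * For example (an illustrative sketch):
- *
- * @code
- * double x = 0.0;
- * MPI_Datatype dt = boost::mpi::get_mpi_datatype(x); // yields MPI_DOUBLE
- * @endcode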
- */
-template<typename T> MPI_Datatype get_mpi_datatype(const T& x)
-{
- BOOST_MPL_ASSERT((is_mpi_datatype<T>));
- return detail::mpi_datatype_cache().datatype(x);
-}
-
-// Don't parse this part when we're generating Doxygen documentation.
-#ifndef BOOST_MPI_DOXYGEN
-
-/// INTERNAL ONLY
-#define BOOST_MPI_DATATYPE(CppType, MPIType, Kind) \
-template<> \
-inline MPI_Datatype \
-get_mpi_datatype< CppType >(const CppType&) { return MPIType; } \
- \
-template<> \
- struct BOOST_JOIN(is_mpi_,BOOST_JOIN(Kind,_datatype))< CppType > \
-: boost::mpl::true_ \
-{}
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(packed, MPI_PACKED, builtin);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(char, MPI_CHAR, builtin);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(short, MPI_SHORT, integer);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(int, MPI_INT, integer);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(long, MPI_LONG, integer);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(float, MPI_FLOAT, floating_point);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(double, MPI_DOUBLE, floating_point);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(long double, MPI_LONG_DOUBLE, floating_point);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(unsigned char, MPI_UNSIGNED_CHAR, builtin);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(unsigned short, MPI_UNSIGNED_SHORT, integer);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(unsigned, MPI_UNSIGNED, integer);
-
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(unsigned long, MPI_UNSIGNED_LONG, integer);
-
-/// INTERNAL ONLY
-#define BOOST_MPI_LIST2(A, B) A, B
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(float, int)>, MPI_FLOAT_INT,
- builtin);
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(double, int)>, MPI_DOUBLE_INT,
- builtin);
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(long double, int)>,
- MPI_LONG_DOUBLE_INT, builtin);
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(long, int>), MPI_LONG_INT,
- builtin);
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(short, int>), MPI_SHORT_INT,
- builtin);
-/// INTERNAL ONLY
-BOOST_MPI_DATATYPE(std::pair<BOOST_MPI_LIST2(int, int>), MPI_2INT, builtin);
-#undef BOOST_MPI_LIST2
-
-/// specialization of is_mpi_datatype for pairs
-template <class T, class U>
-struct is_mpi_datatype<std::pair<T,U> >
- : public mpl::and_<is_mpi_datatype<T>,is_mpi_datatype<U> >
-{
-};
-
-/// specialization of is_mpi_datatype for arrays
-#if defined(__cplusplus) && (201103L <= __cplusplus)
-template<class T, std::size_t N>
-struct is_mpi_datatype<std::array<T, N> >
- : public is_mpi_datatype<T>
-{
-};
-#endif
-
-// Define wchar_t specialization of is_mpi_datatype, if possible.
-#if !defined(BOOST_NO_INTRINSIC_WCHAR_T) && \
- (defined(MPI_WCHAR) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
-BOOST_MPI_DATATYPE(wchar_t, MPI_WCHAR, builtin);
-#endif
-
-// Define long long or __int64 specialization of is_mpi_datatype, if possible.
-#if defined(BOOST_HAS_LONG_LONG) && \
- (defined(MPI_LONG_LONG_INT) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
-BOOST_MPI_DATATYPE(long long, MPI_LONG_LONG_INT, builtin);
-#elif defined(BOOST_HAS_MS_INT64) && \
- (defined(MPI_LONG_LONG_INT) || (defined(MPI_VERSION) && MPI_VERSION >= 2))
-BOOST_MPI_DATATYPE(__int64, MPI_LONG_LONG_INT, builtin);
-#endif
-
-// Define unsigned long long or unsigned __int64 specialization of
-// is_mpi_datatype, if possible. We separate this from the check for
-// the (signed) long long/__int64 because some MPI implementations
-// (e.g., MPICH-MX) have MPI_LONG_LONG_INT but not
-// MPI_UNSIGNED_LONG_LONG.
-#if defined(BOOST_HAS_LONG_LONG) && \
- (defined(MPI_UNSIGNED_LONG_LONG) \
- || (defined(MPI_VERSION) && MPI_VERSION >= 2))
-BOOST_MPI_DATATYPE(unsigned long long, MPI_UNSIGNED_LONG_LONG, builtin);
-#elif defined(BOOST_HAS_MS_INT64) && \
- (defined(MPI_UNSIGNED_LONG_LONG) \
- || (defined(MPI_VERSION) && MPI_VERSION >= 2))
-BOOST_MPI_DATATYPE(unsigned __int64, MPI_UNSIGNED_LONG_LONG, builtin);
-#endif
-
-// Define signed char specialization of is_mpi_datatype, if possible.
-#if defined(MPI_SIGNED_CHAR) || (defined(MPI_VERSION) && MPI_VERSION >= 2)
-BOOST_MPI_DATATYPE(signed char, MPI_SIGNED_CHAR, builtin);
-#endif
-
-
-#endif // Doxygen
-
-namespace detail {
- inline MPI_Datatype build_mpi_datatype_for_bool()
- {
- MPI_Datatype type;
- MPI_Type_contiguous(sizeof(bool), MPI_BYTE, &type);
- MPI_Type_commit(&type);
- return type;
- }
-}
-
-/// Support for bool. There is no corresponding MPI_BOOL.
-/// INTERNAL ONLY
-template<>
-inline MPI_Datatype get_mpi_datatype<bool>(const bool&)
-{
- static MPI_Datatype type = detail::build_mpi_datatype_for_bool();
- return type;
-}
-
-/// INTERNAL ONLY
-template<>
-struct is_mpi_datatype<bool>
- : boost::mpl::bool_<true>
-{};
-
-
-#ifndef BOOST_MPI_DOXYGEN
-// direct support for special primitive data types of the serialization library
-BOOST_MPI_DATATYPE(boost::archive::library_version_type, get_mpi_datatype(uint_least16_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::version_type, get_mpi_datatype(uint_least8_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::class_id_type, get_mpi_datatype(int_least16_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::class_id_reference_type, get_mpi_datatype(int_least16_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::class_id_optional_type, get_mpi_datatype(int_least16_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::object_id_type, get_mpi_datatype(uint_least32_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::object_reference_type, get_mpi_datatype(uint_least32_t()), integer);
-BOOST_MPI_DATATYPE(boost::archive::tracking_type, get_mpi_datatype(bool()), builtin);
-BOOST_MPI_DATATYPE(boost::serialization::collection_size_type, get_mpi_datatype(std::size_t()), integer);
-BOOST_MPI_DATATYPE(boost::serialization::item_version_type, get_mpi_datatype(uint_least8_t()), integer);
-#endif // Doxygen
-
-
-} } // end namespace boost::mpi
-
-// direct support for special primitive data types of the serialization library
-// in the case of homogeneous systems
-// define a macro to make explicit designation of this more transparent
-#define BOOST_IS_MPI_DATATYPE(T) \
-namespace boost { \
-namespace mpi { \
-template<> \
-struct is_mpi_datatype< T > : mpl::true_ {}; \
-}} \
-/**/
-
-
-#endif // BOOST_MPI_MPI_DATATYPE_HPP
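For reference, a minimal sketch of how the interface in the removed datatype.hpp is typically used: get_mpi_datatype() yields a cached MPI_Datatype that can be handed straight to raw MPI calls and must not be released with MPI_Type_free(). The names env and values below are illustrative only, not part of the header.

    #include <boost/mpi.hpp>
    #include <vector>

    int main(int argc, char* argv[])
    {
        boost::mpi::environment env(argc, argv);
        std::vector<double> values(16, 1.0);

        // Cached handle owned by Boost.MPI; never call MPI_Type_free() on it.
        MPI_Datatype dt = boost::mpi::get_mpi_datatype<double>();
        MPI_Bcast(&values[0], static_cast<int>(values.size()), dt, 0, MPI_COMM_WORLD);
        return 0;
    }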
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype_fwd.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype_fwd.hpp
deleted file mode 100644
index 3a5f94139..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/datatype_fwd.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file datatype_fwd.hpp
- *
- * This header provides forward declarations for the contents of the
- * header @c datatype.hpp. It is expected to be used primarily by
- * user-defined C++ classes that need to specialize @c
- * is_mpi_datatype.
- */
-#ifndef BOOST_MPI_DATATYPE_FWD_HPP
-#define BOOST_MPI_DATATYPE_FWD_HPP
-
-#include <boost/mpi/config.hpp>
-
-namespace boost { namespace mpi {
-
-template<typename T> struct is_mpi_builtin_datatype;
-template<typename T> struct is_mpi_integer_datatype;
-template<typename T> struct is_mpi_floating_point_datatype;
-template<typename T> struct is_mpi_logical_datatype;
-template<typename T> struct is_mpi_complex_datatype;
-template<typename T> struct is_mpi_byte_datatype;
-template<typename T> struct is_mpi_datatype;
-template<typename T> MPI_Datatype get_mpi_datatype(const T& x);
-template<typename T> MPI_Datatype get_mpi_datatype()
- { return get_mpi_datatype(T());}
-
-/// a dummy data type giving MPI_PACKED as its MPI_Datatype
-struct packed {};
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_MPI_DATATYPE_FWD_HPP
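A hedged sketch of the customization point these forward declarations serve: a user-defined type becomes transmissible as a fixed MPI datatype by providing a serialize() member and specializing is_mpi_datatype (the BOOST_IS_MPI_DATATYPE macro from datatype.hpp is an equivalent shorthand). The type gps_position is an illustrative example, not part of the library.

    #include <boost/mpi/datatype.hpp>
    #include <boost/serialization/serialization.hpp>
    #include <boost/mpl/bool.hpp>

    struct gps_position
    {
        int degrees;
        int minutes;
        float seconds;

        template<class Archive>
        void serialize(Archive& ar, const unsigned int /*version*/)
        {
            ar & degrees & minutes & seconds;
        }
    };

    namespace boost { namespace mpi {
      // Mark the type as mapping onto a fixed MPI datatype.
      template<>
      struct is_mpi_datatype<gps_position> : mpl::true_ {};
    } }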
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_iprimitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_iprimitive.hpp
deleted file mode 100644
index 388cd44c1..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_iprimitive.hpp
+++ /dev/null
@@ -1,123 +0,0 @@
-// (C) Copyright 2005-2007 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_BINARY_BUFFER_IPRIMITIVE_HPP
-#define BOOST_MPI_BINARY_BUFFER_IPRIMITIVE_HPP
-
-#include <mpi.h>
-#include <iostream>
-#include <cstddef> // size_t
-#include <boost/config.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/assert.hpp>
-#include <boost/mpl/assert.hpp>
-#include <boost/serialization/array.hpp>
-#include <boost/serialization/is_bitwise_serializable.hpp>
-#include <vector>
-#include <boost/mpi/allocator.hpp>
-#include <cstring> // for memcpy
-#include <cassert>
-
-namespace boost { namespace mpi {
-
-/// deserialization by copying raw bytes out of a binary buffer
-
-class BOOST_MPI_DECL binary_buffer_iprimitive
-{
-public:
- /// the type of the buffer from which the data is unpacked upon deserialization
- typedef std::vector<char, allocator<char> > buffer_type;
-
- binary_buffer_iprimitive(buffer_type & b, MPI_Comm const &, int position = 0)
- : buffer_(b),
- position(position)
- {
- }
-
- void* address ()
- {
- return &buffer_.front();
- }
-
- void const* address () const
- {
- return &buffer_.front();
- }
-
- const std::size_t& size() const
- {
- return size_ = buffer_.size();
- }
-
- void resize(std::size_t s)
- {
- buffer_.resize(s);
- }
-
- void load_binary(void *address, std::size_t count)
- {
- load_impl(address,count);
- }
-
- // fast loading of arrays of fundamental types
- template<class T>
- void load_array(serialization::array_wrapper<T> const& x, unsigned int /* file_version */)
- {
- BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
- if (x.count())
- load_impl(x.address(), sizeof(T)*x.count());
- }
-
- typedef serialization::is_bitwise_serializable<mpl::_1> use_array_optimization;
-
- template<class T>
- void load(serialization::array_wrapper<T> const& x)
- {
- load_array(x,0u);
- }
-
- // default loading of primitives.
- template<class T>
- void load( T & t)
- {
- BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
- load_impl(&t, sizeof(T));
- }
-
- template<class CharType>
- void load(std::basic_string<CharType> & s)
- {
- unsigned int l;
- load(l);
- // borland de-allocator fixup
- #if BOOST_WORKAROUND(_RWSTD_VER, BOOST_TESTED_AT(20101))
- if(NULL != s.data())
- #endif
- s.resize(l);
- // note breaking a rule here - could be a problem on some platform
- load_impl(const_cast<char *>(s.data()),l);
- }
-
-private:
-
- void load_impl(void * p, int l)
- {
- assert(position+l<=static_cast<int>(buffer_.size()));
- if (l)
- std::memcpy(p,&buffer_[position],l);
- position += l;
- }
-
- buffer_type & buffer_;
- mutable std::size_t size_;
- int position;
-};
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_BINARY_BUFFER_IPRIMITIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_oprimitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_oprimitive.hpp
deleted file mode 100644
index 1de441d26..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/binary_buffer_oprimitive.hpp
+++ /dev/null
@@ -1,104 +0,0 @@
-// (C) Copyright 2005-2007 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_BINARY_BUFFER_OPRIMITIVE_HPP
-#define BOOST_MPI_BINARY_BUFFER_OPRIMITIVE_HPP
-
-#include <mpi.h>
-#include <iostream>
-#include <cstddef> // size_t
-#include <boost/config.hpp>
-
-#include <boost/serialization/array.hpp>
-#include <boost/serialization/is_bitwise_serializable.hpp>
-#include <boost/assert.hpp>
-#include <boost/mpl/assert.hpp>
-#include <vector>
-#include <boost/mpi/allocator.hpp>
-#include <boost/mpl/always.hpp>
-#include <boost/type_traits/remove_const.hpp>
-
-namespace boost { namespace mpi {
-
-/// serialization using binary copy into a buffer
-
-class BOOST_MPI_DECL binary_buffer_oprimitive
-{
-public:
- /// the type of the buffer into which the data is packed upon serialization
- typedef std::vector<char, allocator<char> > buffer_type;
-
- binary_buffer_oprimitive(buffer_type & b, MPI_Comm const &)
- : buffer_(b)
- {
- }
-
- void const * address() const
- {
- return &buffer_.front();
- }
-
- const std::size_t& size() const
- {
- return size_ = buffer_.size();
- }
-
- void save_binary(void const *address, std::size_t count)
- {
- save_impl(address,count);
- }
-
- // fast saving of arrays
- template<class T>
- void save_array(serialization::array_wrapper<T> const& x, unsigned int /* file_version */)
- {
-
- BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
- if (x.count())
- save_impl(x.address(), x.count()*sizeof(T));
- }
-
- template<class T>
- void save(serialization::array_wrapper<T> const& x)
- {
- save_array(x,0u);
- }
-
- typedef serialization::is_bitwise_serializable<mpl::_1> use_array_optimization;
-
- // default saving of primitives.
- template<class T>
- void save(const T & t)
- {
- BOOST_MPL_ASSERT((serialization::is_bitwise_serializable<BOOST_DEDUCED_TYPENAME remove_const<T>::type>));
- save_impl(&t, sizeof(T));
- }
-
- template<class CharType>
- void save(const std::basic_string<CharType> &s)
- {
- unsigned int l = static_cast<unsigned int>(s.size());
- save(l);
- save_impl(s.data(),s.size());
- }
-
-private:
-
- void save_impl(void const * p, int l)
- {
- char const* ptr = reinterpret_cast<char const*>(p);
- buffer_.insert(buffer_.end(),ptr,ptr+l);
- }
-
- buffer_type& buffer_;
- mutable std::size_t size_;
-};
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_BINARY_BUFFER_OPRIMITIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/broadcast_sc.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/broadcast_sc.hpp
deleted file mode 100644
index c84da662a..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/broadcast_sc.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Allows broadcast of skeletons via proxy.
-
-// This header may only be included after both the broadcast.hpp and
-// skeleton_and_content.hpp headers have been included.
-#ifndef BOOST_MPI_BROADCAST_SC_HPP
-#define BOOST_MPI_BROADCAST_SC_HPP
-
-namespace boost { namespace mpi {
-
-template<typename T>
-inline void
-broadcast(const communicator& comm, skeleton_proxy<T>& proxy, int root)
-{
- const skeleton_proxy<T>& const_proxy(proxy);
- broadcast(comm, const_proxy, root);
-}
-
-template<typename T>
-void
-broadcast(const communicator& comm, const skeleton_proxy<T>& proxy, int root)
-{
- if (comm.rank() == root) {
- packed_skeleton_oarchive oa(comm);
- oa << proxy.object;
- broadcast(comm, oa, root);
- } else {
- packed_skeleton_iarchive ia(comm);
- broadcast(comm, ia, root);
- ia >> proxy.object;
- }
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_BROADCAST_SC_HPP
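A hedged sketch of the skeleton/content pattern these overloads enable: the structure of a container is broadcast once, after which only its contents need to be resent. The names world and values are illustrative.

    #include <boost/mpi.hpp>
    #include <boost/mpi/skeleton_and_content.hpp>
    #include <boost/serialization/list.hpp>
    #include <list>

    void broadcast_repeatedly(const boost::mpi::communicator& world,
                              std::list<int>& values)
    {
        namespace mpi = boost::mpi;

        // Structure (size and layout) travels once...
        mpi::broadcast(world, mpi::skeleton(values), 0);

        // ...then only the element data is rebroadcast on later passes.
        mpi::content c = mpi::get_content(values);
        for (int step = 0; step < 10; ++step)
            mpi::broadcast(world, c, 0);
    }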
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/communicator_sc.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/communicator_sc.hpp
deleted file mode 100644
index 1dfcc3c52..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/communicator_sc.hpp
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Skeleton and content support for communicators
-
-// This header should be included only after both communicator.hpp and
-// skeleton_and_content.hpp have been included.
-#ifndef BOOST_MPI_COMMUNICATOR_SC_HPP
-#define BOOST_MPI_COMMUNICATOR_SC_HPP
-
-namespace boost { namespace mpi {
-
-template<typename T>
-void
-communicator::send(int dest, int tag, const skeleton_proxy<T>& proxy) const
-{
- packed_skeleton_oarchive ar(*this);
- ar << proxy.object;
- send(dest, tag, ar);
-}
-
-template<typename T>
-status
-communicator::recv(int source, int tag, const skeleton_proxy<T>& proxy) const
-{
- packed_skeleton_iarchive ar(*this);
- status result = recv(source, tag, ar);
- ar >> proxy.object;
- return result;
-}
-
-template<typename T>
-status communicator::recv(int source, int tag, skeleton_proxy<T>& proxy) const
-{
- packed_skeleton_iarchive ar(*this);
- status result = recv(source, tag, ar);
- ar >> proxy.object;
- return result;
-}
-
-template<typename T>
-request
-communicator::isend(int dest, int tag, const skeleton_proxy<T>& proxy) const
-{
- shared_ptr<packed_skeleton_oarchive>
- archive(new packed_skeleton_oarchive(*this));
-
- *archive << proxy.object;
- request result = isend(dest, tag, *archive);
- result.m_data = archive;
- return result;
-}
-
-namespace detail {
- template<typename T>
- struct serialized_irecv_data<const skeleton_proxy<T> >
- {
- serialized_irecv_data(const communicator& comm, int source, int tag,
- skeleton_proxy<T> proxy)
- : comm(comm), source(source), tag(tag), isa(comm),
- ia(isa.get_skeleton()), proxy(proxy) { }
-
- void deserialize(status& stat)
- {
- isa >> proxy.object;
- stat.m_count = 1;
- }
-
- communicator comm;
- int source;
- int tag;
- std::size_t count;
- packed_skeleton_iarchive isa;
- packed_iarchive& ia;
- skeleton_proxy<T> proxy;
- };
-
- template<typename T>
- struct serialized_irecv_data<skeleton_proxy<T> >
- : public serialized_irecv_data<const skeleton_proxy<T> >
- {
- typedef serialized_irecv_data<const skeleton_proxy<T> > inherited;
-
- serialized_irecv_data(const communicator& comm, int source, int tag,
- const skeleton_proxy<T>& proxy)
- : inherited(comm, source, tag, proxy) { }
- };
-}
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_COMMUNICATOR_SC_HPP
-
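The point-to-point counterpart of the overloads above, as a hedged sketch: rank 0 sends the skeleton once and then the content; rank 1 receives into a matching structure. The tags and the container name are illustrative.

    #include <boost/mpi.hpp>
    #include <boost/mpi/skeleton_and_content.hpp>
    #include <boost/serialization/vector.hpp>
    #include <vector>

    void exchange(const boost::mpi::communicator& world, std::vector<double>& data)
    {
        namespace mpi = boost::mpi;

        if (world.rank() == 0) {
            world.send(1, 0, mpi::skeleton(data));     // structure first
            world.send(1, 1, mpi::get_content(data));  // then the values
        } else if (world.rank() == 1) {
            world.recv(0, 0, mpi::skeleton(data));     // shape the receiver
            mpi::content c = mpi::get_content(data);
            world.recv(0, 1, c);                       // then fill it
        }
    }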
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/computation_tree.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/computation_tree.hpp
deleted file mode 100644
index 83acd72d8..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/computation_tree.hpp
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (C) 2005 Douglas Gregor.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Compute parents, children, levels, etc. to effect a parallel
-// computation tree.
-#ifndef BOOST_MPI_COMPUTATION_TREE_HPP
-#define BOOST_MPI_COMPUTATION_TREE_HPP
-
-namespace boost { namespace mpi { namespace detail {
-
-/**
- * @brief Aids tree-based parallel collective algorithms.
- *
- * Objects of this type compute the parents, children, and levels needed
- * to arrange the processes of a communicator into a computation tree.
- */
-class computation_tree
-{
- public:
- computation_tree(int rank, int size, int root, int branching_factor = -1);
-
- /// Returns the branching factor of the tree.
- int branching_factor() const { return branching_factor_; }
-
- /// Returns the level in the tree on which this process resides.
- int level() const { return level_; }
-
- /**
- * Returns the index corresponding to the n^th level of the tree.
- *
- * @param n The level in the tree whose index will be returned.
- */
- int level_index(int n) const;
-
- /**
- * @brief Returns the parent of this process.
- *
- * @returns If this process is the root, returns itself. Otherwise,
- * returns the process number that is the parent in the computation
- * tree.
- */
- int parent() const;
-
- /// Returns the index for the first child of this process.
- int child_begin() const;
-
- /**
- * @brief The default branching factor within the computation tree.
- *
- * This is the default branching factor for the computation tree, to
- * be used by any computation tree that does not fix the branching
- * factor itself. The default is initialized to 3, but may be
- * changed by the application so long as all processes have the same
- * branching factor.
- */
- static int default_branching_factor;
-
- protected:
- /// The rank of this process in the computation tree.
- int rank;
-
- /// The number of processes participating in the computation tree.
- int size;
-
- /// The process number that is acting as the root in the computation
- /// tree.
- int root;
-
- /**
- * @brief The branching factor within the computation tree.
- *
- * This is the default number of children that each node in a
- * computation tree will have. This value will be used for
- * collective operations that use tree-based algorithms.
- */
- int branching_factor_;
-
- /// The level in the tree at which this process resides.
- int level_;
-};
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_COMPUTATION_TREE_HPP
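Although computation_tree is an internal helper, the removed interface reads naturally as "each rank learns its position in a k-ary tree rooted at root". A hedged illustration using only the members declared above; linking against the Boost.MPI library is assumed, and describe_tree_position is an illustrative name.

    #include <boost/mpi.hpp>
    #include <boost/mpi/detail/computation_tree.hpp>
    #include <iostream>

    void describe_tree_position(const boost::mpi::communicator& world, int root)
    {
        boost::mpi::detail::computation_tree tree(world.rank(), world.size(), root);

        std::cout << "rank " << world.rank()
                  << ": level " << tree.level()
                  << ", parent " << tree.parent()
                  << ", first child " << tree.child_begin()
                  << ", branching factor " << tree.branching_factor()
                  << std::endl;
    }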
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/content_oarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/content_oarchive.hpp
deleted file mode 100644
index 215884724..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/content_oarchive.hpp
+++ /dev/null
@@ -1,66 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
-#define BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
-
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/basic_archive.hpp>
-#include <boost/mpi/detail/ignore_skeleton_oarchive.hpp>
-#include <boost/mpi/detail/mpi_datatype_primitive.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/archive/detail/register_archive.hpp>
-
-namespace boost { namespace mpi {
-
-namespace detail {
- // an archive wrapper that stores only the data members but not the
- // special types defined by the serialization library
- // to define the data skeletons (classes, pointers, container sizes, ...)
-
- class BOOST_MPI_DECL content_oarchive
- : public mpi_datatype_primitive,
- public ignore_skeleton_oarchive<content_oarchive>
- {
- public:
- content_oarchive()
- : committed(false)
- {}
-
- content get_content()
- {
- if (!committed)
- {
- // create the content holder only once
- c=this->get_mpi_datatype();
- committed=true;
- }
- return c;
- }
-
- private:
- bool committed;
- content c;
- };
-} // end namespace detail
-
-template <class T>
-const content get_content(const T& x)
-{
- detail::content_oarchive ar;
- ar << x;
- return ar.get_content();
-}
-
-} } // end namespace boost::mpi
-
-// required by export
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::content_oarchive)
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::ignore_skeleton_oarchive<boost::mpi::detail::content_oarchive>)
-BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::detail::content_oarchive)
-#endif // BOOST_MPI_DETAIL_CONTENT_OARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_iarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_iarchive.hpp
deleted file mode 100644
index 0dfcaf974..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_iarchive.hpp
+++ /dev/null
@@ -1,80 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
-#define BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
-
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/detail/iserializer.hpp>
-#include <boost/archive/detail/interface_iarchive.hpp>
-#include <boost/archive/detail/common_iarchive.hpp>
-#include <boost/serialization/collection_size_type.hpp>
-
-namespace boost { namespace mpi { namespace detail {
-
-template<class Archive, class ImplementationArchive>
-class forward_skeleton_iarchive
- : public archive::detail::common_iarchive<Archive>
-{
-public:
-
- typedef ImplementationArchive implementation_archive_type;
-
- forward_skeleton_iarchive(implementation_archive_type& ar)
- : archive::detail::common_iarchive<Archive>(archive::no_header),
- implementation_archive(ar)
- {
- }
-
-#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
-public:
-#else
- friend class archive::detail::interface_iarchive<Archive>;
- friend class archive::load_access;
-protected:
-#endif
-
- template<class T>
- void load_override(T & t)
- {
- archive::load(* this->This(), t);
- }
-
-#define BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(T) \
- void load_override(T & t) \
- { \
- implementation_archive >> t; \
- }
-
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_optional_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::version_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_reference_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_id_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_reference_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::tracking_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_name_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(serialization::collection_size_type)
-
- void load_override(std::string & s)
- {
- serialization::collection_size_type length(s.size());
- load_override(length);
- s.resize(length);
- }
-
-#undef BOOST_ARCHIVE_FORWARD_IMPLEMENTATION
-protected:
- /// the actual archive used to serialize the information we actually want to store
- implementation_archive_type& implementation_archive;
-};
-
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_DETAIL_FORWARD_SKELETON_IARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_oarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_oarchive.hpp
deleted file mode 100644
index 1a170b4ab..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/forward_skeleton_oarchive.hpp
+++ /dev/null
@@ -1,78 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
-#define BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
-
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/detail/oserializer.hpp>
-#include <boost/archive/detail/interface_oarchive.hpp>
-#include <boost/archive/detail/common_oarchive.hpp>
-#include <boost/serialization/collection_size_type.hpp>
-
-namespace boost { namespace mpi { namespace detail {
-
-template<class Archive, class ImplementationArchive>
-class forward_skeleton_oarchive
- : public archive::detail::common_oarchive<Archive>
-{
-public:
-
- typedef ImplementationArchive implementation_archive_type;
-
- forward_skeleton_oarchive(implementation_archive_type& ar)
- : archive::detail::common_oarchive<Archive>(archive::no_header),
- implementation_archive(ar)
- {
- }
-
-#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
-public:
-#else
- friend class archive::detail::interface_oarchive<Archive>;
- friend class archive::save_access;
-protected:
-#endif
-
- template<class T>
- void save_override(T const& t)
- {
- archive::save(* this->This(), t);
- }
-
-#define BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(T) \
- void save_override(T const & t) \
- { \
- implementation_archive << t; \
- }
-
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_optional_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::version_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_reference_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_id_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_reference_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::tracking_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_name_type)
-BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(serialization::collection_size_type)
-
- void save_override(std::string const & t)
- {
- save_override(serialization::collection_size_type(t.size()));
- }
-
-
-#undef BOOST_ARCHIVE_FORWARD_IMPLEMENTATION
-protected:
- /// the actual archive used to serialize the information we actually want to store
- implementation_archive_type& implementation_archive;
-};
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_DETAIL_FORWARD_SKELETON_OARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_iprimitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_iprimitive.hpp
deleted file mode 100644
index 151ed0b34..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_iprimitive.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
-#define BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
-
-#include <boost/config.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/serialization/array.hpp>
-
-
-namespace boost { namespace mpi { namespace detail {
-
-/// @brief a minimal input archive, which ignores any load
-///
-/// This class implements a minimal input archive, essentially an input
-/// archive archetype, that does nothing on any load. Its use, besides
-/// acting as an archetype, is as a base class for special archives that
-/// ignore loading of most types.
-
-class ignore_iprimitive
-{
-public:
- /// a trivial default constructor
- ignore_iprimitive()
- {}
-
-
- /// don't do anything when loading binary data
- void load_binary(void *, std::size_t )
- {}
-
- /// don't do anything when loading arrays
- template<class T>
- void load_array(serialization::array_wrapper<T> &, unsigned int )
- {}
-
- typedef is_mpi_datatype<mpl::_1> use_array_optimization;
-
- /// don't do anything when loading primitive types
- template<class T>
- void load(T &)
- {
- }
-};
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_DETAIL_IGNORE_IPRIMITIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_oprimitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_oprimitive.hpp
deleted file mode 100644
index 4f2994bfc..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_oprimitive.hpp
+++ /dev/null
@@ -1,62 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
-#define BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
-
-#include <boost/config.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/serialization/array.hpp>
-
-namespace boost { namespace mpi { namespace detail {
-
-/// @brief a minimal output archive, which ignores any save
-///
-/// This class implements a minimal output archive, essentially an output
-/// archive archetype, that does nothing on any save. Its use, besides
-/// acting as an archetype, is as a base class for special archives that
-/// ignore saving of most types.
-
-class ignore_oprimitive
-{
-public:
- /// a trivial default constructor
- ignore_oprimitive()
- {}
-
- /// don't do anything when saving binary data
- void save_binary(const void *, std::size_t )
- {
- }
-
- /// don't do anything when saving arrays
- template<class T>
- void save_array(serialization::array_wrapper<T> const&, unsigned int )
- {
- }
-
- typedef is_mpi_datatype<mpl::_1> use_array_optimization;
-
-
-#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
- friend class archive::save_access;
-protected:
-#else
-public:
-#endif
-
- /// don't do anything when saving primitive types
- template<class T>
- void save(const T &)
- {
- }
-};
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_DETAIL_IGNORE_OPRIMITIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_skeleton_oarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_skeleton_oarchive.hpp
deleted file mode 100644
index f9285a25d..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/ignore_skeleton_oarchive.hpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
-#define BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
-
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/detail/common_oarchive.hpp>
-#include <boost/archive/basic_archive.hpp>
-#include <boost/archive/detail/oserializer.hpp>
-#include <boost/serialization/collection_size_type.hpp>
-#include <boost/serialization/array.hpp>
-#include <boost/serialization/item_version_type.hpp>
-
-namespace boost { namespace mpi { namespace detail {
-
-template<class Archive>
-class ignore_skeleton_oarchive
- : public archive::detail::common_oarchive<Archive>
-{
-public:
- ignore_skeleton_oarchive()
- : archive::detail::common_oarchive<Archive>(archive::no_header)
- {
- }
-
-#ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
-public:
-#else
- friend class archive::detail::interface_oarchive<Archive>;
- friend class archive::save_access;
-protected:
-#endif
- template<class T>
- void save_override(T const& t)
- {
- archive::save(* this->This(), t);
- }
-
-#define BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(T) \
- void save_override(T const &) \
- {}
-
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_id_optional_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::version_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::library_version_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_id_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_id_reference_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::object_id_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::object_reference_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::tracking_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(archive::class_name_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(serialization::collection_size_type)
-BOOST_ARCHIVE_IGNORE_IMPLEMENTATION(serialization::item_version_type)
-
- void save_override(std::string const & s)
- {
- if (s.size())
- save_override(serialization::make_array(s.data(),s.size()));
- }
-
-#undef BOOST_ARCHIVE_IGNORE_IMPLEMENTATION
-};
-
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_DETAIL_IGNORE_SKELETON_OARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_cache.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_cache.hpp
deleted file mode 100644
index c99ddb1e8..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_cache.hpp
+++ /dev/null
@@ -1,99 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
-#define BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
-
-#include <boost/mpi/datatype_fwd.hpp>
-#include <boost/mpi/detail/mpi_datatype_oarchive.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/utility/enable_if.hpp>
-#include <boost/mpl/assert.hpp>
-#include <boost/noncopyable.hpp>
-#include <typeinfo>
-
-// The std::type_info::before function in Visual C++ 8.0 (and probably earlier)
-// incorrectly returns an "int" instead of a "bool". Then the compiler has the
-// audacity to complain when that "int" is converted to a "bool". Silence
-// this warning.
-#ifdef BOOST_MSVC
-# pragma warning(push)
-# pragma warning(disable : 4800)
-#endif
-
-namespace boost { namespace mpi { namespace detail {
-
-/// @brief comparison function object for two std::type_info pointers
-///
-/// It is implemented using the before() member function of the
-/// std::type_info class.
-
-struct type_info_compare
-{
- bool operator()(std::type_info const* lhs, std::type_info const* rhs) const
- {
- return lhs->before(*rhs);
- }
-};
-
-
-/// @brief a map of MPI data types, indexed by their type_info
-///
-///
-class BOOST_MPI_DECL mpi_datatype_map
- : public boost::noncopyable
-{
- struct implementation;
-
- implementation *impl;
-
-public:
- mpi_datatype_map();
- ~mpi_datatype_map();
-
- template <class T>
- MPI_Datatype datatype(const T& x = T(), typename boost::enable_if<is_mpi_builtin_datatype<T> >::type* =0)
- {
- return get_mpi_datatype<T>(x);
- }
-
- template <class T>
- MPI_Datatype datatype(const T& x =T(), typename boost::disable_if<is_mpi_builtin_datatype<T> >::type* =0 )
- {
- BOOST_MPL_ASSERT((is_mpi_datatype<T>));
-
- // check whether the type already exists
- std::type_info const* t = &typeid(T);
- MPI_Datatype datatype = get(t);
- if (datatype == MPI_DATATYPE_NULL) {
- // need to create a type
- mpi_datatype_oarchive ar(x);
- datatype = ar.get_mpi_datatype();
- set(t, datatype);
- }
-
- return datatype;
- }
-
- void clear();
-
-private:
- MPI_Datatype get(const std::type_info* t);
- void set(const std::type_info* t, MPI_Datatype datatype);
-};
-
-/// Retrieve the MPI datatype cache
-BOOST_MPI_DECL mpi_datatype_map& mpi_datatype_cache();
-
-} } } // end namespace boost::mpi::detail
-
-#ifdef BOOST_MSVC
-# pragma warning(pop)
-#endif
-
-#endif // BOOST_MPI_DETAIL_TYPE_MPI_DATATYPE_CACHE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_oarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_oarchive.hpp
deleted file mode 100644
index 68f9abb6b..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_oarchive.hpp
+++ /dev/null
@@ -1,75 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
-#define BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
-
-#include <boost/type_traits/is_enum.hpp>
-#include <boost/mpl/bool.hpp>
-#include <boost/archive/detail/oserializer.hpp>
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/basic_archive.hpp>
-#include <boost/mpi/detail/ignore_skeleton_oarchive.hpp>
-#include <boost/mpi/detail/mpi_datatype_primitive.hpp>
-#include <boost/mpi/datatype_fwd.hpp>
-#include <boost/mpl/assert.hpp>
-#include <boost/static_assert.hpp>
-#include <boost/integer.hpp>
-#include <boost/archive/detail/register_archive.hpp>
-
-namespace boost { namespace mpi { namespace detail {
-
-
-// an archive wrapper that stores only the data members but not the
-// special types defined by the serialization library
-// to define the data skeletons (classes, pointers, container sizes, ...)
-
-class mpi_datatype_oarchive
- : public mpi_datatype_primitive,
- public ignore_skeleton_oarchive<mpi_datatype_oarchive>
-{
-public:
- template <class T>
- mpi_datatype_oarchive(const T& x)
- : mpi_datatype_primitive(&x) // register address
- {
- BOOST_MPL_ASSERT((is_mpi_datatype<T>));
- *this << x; // serialize the object
- }
-
- template<class T>
- void save_override(T const& t)
- {
- save_enum(t,boost::is_enum<T>());
- }
-
- template<class T>
- void save_enum(T const& t, mpl::false_)
- {
- ignore_skeleton_oarchive<mpi_datatype_oarchive>::save_override(t);
- }
-
- template<class T>
- void save_enum(T const& t, mpl::true_)
- {
- // select the right sized integer for the enum
- typedef typename boost::uint_t<8*sizeof(T)>::least int_type;
- BOOST_STATIC_ASSERT((sizeof(T)==sizeof(int_type)));
- this->save(*reinterpret_cast<int_type const*>(&t));
- }
-
-};
-
-} } } // end namespace boost::mpi::detail
-
-// required by export
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::mpi_datatype_oarchive)
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::ignore_skeleton_oarchive<boost::mpi::detail::mpi_datatype_oarchive>)
-BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::detail::mpi_datatype_oarchive)
-
-#endif // BOOST_MPI_DETAIL_MPI_DATATYPE_OARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_primitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_primitive.hpp
deleted file mode 100644
index c230055ab..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/mpi_datatype_primitive.hpp
+++ /dev/null
@@ -1,145 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
-#define BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
-
-#include <boost/mpi/config.hpp>
-#include <cstddef> // size_t
-
-#include <boost/config.hpp>
-#if defined(BOOST_NO_STDC_NAMESPACE)
-namespace std{
- using ::size_t;
-} // namespace std
-#endif
-
-#include <boost/mpi/datatype_fwd.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/throw_exception.hpp>
-#include <boost/assert.hpp>
-#include <boost/mpl/placeholders.hpp>
-#include <boost/serialization/array.hpp>
-#include <boost/serialization/detail/get_data.hpp>
-#include <stdexcept>
-#include <iostream>
-#include <vector>
-
-namespace boost { namespace mpi { namespace detail {
-
-/////////////////////////////////////////////////////////////////////////
-// class mpi_datatype_primitive - creation of custom MPI data types
-
-class mpi_datatype_primitive
-{
-public:
-
- // trivial default constructor
- mpi_datatype_primitive()
- : is_committed(false),
- origin(0)
- {}
-
- mpi_datatype_primitive(void const* orig)
- : is_committed(false),
- origin()
- {
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
- BOOST_MPI_CHECK_RESULT(MPI_Get_address,(const_cast<void*>(orig), &origin));
-#else
- BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(orig), &origin));
-#endif
- }
-
- void save_binary(void const *address, std::size_t count)
- {
- save_impl(address,MPI_BYTE,count);
- }
-
- // fast saving of arrays of MPI types
- template<class T>
- void save_array(serialization::array_wrapper<T> const& x, unsigned int /* version */)
- {
- if (x.count())
- save_impl(x.address(), boost::mpi::get_mpi_datatype(*x.address()), x.count());
- }
-
- typedef is_mpi_datatype<mpl::_1> use_array_optimization;
-
- // create and return the custom MPI data type
- MPI_Datatype get_mpi_datatype()
- {
- if (!is_committed)
- {
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
- BOOST_MPI_CHECK_RESULT(MPI_Type_create_struct,
- (
- addresses.size(),
- boost::serialization::detail::get_data(lengths),
- boost::serialization::detail::get_data(addresses),
- boost::serialization::detail::get_data(types),
- &datatype_
- ));
-#else
- BOOST_MPI_CHECK_RESULT(MPI_Type_struct,
- (
- addresses.size(),
- boost::serialization::detail::get_data(lengths),
- boost::serialization::detail::get_data(addresses),
- boost::serialization::detail::get_data(types),
- &datatype_
- ));
-#endif
- BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&datatype_));
-
- is_committed = true;
- }
-
- return datatype_;
- }
-
- // default saving of primitives.
- template<class T>
- void save(const T & t)
- {
- save_impl(&t, boost::mpi::get_mpi_datatype(t), 1);
- }
-
-private:
-
- void save_impl(void const * p, MPI_Datatype t, int l)
- {
- BOOST_ASSERT ( !is_committed );
-
- // store address, type and length
-
- MPI_Aint a;
-#if defined(MPI_VERSION) && MPI_VERSION >= 2
- BOOST_MPI_CHECK_RESULT(MPI_Get_address,(const_cast<void*>(p), &a));
-#else
- BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(p), &a));
-#endif
- addresses.push_back(a-origin);
- types.push_back(t);
- lengths.push_back(l);
- }
-
- std::vector<MPI_Aint> addresses;
- std::vector<MPI_Datatype> types;
- std::vector<int> lengths;
-
- bool is_committed;
- MPI_Datatype datatype_;
- MPI_Aint origin;
-};
-
-
-} } } // end namespace boost::mpi::detail
-
-
-#endif // BOOST_MPI_DETAIL_MPI_DATATYPE_OPRIMITIVE_HPP
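What mpi_datatype_primitive automates can be written out by hand: record an (offset, length, datatype) triple per member, then commit an MPI struct type. A hedged raw-MPI sketch for a hypothetical struct point { double x; int id; } follows; it illustrates the same MPI_Type_create_struct pattern but is not library code.

    #include <mpi.h>

    struct point { double x; int id; };

    MPI_Datatype make_point_type()
    {
        point p;
        MPI_Aint base, ax, aid;
        MPI_Get_address(&p,    &base);
        MPI_Get_address(&p.x,  &ax);
        MPI_Get_address(&p.id, &aid);

        int          lengths[2] = { 1, 1 };
        MPI_Aint     offsets[2] = { ax - base, aid - base };  // member offsets from the object's origin
        MPI_Datatype types[2]   = { MPI_DOUBLE, MPI_INT };

        MPI_Datatype t;
        MPI_Type_create_struct(2, lengths, offsets, types, &t);
        MPI_Type_commit(&t);
        return t;
    }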
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_iprimitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_iprimitive.hpp
deleted file mode 100644
index 7080cbf53..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_iprimitive.hpp
+++ /dev/null
@@ -1,118 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_PACKED_IPRIMITIVE_HPP
-#define BOOST_MPI_PACKED_IPRIMITIVE_HPP
-
-#include <boost/mpi/config.hpp>
-#include <cstddef> // size_t
-#include <boost/config.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/assert.hpp>
-#include <boost/serialization/array.hpp>
-#include <boost/serialization/detail/get_data.hpp>
-#include <vector>
-#include <boost/mpi/allocator.hpp>
-
-namespace boost { namespace mpi {
-
-/// deserialization using MPI_Unpack
-
-class BOOST_MPI_DECL packed_iprimitive
-{
-public:
- /// the type of the buffer from which the data is unpacked upon deserialization
- typedef std::vector<char, allocator<char> > buffer_type;
-
- packed_iprimitive(buffer_type & b, MPI_Comm const & comm, int position = 0)
- : buffer_(b),
- comm(comm),
- position(position)
- {
- }
-
- void* address ()
- {
- return &buffer_[0];
- }
-
- void const* address () const
- {
- return &buffer_[0];
- }
-
- const std::size_t& size() const
- {
- return size_ = buffer_.size();
- }
-
- void resize(std::size_t s)
- {
- buffer_.resize(s);
- }
-
- void load_binary(void *address, std::size_t count)
- {
- load_impl(address,MPI_BYTE,count);
- }
-
- // fast loading of arrays of fundamental types
- template<class T>
- void load_array(serialization::array_wrapper<T> const& x, unsigned int /* file_version */)
- {
- if (x.count())
- load_impl(x.address(), get_mpi_datatype(*x.address()), x.count());
- }
-
-/*
- template<class T>
- void load(serialization::array_wrapper<T> const& x)
- {
- load_array(x,0u);
- }
-*/
-
- typedef is_mpi_datatype<mpl::_1> use_array_optimization;
-
- // default loading of primitives.
- template<class T>
- void load( T & t)
- {
- load_impl(&t, get_mpi_datatype(t), 1);
- }
-
- template<class CharType>
- void load(std::basic_string<CharType> & s)
- {
- unsigned int l;
- load(l);
- s.resize(l);
- // note breaking a rule here - could be a problem on some platform
- if (l)
- load_impl(const_cast<CharType *>(s.data()),
- get_mpi_datatype(CharType()),l);
- }
-
-private:
-
- void load_impl(void * p, MPI_Datatype t, int l)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Unpack,
- (const_cast<char*>(boost::serialization::detail::get_data(buffer_)), buffer_.size(), &position, p, l, t, comm));
- }
-
- buffer_type & buffer_;
- mutable std::size_t size_;
- MPI_Comm comm;
- int position;
-};
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_PACKED_IPRIMITIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_oprimitive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_oprimitive.hpp
deleted file mode 100644
index 5b6b3b270..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/packed_oprimitive.hpp
+++ /dev/null
@@ -1,115 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-
-#ifndef BOOST_MPI_PACKED_OPRIMITIVE_HPP
-#define BOOST_MPI_PACKED_OPRIMITIVE_HPP
-
-#include <boost/mpi/config.hpp>
-#include <cstddef> // size_t
-#include <boost/config.hpp>
-
-#include <boost/mpi/datatype.hpp>
-#include <boost/mpi/exception.hpp>
-#include <boost/serialization/detail/get_data.hpp>
-#include <boost/serialization/array.hpp>
-#include <boost/assert.hpp>
-#include <vector>
-#include <boost/mpi/allocator.hpp>
-
-namespace boost { namespace mpi {
-
-/// serialization using MPI_Pack
-
-class BOOST_MPI_DECL packed_oprimitive
-{
-public:
- /// the type of the buffer into which the data is packed upon serialization
- typedef std::vector<char, allocator<char> > buffer_type;
-
- packed_oprimitive(buffer_type & b, MPI_Comm const & comm)
- : buffer_(b),
- comm(comm)
- {
- }
-
- void const * address() const
- {
- return &buffer_[0];
- }
-
- const std::size_t& size() const
- {
- return size_ = buffer_.size();
- }
-
- void save_binary(void const *address, std::size_t count)
- {
- save_impl(address,MPI_BYTE,count);
- }
-
- // fast saving of arrays
- template<class T>
- void save_array(serialization::array_wrapper<T> const& x, unsigned int /* file_version */)
- {
- if (x.count())
- save_impl(x.address(), get_mpi_datatype(*x.address()), x.count());
- }
-
- typedef is_mpi_datatype<mpl::_1> use_array_optimization;
-
-#ifndef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
- friend class archive::save_access;
-protected:
-#else
-public:
-#endif
-
- // default saving of primitives.
- template<class T>
- void save(const T & t)
- {
- save_impl(&t, get_mpi_datatype<T>(t), 1);
- }
-
- template<class CharType>
- void save(const std::basic_string<CharType> &s)
- {
- unsigned int l = static_cast<unsigned int>(s.size());
- save(l);
- if (l)
- save_impl(s.data(),get_mpi_datatype(CharType()),s.size());
- }
-
-private:
-
- void save_impl(void const * p, MPI_Datatype t, int l)
- {
- // allocate enough memory
- int memory_needed;
- BOOST_MPI_CHECK_RESULT(MPI_Pack_size,(l,t,comm,&memory_needed));
-
- int position = buffer_.size();
- buffer_.resize(position + memory_needed);
-
- // pack the data into the buffer
- BOOST_MPI_CHECK_RESULT(MPI_Pack,
- (const_cast<void*>(p), l, t, boost::serialization::detail::get_data(buffer_), buffer_.size(), &position, comm));
- // reduce the buffer size if needed
- BOOST_ASSERT(std::size_t(position) <= buffer_.size());
- if (std::size_t(position) < buffer_.size())
- buffer_.resize(position);
- }
-
- buffer_type& buffer_;
- mutable std::size_t size_;
- MPI_Comm comm;
-};
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_PACKED_OPRIMITIVE_HPP
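The buffer-growth logic in save_impl above follows the standard MPI_Pack recipe: ask MPI_Pack_size for an upper bound, extend the buffer, pack in place, then trim to the bytes actually consumed. A hedged stand-alone sketch of that recipe; pack_ints and its arguments are illustrative names.

    #include <mpi.h>
    #include <vector>

    void pack_ints(const int* data, int n, std::vector<char>& buf, MPI_Comm comm)
    {
        int needed = 0;
        MPI_Pack_size(n, MPI_INT, comm, &needed);       // upper bound for n ints

        int position = static_cast<int>(buf.size());
        buf.resize(position + needed);

        MPI_Pack(const_cast<int*>(data), n, MPI_INT,
                 &buf[0], static_cast<int>(buf.size()), &position, comm);
        buf.resize(position);                           // keep only the bytes written
    }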
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/point_to_point.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/point_to_point.hpp
deleted file mode 100644
index 06db34ce9..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/detail/point_to_point.hpp
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2005 Douglas Gregor.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point
-#ifndef BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
-#define BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
-
-// For (de-)serializing sends and receives
-#include <boost/mpi/config.hpp>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-
-namespace boost { namespace mpi { namespace detail {
-
-/** Sends a packed archive using MPI_Send. */
-BOOST_MPI_DECL void
-packed_archive_send(MPI_Comm comm, int dest, int tag,
- const packed_oarchive& ar);
-
-/** Sends a packed archive using MPI_Isend.
- *
- * This routine may split sends into multiple packets. The MPI_Request
- * for each packet will be placed into the out_requests array, up to
- * num_out_requests packets. The number of packets sent will be
- * returned from the function.
- *
- * @pre num_out_requests >= 2
- */
-BOOST_MPI_DECL int
-packed_archive_isend(MPI_Comm comm, int dest, int tag,
- const packed_oarchive& ar,
- MPI_Request* out_requests, int num_out_requests);
-
-/**
- * \overload
- */
-BOOST_MPI_DECL int
-packed_archive_isend(MPI_Comm comm, int dest, int tag,
- const packed_iarchive& ar,
- MPI_Request* out_requests, int num_out_requests);
-
-/** Receives a packed archive using MPI_Recv. */
-BOOST_MPI_DECL void
-packed_archive_recv(MPI_Comm comm, int source, int tag, packed_iarchive& ar,
- MPI_Status& status);
-
-} } } // end namespace boost::mpi::detail
-
-#endif // BOOST_MPI_DETAIL_POINT_TO_POINT_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/environment.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/environment.hpp
deleted file mode 100644
index 92af129f3..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/environment.hpp
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file environment.hpp
- *
- * This header provides the @c environment class, which provides
- * routines to initialize, finalize, and query the status of the
- * Boost MPI environment.
- */
-#ifndef BOOST_MPI_ENVIRONMENT_HPP
-#define BOOST_MPI_ENVIRONMENT_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/noncopyable.hpp>
-#include <boost/optional.hpp>
-#include <string>
-#include <iosfwd>
-
-namespace boost { namespace mpi {
-namespace threading {
-/** @brief specify the supported threading level.
- *
- * Based on MPI 2 standard/8.7.3
- */
-enum level {
- /** Only one thread will execute.
- */
- single = MPI_THREAD_SINGLE,
- /** Only the main thread will make MPI calls.
- *
- * The process may be multi-threaded, but only the main
- * thread will make MPI calls (all MPI calls are ``funneled''
- * to the main thread).
- */
- funneled = MPI_THREAD_FUNNELED,
- /** Only one thread at a time will make MPI calls.
- *
- * The process may be multi-threaded, and multiple
- * threads may make MPI calls, but only one at a time:
- * MPI calls are not made concurrently from two distinct
- * threads (all MPI calls are ``serialized'').
- */
- serialized = MPI_THREAD_SERIALIZED,
- /** Multiple threads may make MPI calls.
- *
- * Multiple threads may call MPI, with no restrictions.
- */
- multiple = MPI_THREAD_MULTIPLE
-};
-
-/** Formatted output for threading level. */
-std::ostream& operator<<(std::ostream& out, level l);
-
-/** Formatted input for threading level. */
-std::istream& operator>>(std::istream& in, level& l);
-} // namespace threading
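A hedged sketch of requesting one of these threading levels at startup, using the environment constructor declared further below; whether the requested level is actually granted depends on the underlying MPI implementation.

    #include <boost/mpi/environment.hpp>

    int main(int argc, char* argv[])
    {
        namespace mpi = boost::mpi;
        // Request full multi-threaded MPI; the implementation may grant less.
        mpi::environment env(argc, argv, mpi::threading::multiple);
        return 0;
    }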
-/** @brief Initialize, finalize, and query the MPI environment.
- *
- * The @c environment class is used to initialize, finalize, and
- * query the MPI environment. It will typically be used in the @c
- * main() function of a program, which will create a single instance
- * of @c environment initialized with the arguments passed to the
- * program:
- *
- * @code
- * int main(int argc, char* argv[])
- * {
- * mpi::environment env(argc, argv);
- * }
- * @endcode
- *
- * The instance of @c environment will initialize MPI (by calling @c
- * MPI_Init) in its constructor and finalize MPI (by calling @c
- * MPI_Finalize for normal termination or @c MPI_Abort for an
- * uncaught exception) in its destructor.
- *
- * The use of @c environment is not mandatory. Users may choose to
- * invoke @c MPI_Init and @c MPI_Finalize manually. In this case, no
- * @c environment object is needed. If one is created, however, it
- * will do nothing on either construction or destruction.
- */
-class BOOST_MPI_DECL environment : noncopyable {
-public:
-#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION
- /** Initialize the MPI environment.
- *
- * If the MPI environment has not already been initialized,
- * initializes MPI with a call to @c MPI_Init. Since this
- * constructor does not take command-line arguments (@c argc and @c
- * argv), it is only available when the underlying MPI
- * implementation supports calling @c MPI_Init with @c NULL
- * arguments, indicated by the macro @c
- * BOOST_MPI_HAS_NOARG_INITIALIZATION.
- *
- * @param abort_on_exception When true, this object will abort the
- * program if it is destructed due to an uncaught exception.
- */
- explicit environment(bool abort_on_exception = true);
- /** Initialize the MPI environment.
- *
- * If the MPI environment has not already been initialized,
- * initializes MPI with a call to @c MPI_Init_thread. Since this
- * constructor does not take command-line arguments (@c argc and @c
- * argv), it is only available when the underlying MPI
- * implementation supports calling @c MPI_Init with @c NULL
- * arguments, indicated by the macro @c
- * BOOST_MPI_HAS_NOARG_INITIALIZATION.
- *
- * @param mt_level the required level of threading support.
- *
- * @param abort_on_exception When true, this object will abort the
- * program if it is destructed due to an uncaught exception.
- */
- explicit environment(threading::level mt_level, bool abort_on_exception = true);
-#endif
-
- /** Initialize the MPI environment.
- *
- * If the MPI environment has not already been initialized,
- * initializes MPI with a call to @c MPI_Init.
- *
- * @param argc The number of arguments provided in @p argv, as
- * passed into the program's @c main function.
- *
- * @param argv The array of argument strings passed to the program
- * via @c main.
- *
- * @param abort_on_exception When true, this object will abort the
- * program if it is destructed due to an uncaught exception.
- */
- environment(int& argc, char** &argv, bool abort_on_exception = true);
-
- /** Initialize the MPI environment.
- *
- * If the MPI environment has not already been initialized,
- * initializes MPI with a call to @c MPI_Init_thread.
- *
- * @param argc The number of arguments provided in @p argv, as
- * passed into the program's @c main function.
- *
- * @param argv The array of argument strings passed to the program
- * via @c main.
- *
- * @param mt_level the required level of threading support
- *
- * @param abort_on_exception When true, this object will abort the
- * program if it is destructed due to an uncaught exception.
- */
- environment(int& argc, char** &argv, threading::level mt_level,
- bool abort_on_exception = true);
-
- /** Shuts down the MPI environment.
- *
- * If this @c environment object was used to initialize the MPI
- * environment, and the MPI environment has not already been shut
- * down (finalized), this destructor will shut down the MPI
- * environment. Under normal circumstances, this only involves
- * invoking @c MPI_Finalize. However, if destruction is the result
- * of an uncaught exception and the @c abort_on_exception parameter
- * of the constructor had the value @c true, this destructor will
- * invoke @c MPI_Abort with @c MPI_COMM_WORLD to abort the entire
- * MPI program with a result code of -1.
- */
- ~environment();
-
- /** Abort all MPI processes.
- *
- * Aborts all MPI processes and returns to the environment. The
- * precise behavior will be defined by the underlying MPI
- * implementation. This is equivalent to a call to @c MPI_Abort
- * with @c MPI_COMM_WORLD.
- *
- * @param errcode The error code to return to the environment.
- * @returns Will not return.
- */
- static void abort(int errcode);
-
- /** Determine if the MPI environment has already been initialized.
- *
- * This routine is equivalent to a call to @c MPI_Initialized.
- *
- * @returns @c true if the MPI environment has been initialized.
- */
- static bool initialized();
-
- /** Determine if the MPI environment has already been finalized.
- *
- * The routine is equivalent to a call to @c MPI_Finalized.
- *
- * @returns @c true if the MPI environment has been finalized.
- */
- static bool finalized();
-
- /** Retrieves the maximum tag value.
- *
- * Returns the maximum value that may be used for the @c tag
- * parameter of send/receive operations. This value will be
- * somewhat smaller than the value of @c MPI_TAG_UB, because the
- * Boost.MPI implementation reserves some tags for collective
- * operations.
- *
- * @returns the maximum tag value.
- */
- static int max_tag();
-
- /** The tag value used for collective operations.
- *
- * Returns the reserved tag value used by the Boost.MPI
- * implementation for collective operations. Although users are not
- * permitted to use this tag to send or receive messages, it may be
- * useful when monitoring communication patterns.
- *
- * @returns the tag value used for collective operations.
- */
- static int collectives_tag();
-
- /** Retrieves the rank of the host process, if one exists.
- *
- * If there is a host process, this routine returns the rank of
- * that process. Otherwise, it returns an empty @c
- * optional<int>. MPI does not define the meaning of a "host"
- * process: consult the documentation for the MPI
- * implementation. This routine examines the @c MPI_HOST attribute
- * of @c MPI_COMM_WORLD.
- *
- * @returns The rank of the host process, if one exists.
- */
- static optional<int> host_rank();
-
- /** Retrieves the rank of a process that can perform input/output.
- *
- * This routine returns the rank of a process that can perform
- * input/output via the standard C and C++ I/O facilities. If every
- * process can perform I/O using the standard facilities, this
- * routine will return @c any_source; if no process can perform
- * I/O, this routine will return no value (an empty @c
- * optional). This routine examines the @c MPI_IO attribute of @c
- * MPI_COMM_WORLD.
- *
- * @returns the rank of the process that can perform I/O, @c
- * any_source if every process can perform I/O, or no value if no
- * process can perform I/O.
- */
- static optional<int> io_rank();
-
- /** Retrieve the name of this processor.
- *
- * This routine returns the name of this processor. The actual form
- * of the name is unspecified, but may be documented by the
- * underlying MPI implementation. This routine is implemented as a
- * call to @c MPI_Get_processor_name.
- *
- * @returns the name of this processor.
- */
- static std::string processor_name();
-
- /** Query the current level of thread support.
- */
- static threading::level thread_level();
-
- /** Are we in the main thread?
- */
- static bool is_main_thread();
-
-private:
- /// Whether this environment object called MPI_Init
- bool i_initialized;
-
- /// Whether we should abort if the destructor is invoked due to an uncaught exception
- bool abort_on_exception;
-
- /// The number of reserved tags.
- static const int num_reserved_tags = 1;
-};
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_ENVIRONMENT_HPP
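
The environment class above is the usual entry point for Boost.MPI programs. A minimal usage sketch, assuming the Boost.MPI library is built against a working MPI implementation and the program is started through an MPI launcher such as mpirun:

    #include <boost/mpi/environment.hpp>
    #include <iostream>

    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      // Request serialized threading support; this maps to MPI_Init_thread.
      mpi::environment env(argc, argv, mpi::threading::serialized);

      std::cout << "processor: " << mpi::environment::processor_name()
                << ", granted thread level: " << mpi::environment::thread_level()
                << std::endl;
      // MPI_Finalize runs automatically when env goes out of scope.
      return 0;
    }
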
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/exception.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/exception.hpp
deleted file mode 100644
index 12523077c..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/exception.hpp
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file exception.hpp
- *
- * This header provides exception classes that report MPI errors to
- * the user and macros that translate MPI error codes into Boost.MPI
- * exceptions.
- */
-#ifndef BOOST_MPI_EXCEPTION_HPP
-#define BOOST_MPI_EXCEPTION_HPP
-
-#include <boost/mpi/config.hpp>
-#include <exception>
-#include <string>
-#include <boost/config.hpp>
-#include <boost/throw_exception.hpp>
-
-namespace boost { namespace mpi {
-
-/** @brief Catch-all exception class for MPI errors.
- *
- * Instances of this class will be thrown when an MPI error
- * occurs. MPI failures that trigger these exceptions may or may not
- * be recoverable, depending on the underlying MPI
- * implementation. Consult the documentation for your MPI
- * implementation to determine the effect of MPI errors.
- */
-class BOOST_MPI_DECL exception : public std::exception
-{
- public:
- /**
- * Build a new @c exception object.
- *
- * @param routine The MPI routine in which the error
- * occurred. This should be a pointer to a string constant: it
- * will not be copied.
- *
- * @param result_code The result code returned from the MPI
- * routine that aborted with an error.
- */
- exception(const char* routine, int result_code);
-
- virtual ~exception() throw();
-
- /**
- * A description of the error that occurred.
- */
- virtual const char * what () const throw ()
- {
- return this->message.c_str();
- }
-
- /** Retrieve the name of the MPI routine that reported the error. */
- const char* routine() const { return routine_; }
-
- /**
- * @brief Retrieve the result code returned from the MPI routine
- * that reported the error.
- */
- int result_code() const { return result_code_; }
-
- /**
- * @brief Returns the MPI error class associated with the error that
- * triggered this exception.
- */
- int error_class() const
- {
- int result;
- MPI_Error_class(result_code_, &result);
- return result;
- }
-
- protected:
- /// The MPI routine that triggered the error
- const char* routine_;
-
- /// The failed result code reported by the MPI implementation.
- int result_code_;
-
- /// The formatted error message
- std::string message;
-};
-
-/**
- * Call the MPI routine MPIFunc with arguments Args (surrounded by
- * parentheses). If the result is not MPI_SUCCESS, use
- * boost::throw_exception to throw an exception or abort, depending on
- * BOOST_NO_EXCEPTIONS.
- */
-#define BOOST_MPI_CHECK_RESULT( MPIFunc, Args ) \
- { \
- int _check_result = MPIFunc Args; \
- if (_check_result != MPI_SUCCESS) \
- boost::throw_exception(boost::mpi::exception(#MPIFunc, \
- _check_result)); \
- }
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_EXCEPTION_HPP
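
The BOOST_MPI_CHECK_RESULT macro is how the rest of the library converts MPI error codes into exceptions. A small sketch of the same pattern applied to a raw MPI call; MPI_Comm_rank is only an example target, and any routine returning an MPI error code works the same way:

    #include <boost/mpi/exception.hpp>
    #include <iostream>

    int rank_or_throw(MPI_Comm comm)
    {
      int rank = 0;
      // Throws boost::mpi::exception if MPI_Comm_rank does not return MPI_SUCCESS.
      BOOST_MPI_CHECK_RESULT(MPI_Comm_rank, (comm, &rank));
      return rank;
    }

    void report(const boost::mpi::exception& e)
    {
      std::cerr << e.routine() << " failed: " << e.what()
                << " (error class " << e.error_class() << ")" << std::endl;
    }
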
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/graph_communicator.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/graph_communicator.hpp
deleted file mode 100644
index 6cafb1fea..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/graph_communicator.hpp
+++ /dev/null
@@ -1,575 +0,0 @@
-// Copyright (C) 2007 Trustees of Indiana University
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file graph_communicator.hpp
- *
- * This header defines facilities to support MPI communicators with
- * graph topologies, using the graph interface defined by the Boost
- * Graph Library. One can construct a communicator whose topology is
- * described by any graph meeting the requirements of the Boost Graph
- * Library's graph concepts. Likewise, any communicator that has a
- * graph topology can be viewed as a graph by the Boost Graph
- * Library, permitting one to use the BGL's graph algorithms on the
- * process topology.
- */
-#ifndef BOOST_MPI_GRAPH_COMMUNICATOR_HPP
-#define BOOST_MPI_GRAPH_COMMUNICATOR_HPP
-
-#include <boost/mpi/communicator.hpp>
-#include <vector>
-#include <utility>
-
-// Headers required to implement graph topologies
-#include <boost/graph/graph_traits.hpp>
-#include <boost/graph/properties.hpp>
-#include <boost/property_map/property_map.hpp>
-#include <boost/iterator/counting_iterator.hpp>
-#include <boost/graph/iteration_macros.hpp>
-#include <boost/shared_array.hpp>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-/**
- * @brief An MPI communicator with a graph topology.
- *
- * A @c graph_communicator is a communicator whose topology is
- * expressed as a graph. Graph communicators have the same
- * functionality as (intra)communicators, but also allow one to query
- * the relationships among processes. Those relationships are
- * expressed via a graph, using the interface defined by the Boost
- * Graph Library. The @c graph_communicator class meets the
- * requirements of the BGL Graph, Incidence Graph, Adjacency Graph,
- * Vertex List Graph, and Edge List Graph concepts.
- */
-class BOOST_MPI_DECL graph_communicator : public communicator
-{
- friend class communicator;
-
- /**
- * INTERNAL ONLY
- *
- * Construct a graph communicator given a shared pointer to the
- * underlying MPI_Comm. This operation is used for "casting" from a
- * communicator to a graph communicator.
- */
- explicit graph_communicator(const shared_ptr<MPI_Comm>& comm_ptr)
- {
-#ifndef BOOST_DISABLE_ASSERTS
- int status;
- BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status));
- BOOST_ASSERT(status == MPI_GRAPH);
-#endif
- this->comm_ptr = comm_ptr;
- }
-
-public:
- /**
- * Build a new Boost.MPI graph communicator based on the MPI
- * communicator @p comm with graph topology.
- *
- * @p comm may be any valid MPI communicator. If @p comm is
- * MPI_COMM_NULL, an empty communicator (that cannot be used for
- * communication) is created and the @p kind parameter is
- * ignored. Otherwise, the @p kind parameter determines how the
- * Boost.MPI communicator will be related to @p comm:
- *
- * - If @p kind is @c comm_duplicate, duplicate @c comm to create
- * a new communicator. This new communicator will be freed when
- * the Boost.MPI communicator (and all copies of it) is
- * destroyed. This option is only permitted if the underlying MPI
- * implementation supports MPI 2.0; duplication of
- * intercommunicators is not available in MPI 1.x.
- *
- * - If @p kind is @c comm_take_ownership, take ownership of @c
- * comm. It will be freed automatically when all of the Boost.MPI
- * communicators go out of scope.
- *
- * - If @p kind is @c comm_attach, this Boost.MPI communicator
- * will reference the existing MPI communicator @p comm but will
- * not free @p comm when the Boost.MPI communicator goes out of
- * scope. This option should only be used when the communicator is
- * managed by the user.
- */
- graph_communicator(const MPI_Comm& comm, comm_create_kind kind)
- : communicator(comm, kind)
- {
-#ifndef BOOST_DISABLE_ASSERTS
- int status;
- BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status));
- BOOST_ASSERT(status == MPI_GRAPH);
-#endif
- }
-
- /**
- * Create a new communicator whose topology is described by the
- * given graph. The indices of the vertices in the graph will be
- * assumed to be the ranks of the processes within the
- * communicator. There may be fewer vertices in the graph than
- * there are processes in the communicator; in this case, the
- * resulting communicator will be a NULL communicator.
- *
- * @param comm The communicator that the new, graph communicator
- * will be based on.
- *
- * @param graph Any type that meets the requirements of the
- * Incidence Graph and Vertex List Graph concepts from the Boost Graph
- * Library. The structure of this graph will become the topology
- * of the communicator that is returned.
- *
- * @param reorder Whether MPI is permitted to re-order the process
- * ranks within the returned communicator, to better optimize
- * communication. If false, the rank of each process in the
- * returned communicator will match precisely the rank of that process
- * within the original communicator.
- */
- template<typename Graph>
- explicit
- graph_communicator(const communicator& comm, const Graph& graph,
- bool reorder = false);
-
- /**
- * Create a new communicator whose topology is described by the
- * given graph. The rank map (@p rank) gives the mapping from
- * vertices in the graph to ranks within the communicator. There
- * may be fewer vertices in the graph than there are processes in
- * the communicator; in this case, the resulting communicator will
- * be a NULL communicator.
- *
- * @param comm The communicator that the new, graph communicator
- * will be based on. The ranks in @c rank refer to the processes in
- * this communicator.
- *
- * @param graph Any type that meets the requirements of the
- * Incidence Graph and Vertex List Graph concepts from the Boost Graph
- * Library. The structure of this graph will become the topology
- * of the communicator that is returned.
- *
- * @param rank This map translates vertices in the @c graph into
- * ranks within the current communicator. It must be a Readable
- * Property Map (see the Boost Property Map library) whose key type
- * is the vertex type of the @p graph and whose value type is @c
- * int.
- *
- * @param reorder Whether MPI is permitted to re-order the process
- * ranks within the returned communicator, to better optimize
- * communication. If false, the rank of each process in the
- * returned communicator will match precisely the rank of that process
- * within the original communicator.
- */
- template<typename Graph, typename RankMap>
- explicit
- graph_communicator(const communicator& comm, const Graph& graph,
- RankMap rank, bool reorder = false);
-
-protected:
- /**
- * INTERNAL ONLY
- *
- * Used by the constructors to create the new communicator with a
- * graph topology.
- */
- template<typename Graph, typename RankMap>
- void
- setup_graph(const communicator& comm, const Graph& graph, RankMap rank,
- bool reorder);
-};
-
-/****************************************************************************
- * Implementation Details *
- ****************************************************************************/
-
-template<typename Graph>
-graph_communicator::graph_communicator(const communicator& comm,
- const Graph& graph,
- bool reorder)
-{
- this->setup_graph(comm, graph, get(vertex_index, graph), reorder);
-}
-
-template<typename Graph, typename RankMap>
-graph_communicator::graph_communicator(const communicator& comm,
- const Graph& graph,
- RankMap rank, bool reorder)
-{
- this->setup_graph(comm, graph, rank, reorder);
-}
-
-
-template<typename Graph, typename RankMap>
-void
-graph_communicator::setup_graph(const communicator& comm, const Graph& graph,
- RankMap rank, bool reorder)
-{
- typedef typename graph_traits<Graph>::vertex_descriptor vertex_descriptor;
-
- // Build a mapping from ranks to vertices
- std::vector<vertex_descriptor> vertex_with_rank(num_vertices(graph));
- if (vertex_with_rank.empty())
- return;
-
- BGL_FORALL_VERTICES_T(v, graph, Graph)
- vertex_with_rank[get(rank, v)] = v;
-
- // Build the representation of the graph required by
- // MPI_Graph_create.
- std::vector<int> indices(num_vertices(graph));
- std::vector<int> edges;
- int nvertices = indices.size();
- for (int vertex_index = 0; vertex_index < nvertices; ++vertex_index) {
- vertex_descriptor v = vertex_with_rank[vertex_index];
-
- BGL_FORALL_OUTEDGES_T(v, e, graph, Graph)
- edges.push_back(get(rank, target(e, graph)));
-
- indices[vertex_index] = edges.size();
- }
-
- // Create the new communicator
- MPI_Comm newcomm;
- BOOST_MPI_CHECK_RESULT(MPI_Graph_create,
- ((MPI_Comm)comm,
- nvertices,
- &indices[0],
- edges.empty()? (int*)0 : &edges[0],
- reorder,
- &newcomm));
- this->comm_ptr.reset(new MPI_Comm(newcomm), comm_free());
-}
-
-/****************************************************************************
- * Communicator with Graph Topology as BGL Graph *
- ****************************************************************************/
-namespace detail {
- /**
- * INTERNAL ONLY
- *
- * The iterator used to access the outgoing edges within a
- * communicator's graph topology.
- */
- class comm_out_edge_iterator
- : public iterator_facade<comm_out_edge_iterator,
- std::pair<int, int>,
- random_access_traversal_tag,
- const std::pair<int, int>&,
- int>
- {
- public:
- comm_out_edge_iterator() { }
-
- comm_out_edge_iterator(int source, shared_array<int> neighbors, int index)
- : edge(source, -1), neighbors(neighbors), index(index) { }
-
- protected:
- friend class boost::iterator_core_access;
-
- const std::pair<int, int>& dereference() const
- {
- edge.second = neighbors[index];
- return edge;
- }
-
- bool equal(const comm_out_edge_iterator& other) const
- {
- return (edge.first == other.edge.first
- && index == other.index);
- }
-
- void increment() { ++index; }
-
- void decrement() { --index; }
-
- void advance(int n) { index += n; }
-
- int distance_to(const comm_out_edge_iterator& other) const
- {
- return other.index - index;
- }
-
- mutable std::pair<int, int> edge;
- shared_array<int> neighbors;
- int index;
- };
-
- /**
- * INTERNAL ONLY
- *
- * The iterator used to access the adjacent vertices within a
- * communicator's graph topology.
- */
- class comm_adj_iterator
- : public iterator_facade<comm_adj_iterator,
- int,
- random_access_traversal_tag,
- int,
- int>
- {
- public:
- comm_adj_iterator() { }
-
- comm_adj_iterator(shared_array<int> neighbors, int index)
- : neighbors(neighbors), index(index) { }
-
- protected:
- friend class boost::iterator_core_access;
-
- int dereference() const { return neighbors[index]; }
-
- bool equal(const comm_adj_iterator& other) const
- {
- return (neighbors == other.neighbors
- && index == other.index);
- }
-
- void increment() { ++index; }
-
- void decrement() { --index; }
-
- void advance(int n) { index += n; }
-
- int distance_to(const comm_adj_iterator& other) const
- {
- return other.index - index;
- }
-
- shared_array<int> neighbors;
- int index;
- };
-
- /**
- * INTERNAL ONLY
- *
- * The iterator used to access the edges in a communicator's graph
- * topology.
- */
- class comm_edge_iterator
- : public iterator_facade<comm_edge_iterator,
- std::pair<int, int>,
- forward_traversal_tag,
- const std::pair<int, int>&,
- int>
- {
- public:
- comm_edge_iterator() { }
-
- /// Constructor for a past-the-end iterator
- comm_edge_iterator(int nedges) : edge_index(nedges) { }
-
- comm_edge_iterator(shared_array<int> indices, shared_array<int> edges)
- : indices(indices), edges(edges), edge_index(0), edge(0, 0)
- { }
-
- protected:
- friend class boost::iterator_core_access;
-
- const std::pair<int, int>& dereference() const
- {
- while (edge_index == indices[edge.first])
- ++edge.first;
- edge.second = edges[edge_index];
- return edge;
- }
-
- bool equal(const comm_edge_iterator& other) const
- {
- return edge_index == other.edge_index;
- }
-
- void increment()
- {
- ++edge_index;
- }
-
- shared_array<int> indices;
- shared_array<int> edges;
- int edge_index;
- mutable std::pair<int, int> edge;
- };
-
-} // end namespace detail
-
-// Incidence Graph requirements
-
-/**
- * @brief Returns the source vertex from an edge in the graph topology
- * of a communicator.
- */
-inline int source(const std::pair<int, int>& edge, const graph_communicator&)
-{
- return edge.first;
-}
-
-/**
- * @brief Returns the target vertex from an edge in the graph topology
- * of a communicator.
- */
-inline int target(const std::pair<int, int>& edge, const graph_communicator&)
-{
- return edge.second;
-}
-
-/**
- * @brief Returns an iterator range containing all of the edges
- * outgoing from the given vertex in a graph topology of a
- * communicator.
- */
-std::pair<detail::comm_out_edge_iterator, detail::comm_out_edge_iterator>
-out_edges(int vertex, const graph_communicator& comm);
-
-
-/**
- * @brief Returns the out-degree of a vertex in the graph topology of
- * a communicator.
- */
-int out_degree(int vertex, const graph_communicator& comm);
-
-// Adjacency Graph requirements
-
-/**
- * @brief Returns an iterator range containing all of the neighbors of
- * the given vertex in the communicator's graph topology.
- */
-std::pair<detail::comm_adj_iterator, detail::comm_adj_iterator>
-adjacent_vertices(int vertex, const graph_communicator& comm);
-
-// Vertex List Graph requirements
-
-/**
- * @brief Returns an iterator range that contains all of the vertices
- * with the communicator's graph topology, i.e., all of the process
- * ranks in the communicator.
- */
-inline std::pair<counting_iterator<int>, counting_iterator<int> >
-vertices(const graph_communicator& comm)
-{
- return std::make_pair(counting_iterator<int>(0),
- counting_iterator<int>(comm.size()));
-}
-
-/**
- * @brief Returns the number of vertices within the graph topology of
- * the communicator, i.e., the number of processes in the
- * communicator.
- */
-inline int num_vertices(const graph_communicator& comm) { return comm.size(); }
-
-// Edge List Graph requirements
-
-/**
- * @brief Returns an iterator range that contains all of the edges
- * with the communicator's graph topology.
- */
-std::pair<detail::comm_edge_iterator, detail::comm_edge_iterator>
-edges(const graph_communicator& comm);
-
-/**
- * @brief Returns the number of edges in the communicator's graph
- * topology.
- */
-int num_edges(const graph_communicator& comm);
-
-// Property Graph requirements
-
-/**
- * @brief Returns a property map that maps from vertices in a
- * communicator's graph topology to their index values.
- *
- * Since the vertices are ranks in the communicator, the returned
- * property map is the identity property map.
- */
-inline identity_property_map get(vertex_index_t, const graph_communicator&)
-{
- return identity_property_map();
-}
-
-/**
- * @brief Returns the index of a vertex in the communicator's graph
- * topology.
- *
- * Since the vertices are ranks in the communicator, this is the
- * identity function.
- */
-inline int get(vertex_index_t, const graph_communicator&, int vertex)
-{
- return vertex;
-}
-
-} } // end namespace boost::mpi
-
-namespace boost {
-
-/**
- * @brief Traits structure that allows a communicator with graph
- * topology to be viewed as a graph by the Boost Graph Library.
- *
- * The specialization of @c graph_traits for an MPI communicator
- * allows a communicator with graph topology to be viewed as a
- * graph. An MPI communicator with graph topology meets the
- * requirements of the Graph, Incidence Graph, Adjacency Graph, Vertex
- * List Graph, and Edge List Graph concepts from the Boost Graph
- * Library.
- */
-template<>
-struct graph_traits<mpi::graph_communicator> {
- // Graph concept requirements
- typedef int vertex_descriptor;
- typedef std::pair<int, int> edge_descriptor;
- typedef directed_tag directed_category;
- typedef disallow_parallel_edge_tag edge_parallel_category;
-
- /**
- * INTERNAL ONLY
- */
- struct traversal_category
- : incidence_graph_tag,
- adjacency_graph_tag,
- vertex_list_graph_tag,
- edge_list_graph_tag
- {
- };
-
- /**
- * @brief Returns a vertex descriptor that can never refer to any
- * valid vertex.
- */
- static vertex_descriptor null_vertex() { return -1; }
-
- // Incidence Graph requirements
- typedef mpi::detail::comm_out_edge_iterator out_edge_iterator;
- typedef int degree_size_type;
-
- // Adjacency Graph requirements
- typedef mpi::detail::comm_adj_iterator adjacency_iterator;
-
- // Vertex List Graph requirements
- typedef counting_iterator<int> vertex_iterator;
- typedef int vertices_size_type;
-
- // Edge List Graph requirements
- typedef mpi::detail::comm_edge_iterator edge_iterator;
- typedef int edges_size_type;
-};
-
-// Property Graph requirements
-
-/**
- * INTERNAL ONLY
- */
-template<>
-struct property_map<mpi::graph_communicator, vertex_index_t>
-{
- typedef identity_property_map type;
- typedef identity_property_map const_type;
-};
-
-} // end namespace boost
-
-
-
-#endif // BOOST_MPI_GRAPH_COMMUNICATOR_HPP
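
To make the BGL interplay concrete, here is a rough sketch that builds a directed ring over all ranks and turns it into a graph communicator. It assumes the Boost.Graph headers are available and that every rank constructs the same graph (a requirement of MPI_Graph_create); the names Ring and graph_comm are illustrative only:

    #include <boost/mpi/environment.hpp>
    #include <boost/mpi/communicator.hpp>
    #include <boost/mpi/graph_communicator.hpp>
    #include <boost/graph/adjacency_list.hpp>
    #include <iostream>

    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      // A directed ring: rank r points at rank (r + 1) % size.
      typedef boost::adjacency_list<boost::vecS, boost::vecS, boost::directedS> Ring;
      Ring ring(world.size());
      for (int r = 0; r < world.size(); ++r)
        add_edge(r, (r + 1) % world.size(), ring);

      // Vertex indices become process ranks in the new communicator.
      mpi::graph_communicator graph_comm(world, ring, /*reorder=*/false);

      std::cout << "rank " << graph_comm.rank() << " out-degree: "
                << out_degree(graph_comm.rank(), graph_comm) << std::endl;
      return 0;
    }
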
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/group.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/group.hpp
deleted file mode 100644
index 103b35a11..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/group.hpp
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright (C) 2007 Trustees of Indiana University
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file group.hpp
- *
- * This header defines the @c group class, which allows one to
- * manipulate and query groups of processes.
- */
-#ifndef BOOST_MPI_GROUP_HPP
-#define BOOST_MPI_GROUP_HPP
-
-#include <boost/mpi/exception.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/optional.hpp>
-#include <vector>
-
-namespace boost { namespace mpi {
-
-/**
- * @brief A @c group is a representation of a subset of the processes
- * within a @c communicator.
- *
- * The @c group class allows one to create arbitrary subsets of the
- * processes within a communicator. One can compute the union,
- * intersection, or difference of two groups, or create new groups by
- * specifically including or excluding certain processes. Given a
- * group, one can create a new communicator containing only the
- * processes in that group.
- */
-class BOOST_MPI_DECL group
-{
-public:
- /**
- * @brief Constructs an empty group.
- */
- group() : group_ptr() { }
-
- /**
- * @brief Constructs a group from an @c MPI_Group.
- *
- * This routine allows one to construct a Boost.MPI @c group from a
- * C @c MPI_Group. The @c group object can (optionally) adopt the @c
- * MPI_Group, after which point the @c group object becomes
- * responsible for freeing the @c MPI_Group when the last copy of @c
- * group disappears.
- *
- * @param in_group The @c MPI_Group used to construct this @c group.
- *
- * @param adopt Whether the @c group should adopt the @c
- * MPI_Group. When true, the @c group object (or one of its copies)
- * will free the group (via @c MPI_Group_free) when the last copy is
- * destroyed. Otherwise, the user is responsible for calling @c
- * MPI_Group_free.
- */
- group(const MPI_Group& in_group, bool adopt);
-
- /**
- * @brief Determine the rank of the calling process in the group.
- *
- * This routine is equivalent to @c MPI_Group_rank.
- *
- * @returns The rank of the calling process in the group, which will
- * be a value in [0, size()). If the calling process is not in the
- * group, returns an empty value.
- */
- optional<int> rank() const;
-
- /**
- * @brief Determine the number of processes in the group.
- *
- * This routine is equivalent to @c MPI_Group_size.
- *
- * @returns The number of processes in the group.
- */
- int size() const;
-
- /**
- * @brief Translates the ranks from one group into the ranks of the
- * same processes in another group.
- *
- * This routine translates each of the integer rank values in the
- * iterator range @c [first, last) from the current group into rank
- * values of the corresponding processes in @p to_group. The
- * corresponding rank values are written via the output iterator @c
- * out. When there is no correspondence between a rank in the
- * current group and a rank in @c to_group, the value @c
- * MPI_UNDEFINED is written to the output iterator.
- *
- * @param first Beginning of the iterator range of ranks in the
- * current group.
- *
- * @param last Past the end of the iterator range of ranks in the
- * current group.
- *
- * @param to_group The group that we are translating ranks to.
- *
- * @param out The output iterator to which the translated ranks will
- * be written.
- *
- * @returns the output iterator, which points one step past the last
- * rank written.
- */
- template<typename InputIterator, typename OutputIterator>
- OutputIterator translate_ranks(InputIterator first, InputIterator last,
- const group& to_group, OutputIterator out);
-
- /**
- * @brief Determines whether the group is non-empty.
- *
- * @returns True if the group is not empty, false if it is empty.
- */
- operator bool() const { return (bool)group_ptr; }
-
- /**
- * @brief Retrieves the underlying @c MPI_Group associated with this
- * group.
- *
- * @returns The @c MPI_Group handle manipulated by this object. If
- * this object represents the empty group, returns @c
- * MPI_GROUP_EMPTY.
- */
- operator MPI_Group() const
- {
- if (group_ptr)
- return *group_ptr;
- else
- return MPI_GROUP_EMPTY;
- }
-
- /**
- * @brief Creates a new group including a subset of the processes
- * in the current group.
- *
- * This routine creates a new @c group which includes only those
- * processes in the current group that are listed in the integer
- * iterator range @c [first, last). Equivalent to @c
- * MPI_Group_incl.
- *
- * @param first The beginning of the iterator range of ranks to include.
- *
- * @param last Past the end of the iterator range of ranks to include.
- *
- * @returns A new group containing those processes with ranks @c
- * [first, last) in the current group.
- */
- template<typename InputIterator>
- group include(InputIterator first, InputIterator last);
-
- /**
- * @brief Creates a new group from all of the processes in the
- * current group, excluding a specific subset of the processes.
- *
- * This routine creates a new @c group which includes all of the
- * processes in the current group except those whose ranks are
- * listed in the integer iterator range @c [first,
- * last). Equivalent to @c MPI_Group_excl.
- *
- * @param first The beginning of the iterator range of ranks to exclude.
- *
- * @param last Past the end of the iterator range of ranks to exclude.
- *
- * @returns A new group containing all of the processes in the
- * current group except those processes with ranks @c [first, last)
- * in the current group.
- */
- template<typename InputIterator>
- group exclude(InputIterator first, InputIterator last);
-
-
-protected:
- /**
- * INTERNAL ONLY
- *
- * Function object that frees an MPI group and deletes the
- * memory associated with it. Intended to be used as a deleter with
- * shared_ptr.
- */
- struct group_free
- {
- void operator()(MPI_Group* comm) const
- {
- int finalized;
- BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&finalized));
- if (!finalized)
- BOOST_MPI_CHECK_RESULT(MPI_Group_free, (comm));
- delete comm;
- }
- };
-
- /**
- * The underlying MPI group. This is a shared pointer, so the actual
- * MPI group will be shared among all related instances of the
- * @c group class. When there are no more such instances, the group
- * will be automatically freed.
- */
- shared_ptr<MPI_Group> group_ptr;
-};
-
-/**
- * @brief Determines whether two process groups are identical.
- *
- * Equivalent to calling @c MPI_Group_compare and checking whether the
- * result is @c MPI_IDENT.
- *
- * @returns True when the two process groups contain the same
- * processes in the same order.
- */
-BOOST_MPI_DECL bool operator==(const group& g1, const group& g2);
-
-/**
- * @brief Determines whether two process groups are not identical.
- *
- * Equivalent to calling @c MPI_Group_compare and checking whether the
- * result is not @c MPI_IDENT.
- *
- * @returns False when the two process groups contain the same
- * processes in the same order.
- */
-inline bool operator!=(const group& g1, const group& g2)
-{
- return !(g1 == g2);
-}
-
-/**
- * @brief Computes the union of two process groups.
- *
- * This routine returns a new @c group that contains all processes
- * that are either in group @c g1 or in group @c g2 (or both). The
- * processes that are in @c g1 will be first in the resulting group,
- * followed by the processes from @c g2 (but not also in @c
- * g1). Equivalent to @c MPI_Group_union.
- */
-BOOST_MPI_DECL group operator|(const group& g1, const group& g2);
-
-/**
- * @brief Computes the intersection of two process groups.
- *
- * This routine returns a new @c group that contains all processes
- * that are in group @c g1 and in group @c g2, ordered in the same way
- * as @c g1. Equivalent to @c MPI_Group_intersection.
- */
-BOOST_MPI_DECL group operator&(const group& g1, const group& g2);
-
-/**
- * @brief Computes the difference between two process groups.
- *
- * This routine returns a new @c group that contains all processes
- * that are in group @c g1 but not in group @c g2, ordered in the same way
- * as @c g1. Equivalent to @c MPI_Group_difference.
- */
-BOOST_MPI_DECL group operator-(const group& g1, const group& g2);
-
-/************************************************************************
- * Implementation details *
- ************************************************************************/
-template<typename InputIterator, typename OutputIterator>
-OutputIterator
-group::translate_ranks(InputIterator first, InputIterator last,
- const group& to_group, OutputIterator out)
-{
- std::vector<int> in_array(first, last);
- if (in_array.empty())
- return out;
-
- std::vector<int> out_array(in_array.size());
- BOOST_MPI_CHECK_RESULT(MPI_Group_translate_ranks,
- ((MPI_Group)*this,
- in_array.size(),
- &in_array[0],
- (MPI_Group)to_group,
- &out_array[0]));
-
- for (std::vector<int>::size_type i = 0, n = out_array.size(); i < n; ++i)
- *out++ = out_array[i];
- return out;
-}
-
-/**
- * INTERNAL ONLY
- *
- * Specialization of translate_ranks that handles the one case where
- * we can avoid any memory allocation or copying.
- */
-template<>
-BOOST_MPI_DECL int*
-group::translate_ranks(int* first, int* last, const group& to_group, int* out);
-
-template<typename InputIterator>
-group group::include(InputIterator first, InputIterator last)
-{
- if (first == last)
- return group();
-
- std::vector<int> ranks(first, last);
- MPI_Group result;
- BOOST_MPI_CHECK_RESULT(MPI_Group_incl,
- ((MPI_Group)*this, ranks.size(), &ranks[0], &result));
- return group(result, /*adopt=*/true);
-}
-
-/**
- * INTERNAL ONLY
- *
- * Specialization of group::include that handles the one case where we
- * can avoid any memory allocation or copying before creating the
- * group.
- */
-template<> BOOST_MPI_DECL group group::include(int* first, int* last);
-
-template<typename InputIterator>
-group group::exclude(InputIterator first, InputIterator last)
-{
- if (first == last)
- return group();
-
- std::vector<int> ranks(first, last);
- MPI_Group result;
- BOOST_MPI_CHECK_RESULT(MPI_Group_excl,
- ((MPI_Group)*this, ranks.size(), &ranks[0], &result));
- return group(result, /*adopt=*/true);
-}
-
-/**
- * INTERNAL ONLY
- *
- * Specialization of group::exclude that handles the one case where we
- * can avoid any memory allocation or copying before creating the
- * group.
- */
-template<> BOOST_MPI_DECL group group::exclude(int* first, int* last);
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_GROUP_HPP
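
A short sketch of the group algebra documented above. It assumes the companion communicator class exposes its process group through a group() member, as in stock Boost.MPI; the even/odd split is only an example:

    #include <boost/mpi/environment.hpp>
    #include <boost/mpi/communicator.hpp>
    #include <boost/mpi/group.hpp>
    #include <iostream>
    #include <vector>

    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      // Ranks 0, 2, 4, ... form one sub-group; the rest form its complement.
      std::vector<int> even;
      for (int r = 0; r < world.size(); r += 2)
        even.push_back(r);

      mpi::group whole = world.group();
      mpi::group evens = whole.include(even.begin(), even.end());
      mpi::group odds  = whole - evens;   // MPI_Group_difference

      if (boost::optional<int> r = evens.rank())
        std::cout << "world rank " << world.rank()
                  << " is rank " << *r << " among the evens" << std::endl;
      std::cout << "evens: " << evens.size()
                << ", odds: " << odds.size() << std::endl;
      return 0;
    }
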
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/inplace.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/inplace.hpp
deleted file mode 100644
index d84d07db5..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/inplace.hpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2005-2006 Alain Miniussi <alain.miniussi -at- oca.eu>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Message Passing Interface 1.1 -- Section 4. MPI Collectives
-
-/** @file inplace.hpp
- *
- * This header provides helpers to indicate to MPI collective operations
- * that a buffer can be used both as an input and an output.
- */
-#ifndef BOOST_MPI_INPLACE_HPP
-#define BOOST_MPI_INPLACE_HPP
-
-#include <boost/mpi/communicator.hpp>
-#include <vector>
-
-namespace boost { namespace mpi {
-
-/**
- * @brief Wrapper type to explicitly indicate that an input buffer
- * can be overridden with an output value.
- */
-template <typename T>
-struct inplace_t {
- inplace_t(T& inout) : buffer(inout) {}
- T& buffer;
-};
-
-template <typename T>
-struct inplace_t<T*> {
- inplace_t(T* inout) : buffer(inout) {}
- T* buffer;
-};
-
-
-/**
- * @brief Wrap an input value to indicate that it can be overridden
- * with an output value.
- * @param inout the contributing input value; it will be overridden
- * with the output value where one is expected. If it is a pointer,
- * the number of elements will be provided separately.
- * @returns The wrapped value or pointer.
- */
-template<typename T>
-inplace_t<T>
-inplace(T& inout) {
- return inplace_t<T>(inout);
-}
-/**
- * \overload
- */
-template<typename T>
-inplace_t<T*>
-inplace(T* inout) {
- return inplace_t<T*>(inout);
-}
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_INPLACE_HPP
-
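
The intended consumers of inplace_t are the collective operations. A minimal sketch, assuming the all_reduce overload that accepts an inplace_t wrapper (declared in collectives.hpp in stock Boost.MPI):

    #include <boost/mpi/environment.hpp>
    #include <boost/mpi/communicator.hpp>
    #include <boost/mpi/collectives.hpp>
    #include <boost/mpi/inplace.hpp>
    #include <functional>
    #include <iostream>

    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      // Each rank contributes its own rank; the global sum overwrites the input.
      int value = world.rank();
      mpi::all_reduce(world, mpi::inplace(value), std::plus<int>());

      std::cout << "rank " << world.rank() << " sees sum " << value << std::endl;
      return 0;
    }
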
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/intercommunicator.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/intercommunicator.hpp
deleted file mode 100644
index ad246b595..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/intercommunicator.hpp
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (C) 2007 The Trustees of Indiana University.
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file intercommunicator.hpp
- *
- * This header defines the @c intercommunicator class, which permits
- * communication between different process groups.
- */
-#ifndef BOOST_MPI_INTERCOMMUNICATOR_HPP
-#define BOOST_MPI_INTERCOMMUNICATOR_HPP
-
-#include <boost/mpi/communicator.hpp>
-
-namespace boost { namespace mpi {
-
-/**
- * INTERNAL ONLY
- *
- * Forward declaration of the MPI "group" representation, for use in
- * the description of the @c intercommunicator class.
- */
-class group;
-
-/**
- * @brief Communication facilities among processes in different
- * groups.
- *
- * The @c intercommunicator class provides communication facilities
- * among processes from different groups. An intercommunicator is
- * always associated with two process groups: one "local" process
- * group, containing the process that initiates an MPI operation
- * (e.g., the sender in a @c send operation), and one "remote" process
- * group, containing the process that is the target of the MPI
- * operation.
- *
- * While intercommunicators have essentially the same point-to-point
- * operations as intracommunicators (the latter communicate only
- * within a single process group), all communication with
- * intercommunicators occurs between the processes in the local group
- * and the processes in the remote group; communication within a group
- * must use a different (intra-)communicator.
- *
- */
-class BOOST_MPI_DECL intercommunicator : public communicator
-{
-private:
- friend class communicator;
-
- /**
- * INTERNAL ONLY
- *
- * Construct an intercommunicator given a shared pointer to the
- * underlying MPI_Comm. This operation is used for "casting" from a
- * communicator to an intercommunicator.
- */
- explicit intercommunicator(const shared_ptr<MPI_Comm>& cp)
- {
- this->comm_ptr = cp;
- }
-
-public:
- /**
- * Build a new Boost.MPI intercommunicator based on the MPI
- * intercommunicator @p comm.
- *
- * @p comm may be any valid MPI intercommunicator. If @p comm is
- * MPI_COMM_NULL, an empty communicator (that cannot be used for
- * communication) is created and the @p kind parameter is
- * ignored. Otherwise, the @p kind parameter determines how the
- * Boost.MPI communicator will be related to @p comm:
- *
- * - If @p kind is @c comm_duplicate, duplicate @c comm to create
- * a new communicator. This new communicator will be freed when
- * the Boost.MPI communicator (and all copies of it) is
- * destroyed. This option is only permitted if the underlying MPI
- * implementation supports MPI 2.0; duplication of
- * intercommunicators is not available in MPI 1.x.
- *
- * - If @p kind is @c comm_take_ownership, take ownership of @c
- * comm. It will be freed automatically when all of the Boost.MPI
- * communicators go out of scope.
- *
- * - If @p kind is @c comm_attach, this Boost.MPI communicator
- * will reference the existing MPI communicator @p comm but will
- * not free @p comm when the Boost.MPI communicator goes out of
- * scope. This option should only be used when the communicator is
- * managed by the user.
- */
- intercommunicator(const MPI_Comm& comm, comm_create_kind kind)
- : communicator(comm, kind) { }
-
- /**
- * Constructs a new intercommunicator whose local group is @p local
- * and whose remote group is @p peer. The intercommunicator can then
- * be used to communicate between processes in the two groups. This
- * constructor is equivalent to a call to @c MPI_Intercomm_create.
- *
- * @param local The intracommunicator containing all of the
- * processes that will go into the local group.
- *
- * @param local_leader The rank within the @p local
- * intracommunicator that will serve as its leader.
- *
- * @param peer The intracommunicator containing all of the processes
- * that will go into the remote group.
- *
- * @param remote_leader The rank within the @p peer group that will
- * serve as its leader.
- */
- intercommunicator(const communicator& local, int local_leader,
- const communicator& peer, int remote_leader);
-
- /**
- * Returns the size of the local group, i.e., the number of local
- * processes that are part of the group.
- */
- int local_size() const { return this->size(); }
-
- /**
- * Returns the local group, containing all of the local processes in
- * this intercommunicator.
- */
- boost::mpi::group local_group() const;
-
- /**
- * Returns the rank of this process within the local group.
- */
- int local_rank() const { return this->rank(); }
-
- /**
- * Returns the size of the remote group, i.e., the number of
- * processes that are part of the remote group.
- */
- int remote_size() const;
-
- /**
- * Returns the remote group, containing all of the remote processes
- * in this intercommunicator.
- */
- boost::mpi::group remote_group() const;
-
- /**
- * Merge the local and remote groups in this intercommunicator into
- * a new intracommunicator containing the union of the processes in
- * both groups. This method is equivalent to @c MPI_Intercomm_merge.
- *
- * @param high Whether the processes in this group should have
- * higher rank numbers than the processes in the other group. Each
- * of the processes within a particular group shall have the same
- * "high" value.
- *
- * @returns the new, merged intracommunicator
- */
- communicator merge(bool high) const;
-};
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_INTERCOMMUNICATOR_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/nonblocking.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/nonblocking.hpp
deleted file mode 100644
index 1fc1ecd03..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/nonblocking.hpp
+++ /dev/null
@@ -1,738 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file nonblocking.hpp
- *
- * This header defines operations for completing non-blocking
- * communication requests.
- */
-#ifndef BOOST_MPI_NONBLOCKING_HPP
-#define BOOST_MPI_NONBLOCKING_HPP
-
-#include <boost/mpi/config.hpp>
-#include <vector>
-#include <iterator> // for std::iterator_traits
-#include <boost/optional.hpp>
-#include <utility> // for std::pair
-#include <algorithm> // for iter_swap, reverse
-#include <boost/static_assert.hpp>
-#include <boost/mpi/request.hpp>
-#include <boost/mpi/status.hpp>
-#include <boost/mpi/exception.hpp>
-
-namespace boost { namespace mpi {
-
-/**
- * @brief Wait until any non-blocking request has completed.
- *
- * This routine takes in a set of requests stored in the iterator
- * range @c [first,last) and waits until any of these requests has
- * been completed. It provides functionality equivalent to
- * @c MPI_Waitany.
- *
- * @param first The iterator that denotes the beginning of the
- * sequence of request objects.
- *
- * @param last The iterator that denotes the end of the sequence of
- * request objects. This may not be equal to @c first.
- *
- * @returns A pair containing the status object that corresponds to
- * the completed operation and the iterator referencing the completed
- * request.
- */
-template<typename ForwardIterator>
-std::pair<status, ForwardIterator>
-wait_any(ForwardIterator first, ForwardIterator last)
-{
- using std::advance;
-
- BOOST_ASSERT(first != last);
-
- typedef typename std::iterator_traits<ForwardIterator>::difference_type
- difference_type;
-
- bool all_trivial_requests = true;
- difference_type n = 0;
- ForwardIterator current = first;
- while (true) {
- // Check if we have found a completed request. If so, return it.
- if (current->m_requests[0] != MPI_REQUEST_NULL &&
- (current->m_requests[1] != MPI_REQUEST_NULL ||
- current->m_handler)) {
- if (optional<status> result = current->test())
- return std::make_pair(*result, current);
- }
-
- // Check if this request (and all others before it) are "trivial"
- // requests, e.g., they can be represented with a single
- // MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
-
- // Move to the next request.
- ++n;
- if (++current == last) {
- // We have reached the end of the list. If all requests thus far
- // have been trivial, we can call MPI_Waitany directly, because
- // it may be more efficient than our busy-wait semantics.
- if (all_trivial_requests) {
- std::vector<MPI_Request> requests;
- requests.reserve(n);
- for (current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
-
- // Let MPI wait until one of these operations completes.
- int index;
- status stat;
- BOOST_MPI_CHECK_RESULT(MPI_Waitany,
- (n, &requests[0], &index, &stat.m_status));
-
- // We don't have a notion of empty requests or status objects,
- // so this is an error.
- if (index == MPI_UNDEFINED)
- boost::throw_exception(exception("MPI_Waitany", MPI_ERR_REQUEST));
-
- // Find the iterator corresponding to the completed request.
- current = first;
- advance(current, index);
- current->m_requests[0] = requests[index];
- return std::make_pair(stat, current);
- }
-
- // There are some nontrivial requests, so we must continue our
- // busy waiting loop.
- n = 0;
- current = first;
- all_trivial_requests = true;
- }
- }
-
- // We cannot ever get here
- BOOST_ASSERT(false);
-}
-
-/**
- * @brief Test whether any non-blocking request has completed.
- *
- * This routine takes in a set of requests stored in the iterator
- * range @c [first,last) and tests whether any of these requests has
- * been completed. This routine is similar to @c wait_any, but will
- * not block waiting for requests to complete. It provides
- * functionality equivalent to @c MPI_Testany.
- *
- * @param first The iterator that denotes the beginning of the
- * sequence of request objects.
- *
- * @param last The iterator that denotes the end of the sequence of
- * request objects.
- *
- * @returns If any outstanding requests have completed, a pair
- * containing the status object that corresponds to the completed
- * operation and the iterator referencing the completed
- * request. Otherwise, an empty @c optional<>.
- */
-template<typename ForwardIterator>
-optional<std::pair<status, ForwardIterator> >
-test_any(ForwardIterator first, ForwardIterator last)
-{
- while (first != last) {
- // Check if we have found a completed request. If so, return it.
- if (optional<status> result = first->test()) {
- return std::make_pair(*result, first);
- }
- ++first;
- }
-
- // We found nothing
- return optional<std::pair<status, ForwardIterator> >();
-}
-
-/**
- * @brief Wait until all non-blocking requests have completed.
- *
- * This routine takes in a set of requests stored in the iterator
- * range @c [first,last) and waits until all of these requests have
- * been completed. It provides functionality equivalent to
- * @c MPI_Waitall.
- *
- * @param first The iterator that denotes the beginning of the
- * sequence of request objects.
- *
- * @param last The iterator that denotes the end of the sequence of
- * request objects.
- *
- * @param out If provided, an output iterator through which the
- * status of each request will be emitted. The @c status objects are
- * emitted in the same order as the requests are retrieved from
- * @c [first,last).
- *
- * @returns If an @p out parameter was provided, the value @c out
- * after all of the @c status objects have been emitted.
- */
-template<typename ForwardIterator, typename OutputIterator>
-OutputIterator
-wait_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
-{
- typedef typename std::iterator_traits<ForwardIterator>::difference_type
- difference_type;
-
- using std::distance;
-
- difference_type num_outstanding_requests = distance(first, last);
-
- std::vector<status> results(num_outstanding_requests);
- std::vector<bool> completed(num_outstanding_requests);
-
- while (num_outstanding_requests > 0) {
- bool all_trivial_requests = true;
- difference_type idx = 0;
- for (ForwardIterator current = first; current != last; ++current, ++idx) {
- if (!completed[idx]) {
- if (optional<status> stat = current->test()) {
- // This outstanding request has been completed. We're done.
- results[idx] = *stat;
- completed[idx] = true;
- --num_outstanding_requests;
- all_trivial_requests = false;
- } else {
- // Check if this request (and all others before it) are "trivial"
- // requests, e.g., they can be represented with a single
- // MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
- }
- }
- }
-
- // If we have yet to fulfill any requests and all of the requests
- // are trivial (i.e., require only a single MPI_Request to be
- // fulfilled), call MPI_Waitall directly.
- if (all_trivial_requests
- && num_outstanding_requests == (difference_type)results.size()) {
- std::vector<MPI_Request> requests;
- requests.reserve(num_outstanding_requests);
- for (ForwardIterator current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
-
- // Let MPI wait until all of these operations complete.
- std::vector<MPI_Status> stats(num_outstanding_requests);
- BOOST_MPI_CHECK_RESULT(MPI_Waitall,
- (num_outstanding_requests, &requests[0],
- &stats[0]));
-
- for (std::vector<MPI_Status>::iterator i = stats.begin();
- i != stats.end(); ++i, ++out) {
- status stat;
- stat.m_status = *i;
- *out = stat;
- }
-
- return out;
- }
-
- all_trivial_requests = false;
- }
-
- return std::copy(results.begin(), results.end(), out);
-}
-
-/**
- * \overload
- */
-template<typename ForwardIterator>
-void
-wait_all(ForwardIterator first, ForwardIterator last)
-{
- typedef typename std::iterator_traits<ForwardIterator>::difference_type
- difference_type;
-
- using std::distance;
-
- difference_type num_outstanding_requests = distance(first, last);
-
- std::vector<bool> completed(num_outstanding_requests);
-
- while (num_outstanding_requests > 0) {
- bool all_trivial_requests = true;
-
- difference_type idx = 0;
- for (ForwardIterator current = first; current != last; ++current, ++idx) {
- if (!completed[idx]) {
- if (optional<status> stat = current->test()) {
- // This outstanding request has been completed.
- completed[idx] = true;
- --num_outstanding_requests;
- all_trivial_requests = false;
- } else {
- // Check if this request (and all others before it) are "trivial"
- // requests, e.g., they can be represented with a single
- // MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
- }
- }
- }
-
- // If we have yet to fulfill any requests and all of the requests
- // are trivial (i.e., require only a single MPI_Request to be
- // fulfilled), call MPI_Waitall directly.
- if (all_trivial_requests
- && num_outstanding_requests == (difference_type)completed.size()) {
- std::vector<MPI_Request> requests;
- requests.reserve(num_outstanding_requests);
- for (ForwardIterator current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
-
- // Let MPI wait until all of these operations complete.
- BOOST_MPI_CHECK_RESULT(MPI_Waitall,
- (num_outstanding_requests, &requests[0],
- MPI_STATUSES_IGNORE));
-
- // Signal completion
- num_outstanding_requests = 0;
- }
- }
-}
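
To show how these completion routines are typically driven, here is a sketch of a non-blocking ring exchange; the isend/irecv members come from the communicator class and are assumptions as far as this header is concerned:

    #include <boost/mpi/environment.hpp>
    #include <boost/mpi/communicator.hpp>
    #include <boost/mpi/nonblocking.hpp>
    #include <boost/mpi/request.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      int right = (world.rank() + 1) % world.size();
      int left  = (world.rank() + world.size() - 1) % world.size();

      std::string outgoing = "hello", incoming;

      // Post one non-blocking send and one non-blocking receive, then block
      // until both have finished (serialized types use the two-request path).
      std::vector<mpi::request> reqs;
      reqs.push_back(world.isend(right, 0, outgoing));
      reqs.push_back(world.irecv(left, 0, incoming));
      mpi::wait_all(reqs.begin(), reqs.end());

      std::cout << "rank " << world.rank() << " received: " << incoming << std::endl;
      return 0;
    }
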
-
-/**
- * @brief Tests whether all non-blocking requests have completed.
- *
- * This routine takes in a set of requests stored in the iterator
- * range @c [first,last) and determines whether all of these requests
- * have been completed. However, due to limitations of the underlying
- * MPI implementation, if any of the requests refers to a
- * non-blocking send or receive of a serialized data type, @c
- * test_all will always return the equivalent of @c false (i.e., the
- * requests cannot all be finished at this time). This routine
- * performs the same functionality as @c wait_all, except that this
- * routine will not block. This routine provides functionality
- * equivalent to @c MPI_Testall.
- *
- * @param first The iterator that denotes the beginning of the
- * sequence of request objects.
- *
- * @param last The iterator that denotes the end of the sequence of
- * request objects.
- *
- * @param out If provided and all requests have been completed, an
- * output iterator through which the status of each request will be
- * emitted. The @c status objects are emitted in the same order as
- * the requests are retrieved from @c [first,last).
- *
- * @returns If an @p out parameter was provided, the value @c out
- * after all of the @c status objects have been emitted (if all
- * requests were completed) or an empty @c optional<>. If no @p out
- * parameter was provided, returns @c true if all requests have
- * completed or @c false otherwise.
- */
-template<typename ForwardIterator, typename OutputIterator>
-optional<OutputIterator>
-test_all(ForwardIterator first, ForwardIterator last, OutputIterator out)
-{
- std::vector<MPI_Request> requests;
- for (; first != last; ++first) {
- // If we have a non-trivial request, then no requests can be
- // completed.
- if (first->m_handler || first->m_requests[1] != MPI_REQUEST_NULL)
- return optional<OutputIterator>();
-
- requests.push_back(first->m_requests[0]);
- }
-
- int flag = 0;
- int n = requests.size();
- std::vector<MPI_Status> stats(n);
- BOOST_MPI_CHECK_RESULT(MPI_Testall, (n, &requests[0], &flag, &stats[0]));
- if (flag) {
- for (int i = 0; i < n; ++i, ++out) {
- status stat;
- stat.m_status = stats[i];
- *out = stat;
- }
- return out;
- } else {
- return optional<OutputIterator>();
- }
-}
-
-/**
- * \overload
- */
-template<typename ForwardIterator>
-bool
-test_all(ForwardIterator first, ForwardIterator last)
-{
- std::vector<MPI_Request> requests;
- for (; first != last; ++first) {
- // If we have a non-trivial request, then no requests can be
- // completed.
- if (first->m_handler || first->m_requests[1] != MPI_REQUEST_NULL)
- return false;
-
- requests.push_back(first->m_requests[0]);
- }
-
- int flag = 0;
- int n = requests.size();
- BOOST_MPI_CHECK_RESULT(MPI_Testall,
- (n, &requests[0], &flag, MPI_STATUSES_IGNORE));
- return flag != 0;
-}
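-
-/* A minimal usage sketch for test_all (illustrative only): `reqs` is assumed
- * to hold requests obtained from isend/irecv, and do_other_work() stands for
- * any local computation to overlap with communication.
- *
- *   while (!boost::mpi::test_all(reqs.begin(), reqs.end()))
- *     do_other_work();
- *
- * As noted above, if any request involves a serialized type this test never
- * reports completion; drain such requests with wait_all or per-request wait().
- */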
-
-/**
- * @brief Wait until some non-blocking requests have completed.
- *
- * This routine takes in a set of requests stored in the iterator
- * range @c [first,last) and waits until at least one of the requests
- * has completed. It then completes all of the requests it can,
- * partitioning the input sequence into pending requests followed by
- * completed requests. If an output iterator is provided, @c status
- * objects will be emitted for each of the completed requests. This
- * routine provides functionality equivalent to @c MPI_Waitsome.
- *
- * @param first The iterator that denotes the beginning of the
- * sequence of request objects.
- *
- * @param last The iterator that denotes the end of the sequence of
- * request objects. This may not be equal to @c first.
- *
- * @param out If provided, the @c status objects corresponding to
- * completed requests will be emitted through this output iterator.
- *
- * @returns If the @p out parameter was provided, a pair containing
- * the output iterator @p out after all of the @c status objects have
- * been written through it and an iterator referencing the first
- * completed request. If no @p out parameter was provided, only the
- * iterator referencing the first completed request will be returned.
- */
-template<typename BidirectionalIterator, typename OutputIterator>
-std::pair<OutputIterator, BidirectionalIterator>
-wait_some(BidirectionalIterator first, BidirectionalIterator last,
- OutputIterator out)
-{
- using std::advance;
-
- if (first == last)
- return std::make_pair(out, first);
-
- typedef typename std::iterator_traits<BidirectionalIterator>::difference_type
- difference_type;
-
- bool all_trivial_requests = true;
- difference_type n = 0;
- BidirectionalIterator current = first;
- BidirectionalIterator start_of_completed = last;
- while (true) {
- // Check if we have found a completed request.
- if (optional<status> result = current->test()) {
- using std::iter_swap;
-
- // Emit the resulting status object
- *out++ = *result;
-
- // We're expanding the set of completed requests
- --start_of_completed;
-
- if (current == start_of_completed) {
- // If we have hit the end of the list of pending
- // requests. Finish up by fixing the order of the completed
- // set to match the order in which we emitted status objects,
- // then return.
- std::reverse(start_of_completed, last);
- return std::make_pair(out, start_of_completed);
- }
-
- // Swap the request we just completed with the last request that
- // has not yet been tested.
- iter_swap(current, start_of_completed);
-
- continue;
- }
-
- // Check if this request (and all others before it) are "trivial"
- // requests, i.e., they can be represented with a single
- // MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
-
- // Move to the next request.
- ++n;
- if (++current == start_of_completed) {
- if (start_of_completed != last) {
- // We have satisfied some requests. Make the order of the
- // completed requests match that of the status objects we've
- // already emitted and we're done.
- std::reverse(start_of_completed, last);
- return std::make_pair(out, start_of_completed);
- }
-
- // We have reached the end of the list. If all requests thus far
- // have been trivial, we can call MPI_Waitsome directly, because
- // it may be more efficient than our busy-wait semantics.
- if (all_trivial_requests) {
- std::vector<MPI_Request> requests;
- std::vector<int> indices(n);
- std::vector<MPI_Status> stats(n);
- requests.reserve(n);
- for (current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
-
- // Let MPI wait until some of these operations complete.
- int num_completed;
- BOOST_MPI_CHECK_RESULT(MPI_Waitsome,
- (n, &requests[0], &num_completed, &indices[0],
- &stats[0]));
-
- // Translate the index-based result of MPI_Waitsome into a
- // partitioning on the requests.
- int current_offset = 0;
- current = first;
- for (int index = 0; index < num_completed; ++index, ++out) {
- using std::iter_swap;
-
- // Move "current" to the request object at this index
- advance(current, indices[index] - current_offset);
- current_offset = indices[index];
-
- // Emit the status object
- status stat;
- stat.m_status = stats[index];
- *out = stat;
-
- // Finish up the request and swap it into the "completed
- // requests" partition.
- current->m_requests[0] = requests[indices[index]];
- --start_of_completed;
- iter_swap(current, start_of_completed);
- }
-
- // We have satisfied some requests. Make the order of the
- // completed requests match that of the status objects we've
- // already emitted and we're done.
- std::reverse(start_of_completed, last);
- return std::make_pair(out, start_of_completed);
- }
-
- // There are some nontrivial requests, so we must continue our
- // busy waiting loop.
- n = 0;
- current = first;
- }
- }
-
- // We cannot ever get here
- BOOST_ASSERT(false);
-}
-
-/**
- * \overload
- */
-template<typename BidirectionalIterator>
-BidirectionalIterator
-wait_some(BidirectionalIterator first, BidirectionalIterator last)
-{
- using std::advance;
-
- if (first == last)
- return first;
-
- typedef typename std::iterator_traits<BidirectionalIterator>::difference_type
- difference_type;
-
- bool all_trivial_requests = true;
- difference_type n = 0;
- BidirectionalIterator current = first;
- BidirectionalIterator start_of_completed = last;
- while (true) {
- // Check if we have found a completed request.
- if (optional<status> result = current->test()) {
- using std::iter_swap;
-
- // We're expanding the set of completed requests
- --start_of_completed;
-
- // If we have hit the end of the list of pending requests, we're
- // done.
- if (current == start_of_completed)
- return start_of_completed;
-
- // Swap the request we just completed with the last request that
- // has not yet been tested.
- iter_swap(current, start_of_completed);
-
- continue;
- }
-
- // Check if this request (and all others before it) are "trivial"
- // requests, i.e., they can be represented with a single
- // MPI_Request.
- all_trivial_requests =
- all_trivial_requests
- && !current->m_handler
- && current->m_requests[1] == MPI_REQUEST_NULL;
-
- // Move to the next request.
- ++n;
- if (++current == start_of_completed) {
- // If we have satisfied some requests, we're done.
- if (start_of_completed != last)
- return start_of_completed;
-
- // We have reached the end of the list. If all requests thus far
- // have been trivial, we can call MPI_Waitsome directly, because
- // it may be more efficient than our busy-wait semantics.
- if (all_trivial_requests) {
- std::vector<MPI_Request> requests;
- std::vector<int> indices(n);
- requests.reserve(n);
- for (current = first; current != last; ++current)
- requests.push_back(current->m_requests[0]);
-
- // Let MPI wait until some of these operations complete.
- int num_completed;
- BOOST_MPI_CHECK_RESULT(MPI_Waitsome,
- (n, &requests[0], &num_completed, &indices[0],
- MPI_STATUSES_IGNORE));
-
- // Translate the index-based result of MPI_Waitsome into a
- // partitioning on the requests.
- int current_offset = 0;
- current = first;
- for (int index = 0; index < num_completed; ++index) {
- using std::iter_swap;
-
- // Move "current" to the request object at this index
- advance(current, indices[index] - current_offset);
- current_offset = indices[index];
-
- // Finish up the request and swap it into the "completed
- // requests" partition.
- current->m_requests[0] = requests[indices[index]];
- --start_of_completed;
- iter_swap(current, start_of_completed);
- }
-
- // We have satisfied some requests, so we are done.
- return start_of_completed;
- }
-
- // There are some nontrivial requests, so we must continue our
- // busy waiting loop.
- n = 0;
- current = first;
- }
- }
-
- // We cannot ever get here
- BOOST_ASSERT(false);
-}
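-
-/* A minimal usage sketch for wait_some (illustrative only; `reqs` is a
- * std::vector<boost::mpi::request> filled elsewhere). The returned iterator
- * partitions the sequence: [reqs.begin(), completed) are still pending and
- * [completed, reqs.end()) have finished, so the finished ones can be erased.
- *
- *   std::vector<boost::mpi::request>::iterator completed =
- *     boost::mpi::wait_some(reqs.begin(), reqs.end());
- *   reqs.erase(completed, reqs.end());
- */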
-
-/**
- * @brief Test whether some non-blocking requests have completed.
- *
- * This routine takes in a set of requests stored in the iterator
- * range @c [first,last) and tests to see if any of the requests has
- * completed. It completes all of the requests it can, partitioning
- * the input sequence into pending requests followed by completed
- * requests. If an output iterator is provided, @c status objects
- * will be emitted for each of the completed requests. This routine
- * is similar to @c wait_some, but does not wait until any requests
- * have completed. This routine provides functionality equivalent to
- * @c MPI_Testsome.
- *
- * @param first The iterator that denotes the beginning of the
- * sequence of request objects.
- *
- * @param last The iterator that denotes the end of the sequence of
- * request objects. This may not be equal to @c first.
- *
- * @param out If provided, the @c status objects corresponding to
- * completed requests will be emitted through this output iterator.
- *
- * @returns If the @p out parameter was provided, a pair containing
- * the output iterator @p out after all of the @c status objects have
- * been written through it and an iterator referencing the first
- * completed request. If no @p out parameter was provided, only the
- * iterator referencing the first completed request will be returned.
- */
-template<typename BidirectionalIterator, typename OutputIterator>
-std::pair<OutputIterator, BidirectionalIterator>
-test_some(BidirectionalIterator first, BidirectionalIterator last,
- OutputIterator out)
-{
- BidirectionalIterator current = first;
- BidirectionalIterator start_of_completed = last;
- while (current != start_of_completed) {
- // Check if we have found a completed request.
- if (optional<status> result = current->test()) {
- using std::iter_swap;
-
- // Emit the resulting status object
- *out++ = *result;
-
- // We're expanding the set of completed requests
- --start_of_completed;
-
- // Swap the request we just completed with the last request that
- // has not yet been tested.
- iter_swap(current, start_of_completed);
-
- continue;
- }
-
- // Move to the next request.
- ++current;
- }
-
- // Finish up by fixing the order of the completed set to match the
- // order in which we emitted status objects, then return.
- std::reverse(start_of_completed, last);
- return std::make_pair(out, start_of_completed);
-}
-
-/**
- * \overload
- */
-template<typename BidirectionalIterator>
-BidirectionalIterator
-test_some(BidirectionalIterator first, BidirectionalIterator last)
-{
- BidirectionalIterator current = first;
- BidirectionalIterator start_of_completed = last;
- while (current != start_of_completed) {
- // Check if we have found a completed request.
- if (optional<status> result = current->test()) {
- using std::iter_swap;
-
- // We're expanding the set of completed requests
- --start_of_completed;
-
- // Swap the request we just completed with the last request that
- // has not yet been tested.
- iter_swap(current, start_of_completed);
-
- continue;
- }
-
- // Move to the next request.
- ++current;
- }
-
- return start_of_completed;
-}
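-
-/* A minimal usage sketch for test_some (illustrative only; same conventions
- * as the wait_some sketch above). Unlike wait_some, this call never blocks,
- * so it fits naturally inside a progress loop.
- *
- *   std::vector<boost::mpi::request>::iterator completed =
- *     boost::mpi::test_some(reqs.begin(), reqs.end());
- *   reqs.erase(completed, reqs.end()); // may erase nothing
- */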
-
-} } // end namespace boost::mpi
-
-
-#endif // BOOST_MPI_NONBLOCKING_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/operations.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/operations.hpp
deleted file mode 100644
index c1189e436..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/operations.hpp
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright (C) 2004 The Trustees of Indiana University.
-// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Douglas Gregor
-// Andrew Lumsdaine
-
-/** @file operations.hpp
- *
- * This header provides a mapping from function objects to @c MPI_Op
- * constants used in MPI collective operations. It also provides
- * several new function object types not present in the standard @c
- * <functional> header that have direct mappings to @c MPI_Op.
- */
-#ifndef BOOST_MPI_IS_MPI_OP_HPP
-#define BOOST_MPI_IS_MPI_OP_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/mpl/bool.hpp>
-#include <boost/mpl/if.hpp>
-#include <boost/mpl/and.hpp>
-#include <boost/mpi/datatype.hpp>
-#include <boost/utility/enable_if.hpp>
-#include <functional>
-
-namespace boost { namespace mpi {
-
-template<typename Op, typename T> struct is_mpi_op;
-
-/**
- * @brief Determine if a function object type is commutative.
- *
- * This trait determines if an operation @c Op is commutative when
- * applied to values of type @c T. Parallel operations such as @c
- * reduce and @c prefix_sum can be implemented more efficiently with
- * commutative operations. To mark an operation as commutative, users
- * should specialize @c is_commutative and derive from the class @c
- * mpl::true_.
- */
-template<typename Op, typename T>
-struct is_commutative : public mpl::false_ { };
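-
-/* A minimal sketch of marking an operation as commutative (illustrative only;
- * `my_min_max` is a hypothetical user-defined binary function object over
- * std::pair<int, int> values):
- *
- *   namespace boost { namespace mpi {
- *     template<>
- *     struct is_commutative<my_min_max, std::pair<int, int> >
- *       : mpl::true_ { };
- *   } }
- */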
-
-/**************************************************************************
- * Function objects for MPI operations not in <functional> header *
- **************************************************************************/
-
-/**
- * @brief Compute the maximum of two values.
- *
- * This binary function object computes the maximum of the two values
- * it is given. When used with MPI and a type @c T that has an
- * associated, built-in MPI data type, translates to @c MPI_MAX.
- */
-template<typename T>
-struct maximum : public std::binary_function<T, T, T>
-{
- /** @returns the maximum of x and y. */
- const T& operator()(const T& x, const T& y) const
- {
- return x < y? y : x;
- }
-};
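-
-/* A minimal usage sketch (illustrative only): it assumes a communicator
- * `world`, that <boost/mpi/collectives.hpp> is included, and a hypothetical
- * helper compute_local_value(); the result is meaningful on root rank 0 only.
- *
- *   int local_value = compute_local_value();
- *   int global_max = 0;
- *   boost::mpi::reduce(world, local_value, global_max,
- *                      boost::mpi::maximum<int>(), 0);
- */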
-
-/**
- * @brief Compute the minimum of two values.
- *
- * This binary function object computes the minimum of the two values
- * it is given. When used with MPI and a type @c T that has an
- * associated, built-in MPI data type, translates to @c MPI_MIN.
- */
-template<typename T>
-struct minimum : public std::binary_function<T, T, T>
-{
- /** @returns the minimum of x and y. */
- const T& operator()(const T& x, const T& y) const
- {
- return x < y? x : y;
- }
-};
-
-
-/**
- * @brief Compute the bitwise AND of two integral values.
- *
- * This binary function object computes the bitwise AND of the two
- * values it is given. When used with MPI and a type @c T that has an
- * associated, built-in MPI data type, translates to @c MPI_BAND.
- */
-template<typename T>
-struct bitwise_and : public std::binary_function<T, T, T>
-{
- /** @returns @c x & y. */
- T operator()(const T& x, const T& y) const
- {
- return x & y;
- }
-};
-
-/**
- * @brief Compute the bitwise OR of two integral values.
- *
- * This binary function object computes the bitwise OR of the two
- * values it is given. When used with MPI and a type @c T that has an
- * associated, built-in MPI data type, translates to @c MPI_BOR.
- */
-template<typename T>
-struct bitwise_or : public std::binary_function<T, T, T>
-{
- /** @returns @c x | y. */
- T operator()(const T& x, const T& y) const
- {
- return x | y;
- }
-};
-
-/**
- * @brief Compute the logical exclusive OR of two integral values.
- *
- * This binary function object computes the logical exclusive OR of the
- * two values it is given. When used with MPI and a type @c T that has
- * an associated, built-in MPI data type, translates to @c MPI_LXOR.
- */
-template<typename T>
-struct logical_xor : public std::binary_function<T, T, T>
-{
- /** @returns the logical exclusive OR of x and y. */
- T operator()(const T& x, const T& y) const
- {
- return (x || y) && !(x && y);
- }
-};
-
-/**
- * @brief Compute the bitwise exclusive OR of two integral values.
- *
- * This binary function object computes the bitwise exclusive OR of
- * the two values it is given. When used with MPI and a type @c T that
- * has an associated, built-in MPI data type, translates to @c
- * MPI_BXOR.
- */
-template<typename T>
-struct bitwise_xor : public std::binary_function<T, T, T>
-{
- /** @returns @c x ^ y. */
- T operator()(const T& x, const T& y) const
- {
- return x ^ y;
- }
-};
-
-/**************************************************************************
- * MPI_Op queries *
- **************************************************************************/
-
-/**
- * @brief Determine if a function object has an associated @c MPI_Op.
- *
- * This trait determines if a function object type @c Op, when used
- * with argument type @c T, has an associated @c MPI_Op. If so, @c
- * is_mpi_op<Op,T> will derive from @c mpl::true_ and will
- * contain a static member function @c op that takes no arguments but
- * returns the associated @c MPI_Op value. For instance, @c
- * is_mpi_op<std::plus<int>,int>::op() returns @c MPI_SUM.
- *
- * Users may specialize @c is_mpi_op for any other class templates
- * that map onto operations that have @c MPI_Op equivalences, such as
- * bitwise OR, logical and, or maximum. However, users are encouraged
- * to use the standard function objects in the @c functional and @c
- * boost/mpi/operations.hpp headers whenever possible. For
- * function objects that are class templates with a single template
- * parameter, it may be easier to specialize @c is_builtin_mpi_op.
- */
-template<typename Op, typename T>
-struct is_mpi_op : public mpl::false_ { };
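-
-/* A minimal sketch of querying the trait (illustrative only):
- *
- *   bool has_op = boost::mpi::is_mpi_op<std::plus<int>, int>::value; // true
- *   MPI_Op op   = boost::mpi::is_mpi_op<std::plus<int>, int>::op();  // MPI_SUM
- */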
-
-/// INTERNAL ONLY
-template<typename T>
-struct is_mpi_op<maximum<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_floating_point_datatype<T> >
-{
- static MPI_Op op() { return MPI_MAX; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
-struct is_mpi_op<minimum<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_floating_point_datatype<T> >
-{
- static MPI_Op op() { return MPI_MIN; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<std::plus<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_floating_point_datatype<T>,
- is_mpi_complex_datatype<T> >
-{
- static MPI_Op op() { return MPI_SUM; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<std::multiplies<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_floating_point_datatype<T>,
- is_mpi_complex_datatype<T> >
-{
- static MPI_Op op() { return MPI_PROD; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<std::logical_and<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_logical_datatype<T> >
-{
- static MPI_Op op() { return MPI_LAND; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<std::logical_or<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_logical_datatype<T> >
-{
- static MPI_Op op() { return MPI_LOR; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<logical_xor<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_logical_datatype<T> >
-{
- static MPI_Op op() { return MPI_LXOR; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<bitwise_and<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_byte_datatype<T> >
-{
- static MPI_Op op() { return MPI_BAND; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<bitwise_or<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_byte_datatype<T> >
-{
- static MPI_Op op() { return MPI_BOR; }
-};
-
-/// INTERNAL ONLY
-template<typename T>
- struct is_mpi_op<bitwise_xor<T>, T>
- : public boost::mpl::or_<is_mpi_integer_datatype<T>,
- is_mpi_byte_datatype<T> >
-{
- static MPI_Op op() { return MPI_BXOR; }
-};
-
-namespace detail {
- // A helper class used to create user-defined MPI_Ops
- template<typename Op, typename T>
- class user_op
- {
- public:
- explicit user_op(Op& op)
- {
- BOOST_MPI_CHECK_RESULT(MPI_Op_create,
- (&user_op<Op, T>::perform,
- is_commutative<Op, T>::value,
- &mpi_op));
-
- op_ptr = &op;
- }
-
- ~user_op()
- {
- if (std::uncaught_exception()) {
- // Ignore failure cases: there are obviously other problems
- // already, and we don't want to cause program termination if
- // MPI_Op_free fails.
- MPI_Op_free(&mpi_op);
- } else {
- BOOST_MPI_CHECK_RESULT(MPI_Op_free, (&mpi_op));
- }
- }
-
- MPI_Op& get_mpi_op()
- {
- return mpi_op;
- }
-
- private:
- MPI_Op mpi_op;
- static Op* op_ptr;
-
- static void BOOST_MPI_CALLING_CONVENTION perform(void* vinvec, void* voutvec, int* plen, MPI_Datatype*)
- {
- T* invec = static_cast<T*>(vinvec);
- T* outvec = static_cast<T*>(voutvec);
- std::transform(invec, invec + *plen, outvec, outvec, *op_ptr);
- }
- };
-
- template<typename Op, typename T> Op* user_op<Op, T>::op_ptr = 0;
-
-} // end namespace detail
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_GET_MPI_OP_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_iarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_iarchive.hpp
deleted file mode 100644
index bb8094b41..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_iarchive.hpp
+++ /dev/null
@@ -1,159 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-// Douglas Gregor
-
-/** @file packed_iarchive.hpp
- *
- * This header provides the facilities for unpacking Serializable data
- * types from a buffer using @c MPI_Unpack. The buffers are typically
- * received via MPI and have been packed either via the facilities in
- * @c packed_oarchive.hpp or @c MPI_Pack.
- */
-#ifndef BOOST_MPI_PACKED_IARCHIVE_HPP
-#define BOOST_MPI_PACKED_IARCHIVE_HPP
-
-#include <boost/mpi/datatype.hpp>
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/detail/common_iarchive.hpp>
-#include <boost/archive/basic_archive.hpp>
-#include <boost/mpi/detail/packed_iprimitive.hpp>
-#include <boost/mpi/detail/binary_buffer_iprimitive.hpp>
-#include <boost/serialization/string.hpp>
-#include <boost/serialization/collection_size_type.hpp>
-#include <boost/serialization/item_version_type.hpp>
-#include <boost/assert.hpp>
-
-namespace boost { namespace mpi {
-
-#ifdef BOOST_MPI_HOMOGENEOUS
- typedef binary_buffer_iprimitive iprimitive;
-#else
- typedef packed_iprimitive iprimitive;
-#endif
-
-
-/** @brief An archive that unpacks binary data from an MPI buffer.
- *
- * The @c packed_iarchive class is an Archiver (as in the
- * Boost.Serialization library) that unpacks binary data from a
- * buffer received via MPI. It can operate on any Serializable data
- * type and will use the @c MPI_Unpack function of the underlying MPI
- * implementation to perform deserialization.
- */
-
-class BOOST_MPI_DECL packed_iarchive
- : public iprimitive
- , public archive::detail::common_iarchive<packed_iarchive>
-{
-public:
- /**
- * Construct a @c packed_iarchive to receive data over the given
- * MPI communicator and with an initial buffer.
- *
- * @param comm The communicator over which this archive will be
- * received.
- *
- * @param b A user-defined buffer that contains the binary
- * representation of serialized objects.
- *
- * @param flags Control the serialization of the data types. Refer
- * to the Boost.Serialization documentation before changing the
- * default flags.
- */
-
- packed_iarchive(MPI_Comm const & comm, buffer_type & b, unsigned int flags = boost::archive::no_header, int position = 0)
- : iprimitive(b,comm,position),
- archive::detail::common_iarchive<packed_iarchive>(flags)
- {}
-
- /**
- * Construct a @c packed_iarchive to receive data over the given
- * MPI communicator.
- *
- * @param comm The communicator over which this archive will be
- * received.
- *
- * @param flags Control the serialization of the data types. Refer
- * to the Boost.Serialization documentation before changing the
- * default flags.
- */
-
- packed_iarchive
- ( MPI_Comm const & comm , std::size_t s=0,
- unsigned int flags = boost::archive::no_header)
- : iprimitive(internal_buffer_,comm)
- , archive::detail::common_iarchive<packed_iarchive>(flags)
- , internal_buffer_(s)
- {}
-
- // Load everything else in the usual way, forwarding on to the Base class
- template<class T>
- void load_override(T& x, mpl::false_)
- {
- archive::detail::common_iarchive<packed_iarchive>::load_override(x);
- }
-
- // Load it directly using the primitives
- template<class T>
- void load_override(T& x, mpl::true_)
- {
- iprimitive::load(x);
- }
-
- // Load all supported datatypes directly
- template<class T>
- void load_override(T& x)
- {
- typedef typename mpl::apply1<use_array_optimization
- , BOOST_DEDUCED_TYPENAME remove_const<T>::type
- >::type use_optimized;
- load_override(x, use_optimized());
- }
-
- // input archives need to ignore the optional information
- void load_override(archive::class_id_optional_type & /*t*/){}
-
- void load_override(archive::class_id_type & t){
- int_least16_t x=0;
- * this->This() >> x;
- t = boost::archive::class_id_type(x);
- }
-
- void load_override(archive::version_type & t){
- int_least8_t x=0;
- * this->This() >> x;
- t = boost::archive::version_type(x);
- }
-
- void load_override(archive::class_id_reference_type & t){
- load_override(static_cast<archive::class_id_type &>(t));
- }
-
- void load_override(archive::class_name_type & t)
- {
- std::string cn;
- cn.reserve(BOOST_SERIALIZATION_MAX_KEY_SIZE);
- * this->This() >> cn;
- std::memcpy(t, cn.data(), cn.size());
- // borland tweak
- t.t[cn.size()] = '\0';
- }
-
-private:
- /// An internal buffer to be used when the user does not supply his
- /// own buffer.
- buffer_type internal_buffer_;
-};
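-
-/* A minimal usage sketch of the receiving side (illustrative only): it
- * assumes a communicator `world` and a matching packed_oarchive sent from
- * rank 0 with tag 0, as sketched in packed_oarchive.hpp.
- *
- *   boost::mpi::packed_iarchive ia(world);
- *   world.recv(0, 0, ia);   // fill the archive's buffer via MPI
- *   std::string payload;
- *   ia >> payload;          // unpack the serialized value
- */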
-
-} } // end namespace boost::mpi
-
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_iarchive)
-BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_iarchive)
-
-#endif // BOOST_MPI_PACKED_IARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_oarchive.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_oarchive.hpp
deleted file mode 100644
index c6c0173ae..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/packed_oarchive.hpp
+++ /dev/null
@@ -1,147 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-// Douglas Gregor
-
-/** @file packed_oarchive.hpp
- *
- * This header provides the facilities for packing Serializable data
- * types into a buffer using @c MPI_Pack. The buffers can then be
- * transmitted via MPI and unpacked either via the facilities in
- * @c packed_iarchive.hpp or @c MPI_Unpack.
- */
-#ifndef BOOST_MPI_PACKED_OARCHIVE_HPP
-#define BOOST_MPI_PACKED_OARCHIVE_HPP
-
-#include <boost/mpi/datatype.hpp>
-#include <boost/archive/basic_archive.hpp>
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/archive/detail/common_oarchive.hpp>
-#include <boost/mpi/detail/packed_oprimitive.hpp>
-#include <boost/mpi/detail/binary_buffer_oprimitive.hpp>
-#include <boost/serialization/string.hpp>
-#include <boost/serialization/collection_size_type.hpp>
-#include <boost/serialization/item_version_type.hpp>
-
-namespace boost { namespace mpi {
-
-#ifdef BOOST_MPI_HOMOGENEOUS
- typedef binary_buffer_oprimitive oprimitive;
-#else
- typedef packed_oprimitive oprimitive;
-#endif
-
-/** @brief An archive that packs binary data into an MPI buffer.
- *
- * The @c packed_oarchive class is an Archiver (as in the
- * Boost.Serialization library) that packs binary data into a buffer
- * for transmission via MPI. It can operate on any Serializable data
- * type and will use the @c MPI_Pack function of the underlying MPI
- * implementation to perform serialization.
- */
-
-class BOOST_MPI_DECL packed_oarchive
- : public oprimitive
- , public archive::detail::common_oarchive<packed_oarchive>
-{
-public:
- /**
- * Construct a @c packed_oarchive for transmission over the given
- * MPI communicator and with an initial buffer.
- *
- * @param comm The communicator over which this archive will be
- * sent.
- *
- * @param b A user-defined buffer that will be filled with the
- * binary representation of serialized objects.
- *
- * @param flags Control the serialization of the data types. Refer
- * to the Boost.Serialization documentation before changing the
- * default flags.
- */
- packed_oarchive( MPI_Comm const & comm, buffer_type & b, unsigned int flags = boost::archive::no_header)
- : oprimitive(b,comm),
- archive::detail::common_oarchive<packed_oarchive>(flags)
- {}
-
- /**
- * Construct a @c packed_oarchive for transmission over the given
- * MPI communicator.
- *
- * @param comm The communicator over which this archive will be
- * sent.
- *
- * @param flags Control the serialization of the data types. Refer
- * to the Boost.Serialization documentation before changing the
- * default flags.
- */
- packed_oarchive ( MPI_Comm const & comm, unsigned int flags = boost::archive::no_header)
- : oprimitive(internal_buffer_,comm),
- archive::detail::common_oarchive<packed_oarchive>(flags)
- {}
-
- // Save everything else in the usual way, forwarding on to the Base class
- template<class T>
- void save_override(T const& x, mpl::false_)
- {
- archive::detail::common_oarchive<packed_oarchive>::save_override(x);
- }
-
- // Save it directly using the primitives
- template<class T>
- void save_override(T const& x, mpl::true_)
- {
- oprimitive::save(x);
- }
-
- // Save all supported datatypes directly
- template<class T>
- void save_override(T const& x)
- {
- typedef typename mpl::apply1<use_array_optimization,T>::type use_optimized;
- save_override(x, use_optimized());
- }
-
- // output archives need to ignore the optional information
- void save_override(const archive::class_id_optional_type & ){}
-
- // explicitly convert to char * to avoid compile ambiguities
- void save_override(const archive::class_name_type & t){
- const std::string s(t);
- * this->This() << s;
- }
-
- void save_override(const archive::class_id_type & t){
- const boost::int_least16_t x = t;
- * this->This() << x;
- }
-
- void save_override(const archive::version_type & t){
- const boost::int_least8_t x = t;
- * this->This() << x;
- }
-private:
- /// An internal buffer to be used when the user does not supply his
- /// own buffer.
- buffer_type internal_buffer_;
-};
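-
-/* A minimal usage sketch of the sending side (illustrative only): it assumes
- * a communicator `world` and a receiver on rank 1 that unpacks with a
- * packed_iarchive, as sketched in packed_iarchive.hpp.
- *
- *   boost::mpi::packed_oarchive oa(world);
- *   std::string payload("packed via MPI_Pack");
- *   oa << payload;          // pack the value into the archive's buffer
- *   world.send(1, 0, oa);   // transmit the packed buffer
- */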
-
-} } // end namespace boost::mpi
-
-// required by export
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_oarchive)
-BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_oarchive)
-
-
-
-#endif // BOOST_MPI_PACKED_OARCHIVE_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/request.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/request.hpp
deleted file mode 100644
index cb36cc5ab..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/request.hpp
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file request.hpp
- *
- * This header defines the class @c request, which contains a request
- * for non-blocking communication.
- */
-#ifndef BOOST_MPI_REQUEST_HPP
-#define BOOST_MPI_REQUEST_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/optional.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-
-namespace boost { namespace mpi {
-
-class status;
-class communicator;
-
-/**
- * @brief A request for a non-blocking send or receive.
- *
- * This structure contains information about a non-blocking send or
- * receive and will be returned from @c isend or @c irecv,
- * respectively.
- */
-class BOOST_MPI_DECL request
-{
- public:
- /**
- * Constructs a NULL request.
- */
- request();
-
- /**
- * Wait until the communication associated with this request has
- * completed, then return a @c status object describing the
- * communication.
- */
- status wait();
-
- /**
- * Determine whether the communication associated with this request
- * has completed successfully. If so, returns the @c status object
- * describing the communication. Otherwise, returns an empty @c
- * optional<> to indicate that the communication has not completed
- * yet. Note that once @c test() returns a @c status object, the
- * request has completed and @c wait() should not be called.
- */
- optional<status> test();
-
- /**
- * Cancel a pending communication, assuming it has not already been
- * completed.
- */
- void cancel();
-
- private:
- enum request_action { ra_wait, ra_test, ra_cancel };
- typedef optional<status> (*handler_type)(request* self,
- request_action action);
-
- /**
- * INTERNAL ONLY
- *
- * Handles the non-blocking receive of a serialized value.
- */
- template<typename T>
- static optional<status>
- handle_serialized_irecv(request* self, request_action action);
-
- /**
- * INTERNAL ONLY
- *
- * Handles the non-blocking receive of an array of serialized values.
- */
- template<typename T>
- static optional<status>
- handle_serialized_array_irecv(request* self, request_action action);
-
- public: // template friends are not portable
-
- /// INTERNAL ONLY
- MPI_Request m_requests[2];
-
- /// INTERNAL ONLY
- handler_type m_handler;
-
- /// INTERNAL ONLY
- shared_ptr<void> m_data;
-
- friend class communicator;
-};
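-
-/* A minimal usage sketch (illustrative only): it assumes a communicator
- * `world` and a message sent to this rank from rank 0 with tag 0.
- *
- *   std::string msg;
- *   boost::mpi::request r = world.irecv(0, 0, msg);
- *   if (boost::optional<boost::mpi::status> s = r.test()) {
- *     // already completed; s->source() == 0
- *   } else {
- *     boost::mpi::status s2 = r.wait(); // block until the receive finishes
- *   }
- */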
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_REQUEST_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content.hpp
deleted file mode 100644
index dcd13bfe5..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content.hpp
+++ /dev/null
@@ -1,392 +0,0 @@
-// (C) Copyright 2005 Matthias Troyer
-// (C) Copyright 2006 Douglas Gregor <doug.gregor -at gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Matthias Troyer
-// Douglas Gregor
-
-/** @file skeleton_and_content.hpp
- *
- * This header provides facilities that allow the structure of data
- * types (called the "skeleton") to be transmitted and received
- * separately from the content stored in those data types. These
- * facilities are useful when the data in a stable data structure
- * (e.g., a mesh or a graph) will need to be transmitted
- * repeatedly. In this case, transmitting the skeleton only once
- * saves both communication effort (it need not be sent again) and
- * local computation (serialization need only be performed once for
- * the content).
- */
-#ifndef BOOST_MPI_SKELETON_AND_CONTENT_HPP
-#define BOOST_MPI_SKELETON_AND_CONTENT_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/archive/detail/auto_link_archive.hpp>
-#include <boost/mpi/packed_iarchive.hpp>
-#include <boost/mpi/packed_oarchive.hpp>
-#include <boost/mpi/detail/forward_skeleton_iarchive.hpp>
-#include <boost/mpi/detail/forward_skeleton_oarchive.hpp>
-#include <boost/mpi/detail/ignore_iprimitive.hpp>
-#include <boost/mpi/detail/ignore_oprimitive.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/archive/detail/register_archive.hpp>
-
-namespace boost { namespace mpi {
-
-/**
- * @brief A proxy that requests that the skeleton of an object be
- * transmitted.
- *
- * The @c skeleton_proxy is a lightweight proxy object used to
- * indicate that the skeleton of an object, not the object itself,
- * should be transmitted. It can be used with the @c send and @c recv
- * operations of communicators or the @c broadcast collective. When a
- * @c skeleton_proxy is sent, Boost.MPI generates a description
- * containing the structure of the stored object. When that skeleton
- * is received, the receiving object is reshaped to match the
- * structure. Once the skeleton of an object has been transmitted, its
- * @c content can be transmitted separately (often several times)
- * without changing the structure of the object.
- */
-template <class T>
-struct BOOST_MPI_DECL skeleton_proxy
-{
- /**
- * Constructs a @c skeleton_proxy that references object @p x.
- *
- * @param x the object whose structure will be transmitted or
- * altered.
- */
- skeleton_proxy(T& x)
- : object(x)
- {}
-
- T& object;
-};
-
-/**
- * @brief Create a skeleton proxy object.
- *
- * This routine creates an instance of the skeleton_proxy class. It
- * will typically be used when calling @c send, @c recv, or @c
- * broadcast, to indicate that only the skeleton (structure) of an
- * object should be transmitted and not its contents.
- *
- * @param x the object whose structure will be transmitted.
- *
- * @returns a skeleton_proxy object referencing @p x
- */
-template <class T>
-inline const skeleton_proxy<T> skeleton(T& x)
-{
- return skeleton_proxy<T>(x);
-}
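-
-/* A minimal usage sketch of skeleton/content transmission (illustrative
- * only): it assumes a communicator `world`, that <boost/mpi/collectives.hpp>
- * and <boost/serialization/list.hpp> are included, and that rank 0 owns the
- * authoritative data; the skeleton broadcast reshapes the other ranks' lists
- * to match before their content is filled in.
- *
- *   std::list<int> values;
- *   boost::mpi::broadcast(world, boost::mpi::skeleton(values), 0);    // structure, once
- *   boost::mpi::broadcast(world, boost::mpi::get_content(values), 0); // content, repeatedly
- */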
-
-namespace detail {
- /// @brief a class holding an MPI datatype
- /// INTERNAL ONLY
- /// the type is freed upon destruction
- class BOOST_MPI_DECL mpi_datatype_holder : public boost::noncopyable
- {
- public:
- mpi_datatype_holder()
- : is_committed(false)
- {}
-
- mpi_datatype_holder(MPI_Datatype t, bool committed = true)
- : d(t)
- , is_committed(committed)
- {}
-
- void commit()
- {
- BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&d));
- is_committed=true;
- }
-
- MPI_Datatype get_mpi_datatype() const
- {
- return d;
- }
-
- ~mpi_datatype_holder()
- {
- int finalized=0;
- BOOST_MPI_CHECK_RESULT(MPI_Finalized,(&finalized));
- if (!finalized && is_committed)
- BOOST_MPI_CHECK_RESULT(MPI_Type_free,(&d));
- }
-
- private:
- MPI_Datatype d;
- bool is_committed;
- };
-} // end namespace detail
-
-/** @brief A proxy object that transfers the content of an object
- * without its structure.
- *
- * The @c content class indicates that Boost.MPI should transmit or
- * receive the content of an object, but without any information
- * about the structure of the object. It is only meaningful to
- * transmit the content of an object after the receiver has already
- * received the skeleton for the same object.
- *
- * Most users will not use @c content objects directly. Rather, they
- * will invoke @c send, @c recv, or @c broadcast operations using @c
- * get_content().
- */
-class BOOST_MPI_DECL content
-{
-public:
- /**
- * Constructs an empty @c content object. This object will not be
- * useful for any Boost.MPI operations until it is reassigned.
- */
- content() {}
-
- /**
- * This routine initializes the @c content object with an MPI data
- * type that refers to the content of an object without its structure.
- *
- * @param d the MPI data type referring to the content of the object.
- *
- * @param committed @c true indicates that @c MPI_Type_commit has
- * already been executed for the data type @p d.
- */
- content(MPI_Datatype d, bool committed=true)
- : holder(new detail::mpi_datatype_holder(d,committed))
- {}
-
- /**
- * Replace the MPI data type referencing the content of an object.
- *
- * @param d the new MPI data type referring to the content of the
- * object.
- *
- * @returns *this
- */
- const content& operator=(MPI_Datatype d)
- {
- holder.reset(new detail::mpi_datatype_holder(d));
- return *this;
- }
-
- /**
- * Retrieve the MPI data type that refers to the content of the
- * object.
- *
- * @returns the MPI data type, which should only be transmitted or
- * received using @c MPI_BOTTOM as the address.
- */
- MPI_Datatype get_mpi_datatype() const
- {
- return holder->get_mpi_datatype();
- }
-
- /**
- * Commit the MPI data type referring to the content of the
- * object.
- */
- void commit()
- {
- holder->commit();
- }
-
-private:
- boost::shared_ptr<detail::mpi_datatype_holder> holder;
-};
-
-/** @brief Returns the content of an object, suitable for transmission
- * via Boost.MPI.
- *
- * The function creates an absolute MPI datatype for the object,
- * where all offsets are counted from the address 0 (a.k.a. @c
- * MPI_BOTTOM) instead of the address @c &x of the object. This
- * allows the creation of MPI data types for complex data structures
- * containing pointers, such as linked lists or trees.
- *
- * The disadvantage, compared to relative MPI data types, is that for
- * each object a new MPI data type has to be created.
- *
- * The contents of an object can only be transmitted when the
- * receiver already has an object with the same structure or shape as
- * the sender. To accomplish this, first transmit the skeleton of the
- * object using, e.g., @c skeleton() or @c skeleton_proxy.
- *
- * The type @c T has to allow creation of an absolute MPI data type
- * (content).
- *
- * @param x the object for which the content will be transmitted.
- *
- * @returns the content of the object @p x, which can be used for
- * transmission via @c send, @c recv, or @c broadcast.
- */
-template <class T> const content get_content(const T& x);
-
-/** @brief An archiver that reconstructs a data structure based on the
- * binary skeleton stored in a buffer.
- *
- * The @c packed_skeleton_iarchive class is an Archiver (as in the
- * Boost.Serialization library) that can construct the shape of a
- * data structure based on a binary skeleton stored in a buffer. The
- * @c packed_skeleton_iarchive is typically used by the receiver of a
- * skeleton, to prepare a data structure that will eventually receive
- * content separately.
- *
- * Users will not generally need to use @c packed_skeleton_iarchive
- * directly. Instead, use @c skeleton or @c get_skeleton.
- */
-class BOOST_MPI_DECL packed_skeleton_iarchive
- : public detail::ignore_iprimitive,
- public detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>
-{
-public:
- /**
- * Construct a @c packed_skeleton_iarchive for the given
- * communicator.
- *
- * @param comm The communicator over which this archive will be
- * transmitted.
- *
- * @param flags Control the serialization of the skeleton. Refer to
- * the Boost.Serialization documentation before changing the
- * default flags.
- */
- packed_skeleton_iarchive(MPI_Comm const & comm,
- unsigned int flags = boost::archive::no_header)
- : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(skeleton_archive_)
- , skeleton_archive_(comm,flags)
- {}
-
- /**
- * Construct a @c packed_skeleton_iarchive that unpacks a skeleton
- * from the given @p archive.
- *
- * @param archive the archive from which the skeleton will be
- * unpacked.
- *
- */
- explicit packed_skeleton_iarchive(packed_iarchive & archive)
- : detail::forward_skeleton_iarchive<packed_skeleton_iarchive,packed_iarchive>(archive)
- , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
- {}
-
- /**
- * Retrieve the archive corresponding to this skeleton.
- */
- const packed_iarchive& get_skeleton() const
- {
- return this->implementation_archive;
- }
-
- /**
- * Retrieve the archive corresponding to this skeleton.
- */
- packed_iarchive& get_skeleton()
- {
- return this->implementation_archive;
- }
-
-private:
- /// Store the actual archive that holds the structure, unless the
- /// user overrides this with their own archive.
- packed_iarchive skeleton_archive_;
-};
-
-/** @brief An archiver that records the binary skeleton of a data
- * structure into a buffer.
- *
- * The @c packed_skeleton_oarchive class is an Archiver (as in the
- * Boost.Serialization library) that can record the shape of a data
- * structure (called the "skeleton") into a binary representation
- * stored in a buffer. The @c packed_skeleton_oarchive is typically
- * used by the sender of a skeleton, to pack the skeleton of a data
- * structure for transmission separately from the content.
- *
- * Users will not generally need to use @c packed_skeleton_oarchive
- * directly. Instead, use @c skeleton or @c get_skeleton.
- */
-class BOOST_MPI_DECL packed_skeleton_oarchive
- : public detail::ignore_oprimitive,
- public detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>
-{
-public:
- /**
- * Construct a @c packed_skeleton_oarchive for the given
- * communicator.
- *
- * @param comm The communicator over which this archive will be
- * transmitted.
- *
- * @param flags Control the serialization of the skeleton. Refer to
- * the Boost.Serialization documentation before changing the
- * default flags.
- */
- packed_skeleton_oarchive(MPI_Comm const & comm,
- unsigned int flags = boost::archive::no_header)
- : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(skeleton_archive_)
- , skeleton_archive_(comm,flags)
- {}
-
- /**
- * Construct a @c packed_skeleton_oarchive that packs a skeleton
- * into the given @p archive.
- *
- * @param archive the archive to which the skeleton will be packed.
- *
- */
- explicit packed_skeleton_oarchive(packed_oarchive & archive)
- : detail::forward_skeleton_oarchive<packed_skeleton_oarchive,packed_oarchive>(archive)
- , skeleton_archive_(MPI_COMM_WORLD, boost::archive::no_header)
- {}
-
- /**
- * Retrieve the archive corresponding to this skeleton.
- */
- const packed_oarchive& get_skeleton() const
- {
- return this->implementation_archive;
- }
-
-private:
- /// Store the actual archive that holds the structure.
- packed_oarchive skeleton_archive_;
-};
-
-namespace detail {
- typedef boost::mpi::detail::forward_skeleton_oarchive<boost::mpi::packed_skeleton_oarchive,boost::mpi::packed_oarchive> type1;
- typedef boost::mpi::detail::forward_skeleton_iarchive<boost::mpi::packed_skeleton_iarchive,boost::mpi::packed_iarchive> type2;
-}
-
-
-} } // end namespace boost::mpi
-
-#include <boost/mpi/detail/content_oarchive.hpp>
-
-// For any headers that have provided declarations based on forward
-// declarations of the contents of this header, include definitions
-// for those declarations. This means that the inclusion of
-// skeleton_and_content.hpp enables the use of skeleton/content
-// transmission throughout the library.
-#ifdef BOOST_MPI_BROADCAST_HPP
-# include <boost/mpi/detail/broadcast_sc.hpp>
-#endif
-
-#ifdef BOOST_MPI_COMMUNICATOR_HPP
-# include <boost/mpi/detail/communicator_sc.hpp>
-#endif
-
-// required by export
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_skeleton_oarchive)
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::packed_skeleton_iarchive)
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::type1)
-BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::mpi::detail::type2)
-
-BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_skeleton_oarchive)
-BOOST_SERIALIZATION_USE_ARRAY_OPTIMIZATION(boost::mpi::packed_skeleton_iarchive)
-
-#endif // BOOST_MPI_SKELETON_AND_CONTENT_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content_fwd.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content_fwd.hpp
deleted file mode 100644
index 9df47fa31..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/skeleton_and_content_fwd.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// (C) Copyright 2006 Douglas Gregor <doug.gregor -at gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-// Authors: Douglas Gregor
-
-/** @file skeleton_and_content_fwd.hpp
- *
- * This header contains all of the forward declarations required to
- * use transmit skeletons of data structures and the content of data
- * structures separately. To actually transmit skeletons or content,
- * include the header @c boost/mpi/skeleton_and_content.hpp.
- */
-
-#ifndef BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
-#define BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
-
-namespace boost { namespace mpi {
-
-template <class T> struct skeleton_proxy;
-template <class T> const skeleton_proxy<T> skeleton(T& x);
-class content;
-template <class T> const content get_content(const T& x);
-class packed_skeleton_iarchive;
-class packed_skeleton_oarchive;
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_SKELETON_AND_CONTENT_FWD_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/status.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/status.hpp
deleted file mode 100644
index d444faa41..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/status.hpp
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file status.hpp
- *
- * This header defines the class @c status, which reports on the
- * results of point-to-point communication.
- */
-#ifndef BOOST_MPI_STATUS_HPP
-#define BOOST_MPI_STATUS_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/optional.hpp>
-
-namespace boost { namespace mpi {
-
-class request;
-class communicator;
-
-/** @brief Contains information about a message that has been or can
- * be received.
- *
- * This structure contains status information about messages that
- * have been received (with @c communicator::recv) or can be received
- * (returned from @c communicator::probe or @c
- * communicator::iprobe). It permits access to the source of the
- * message, message tag, error code (rarely used), or the number of
- * elements that have been transmitted.
- */
-class BOOST_MPI_DECL status
-{
- public:
- status() : m_count(-1) { }
-
- status(MPI_Status const& s) : m_status(s), m_count(-1) {}
-
- /**
- * Retrieve the source of the message.
- */
- int source() const { return m_status.MPI_SOURCE; }
-
- /**
- * Retrieve the message tag.
- */
- int tag() const { return m_status.MPI_TAG; }
-
- /**
- * Retrieve the error code.
- */
- int error() const { return m_status.MPI_ERROR; }
-
- /**
- * Determine whether the communication associated with this object
- * has been successfully cancelled.
- */
- bool cancelled() const;
-
- /**
- * Determines the number of elements of type @c T contained in the
- * message. The type @c T must have an associated data type, i.e.,
- * @c is_mpi_datatype<T> must derive from @c mpl::true_. In cases where
- * the type @c T does not match the transmitted type, this routine
- * will return an empty @c optional<int>.
- *
- * @returns the number of @c T elements in the message, if it can be
- * determined.
- */
- template<typename T> optional<int> count() const;
-
- /**
- * References the underlying @c MPI_Status
- */
- operator MPI_Status&() { return m_status; }
-
- /**
- * References the underlying @c MPI_Status
- */
- operator const MPI_Status&() const { return m_status; }
-
- private:
- /**
- * INTERNAL ONLY
- */
- template<typename T> optional<int> count_impl(mpl::true_) const;
-
- /**
- * INTERNAL ONLY
- */
- template<typename T> optional<int> count_impl(mpl::false_) const;
-
- public: // friend templates are not portable
-
- /// INTERNAL ONLY
- mutable MPI_Status m_status;
- mutable int m_count;
-
- friend class communicator;
- friend class request;
-};
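-
-/* A minimal usage sketch (illustrative only): it assumes a communicator
- * `world` and an incoming message of at most 100 ints with tag 0.
- *
- *   std::vector<int> data(100);
- *   boost::mpi::status s = world.recv(boost::mpi::any_source, 0, &data[0], 100);
- *   int who = s.source();            // rank that sent the message
- *   if (boost::optional<int> n = s.count<int>())
- *     data.resize(*n);               // number of ints actually received
- */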
-
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_STATUS_HPP
diff --git a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/timer.hpp b/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/timer.hpp
deleted file mode 100644
index fb1761839..000000000
--- a/inference-engine/thirdparty/clDNN/common/boost/1.64.0/include/boost-1_64/boost/mpi/timer.hpp
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>
-
-// Use, modification and distribution is subject to the Boost Software
-// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-/** @file timer.hpp
- *
- * This header provides the @c timer class, which provides access to
- * the MPI timers.
- */
-#ifndef BOOST_MPI_TIMER_HPP
-#define BOOST_MPI_TIMER_HPP
-
-#include <boost/mpi/config.hpp>
-#include <boost/limits.hpp>
-
-namespace boost { namespace mpi {
-
-/** @brief A simple timer that provides access to the MPI timing
- * facilities.
- *
- * The @c timer class is a simple wrapper around the MPI timing
- * facilities that mimics the interface of the Boost Timer library.
- */
-class BOOST_MPI_DECL timer {
-public:
- /** Initializes the timer
- *
- * @post @c elapsed() == 0
- */
- timer();
-
- /** Restart the timer.
- *
- * @post @c elapsed() == 0
- */
- void restart();
-
- /** Return the amount of time that has elapsed since construction
- * or the last call to @c restart(), in seconds.
- */
- double elapsed() const;
-
- /** Return an estimate of the maximum possible value of
- * elapsed(). Note that this routine may return too high a value on
- * some systems.
- */
- double elapsed_max() const;
-
- /** Returns the minimum non-zero value that @c elapsed() may
- * return. This is the resolution of the timer.
- */
- double elapsed_min() const;
-
- /** Determines whether the elapsed time values are global times or
- local processor times. */
- static bool time_is_global();
-
-private:
- double start_time;
-}; // timer
-
-inline timer::timer()
-{
- restart();
-}
-
-inline void timer::restart()
-{
- start_time = MPI_Wtime();
-}
-
-inline double timer::elapsed() const
-{
- return MPI_Wtime() - start_time;
-}
-
-inline double timer::elapsed_max() const
-{
- return (std::numeric_limits<double>::max)();
-}
-
-inline double timer::elapsed_min() const
-{
- return MPI_Wtick();
-}
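-
-/* A minimal usage sketch (illustrative only; do_work() stands for whatever
- * code is being timed):
- *
- *   boost::mpi::timer t;
- *   do_work();
- *   double seconds = t.elapsed(); // wall-clock time via MPI_Wtime
- *   t.restart();                  // start timing the next phase
- */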
-
-} } // end namespace boost::mpi
-
-#endif // BOOST_MPI_TIMER_HPP