]> git.proxmox.com Git - ceph.git/blob - ceph/src/boost/boost/mpi/collectives/all_to_all.hpp
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / boost / boost / mpi / collectives / all_to_all.hpp
1 // Copyright (C) 2005, 2006 Douglas Gregor.
2
3 // Use, modification and distribution is subject to the Boost Software
4 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
5 // http://www.boost.org/LICENSE_1_0.txt)
6
7 // Message Passing Interface 1.1 -- Section 4.8. All-to-all
8 #ifndef BOOST_MPI_ALL_TO_ALL_HPP
9 #define BOOST_MPI_ALL_TO_ALL_HPP
10
11 #include <boost/mpi/exception.hpp>
12 #include <boost/mpi/datatype.hpp>
13 #include <vector>
14 #include <boost/mpi/packed_oarchive.hpp>
15 #include <boost/mpi/packed_iarchive.hpp>
16 #include <boost/mpi/communicator.hpp>
17 #include <boost/mpi/environment.hpp>
18 #include <boost/assert.hpp>
19 #include <boost/mpi/collectives_fwd.hpp>
20 #include <boost/mpi/allocator.hpp>
21
22 namespace boost { namespace mpi {
23
24 namespace detail {
25 // We're performing an all-to-all with a type that has an
26 // associated MPI datatype, so we'll use MPI_Alltoall to do all of
27 // the work.
28 template<typename T>
29 void
30 all_to_all_impl(const communicator& comm, const T* in_values, int n,
31 T* out_values, mpl::true_)
32 {
33 MPI_Datatype type = get_mpi_datatype<T>(*in_values);
34 BOOST_MPI_CHECK_RESULT(MPI_Alltoall,
35 (const_cast<T*>(in_values), n, type,
36 out_values, n, type, comm));
37 }
38
// We're performing an all-to-all with a type that does not have an
// associated MPI datatype, so we'll need to serialize
// it. Each rank packs its outgoing values into one contiguous byte
// buffer, exchanges per-peer byte counts, moves the raw bytes with
// MPI_Alltoallv, and then deserializes the incoming bytes.
template<typename T>
void
all_to_all_impl(const communicator& comm, const T* in_values, int n,
                T* out_values, mpl::false_)
{
  int size = comm.size();
  int rank = comm.rank();

  // The amount of data to be sent to each process
  std::vector<int> send_sizes(size);

  // The displacements for each outgoing value.
  std::vector<int> send_disps(size);

  // The buffer that will store all of the outgoing values
  std::vector<char, allocator<char> > outgoing;

  // Pack the buffer with all of the outgoing values.
  for (int dest = 0; dest < size; ++dest) {
    // Keep track of the displacements
    send_disps[dest] = outgoing.size();

    // Our own value will never be transmitted, so don't pack it.
    // (send_sizes[rank] therefore stays 0; our own slice is copied
    // locally in the deserialization loop below.)
    if (dest != rank) {
      packed_oarchive oa(comm, outgoing);
      for (int i = 0; i < n; ++i)
        oa << in_values[dest * n + i];
    }

    // Keep track of the sizes
    send_sizes[dest] = outgoing.size() - send_disps[dest];
  }

  // Determine how much data each process will receive.
  // This is itself an all-to-all of ints, which recurses into the
  // mpl::true_ (MPI_Alltoall) implementation above.
  std::vector<int> recv_sizes(size);
  all_to_all(comm, send_sizes, recv_sizes);

  // Prepare a buffer to receive the incoming data.
  // recv_disps[src] is the running prefix sum of recv_sizes, i.e. the
  // byte offset at which src's packed data will land in `incoming`.
  std::vector<int> recv_disps(size);
  int sum = 0;
  for (int src = 0; src < size; ++src) {
    recv_disps[src] = sum;
    sum += recv_sizes[src];
  }
  // Allocate at least one byte so &incoming[0] below is always valid.
  std::vector<char, allocator<char> > incoming(sum > 0? sum : 1);

  // Make sure we don't try to reference an empty vector
  if (outgoing.empty())
    outgoing.push_back(0);

  // Transmit the actual data
  BOOST_MPI_CHECK_RESULT(MPI_Alltoallv,
                         (&outgoing[0], &send_sizes[0],
                          &send_disps[0], MPI_PACKED,
                          &incoming[0], &recv_sizes[0],
                          &recv_disps[0], MPI_PACKED,
                          comm));

  // Deserialize data from the iarchive
  for (int src = 0; src < size; ++src) {
    if (src == rank)
      // Our own contribution never went over the wire; copy it straight
      // from the input buffer to the output buffer.
      std::copy(in_values + src * n, in_values + (src + 1) * n,
                out_values + src * n);
    else {
      // Unpack src's n values starting at its displacement in `incoming`.
      packed_iarchive ia(comm, incoming, boost::archive::no_header,
                         recv_disps[src]);
      for (int i = 0; i < n; ++i)
        ia >> out_values[src * n + i];
    }
  }
}
113 } // end namespace detail
114
115 template<typename T>
116 inline void
117 all_to_all(const communicator& comm, const T* in_values, T* out_values)
118 {
119 detail::all_to_all_impl(comm, in_values, 1, out_values, is_mpi_datatype<T>());
120 }
121
122 template<typename T>
123 void
124 all_to_all(const communicator& comm, const std::vector<T>& in_values,
125 std::vector<T>& out_values)
126 {
127 BOOST_ASSERT((int)in_values.size() == comm.size());
128 out_values.resize(comm.size());
129 ::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
130 }
131
132 template<typename T>
133 inline void
134 all_to_all(const communicator& comm, const T* in_values, int n, T* out_values)
135 {
136 detail::all_to_all_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
137 }
138
139 template<typename T>
140 void
141 all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
142 std::vector<T>& out_values)
143 {
144 BOOST_ASSERT((int)in_values.size() == comm.size() * n);
145 out_values.resize(comm.size() * n);
146 ::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
147 }
148
149 } } // end namespace boost::mpi
150
151 #endif // BOOST_MPI_ALL_TO_ALL_HPP