]> git.proxmox.com Git - ceph.git/blame - ceph/src/boost/libs/mpi/include/boost/mpi/collectives/all_to_all.hpp
bump version to 12.2.2-pve1
[ceph.git] / ceph / src / boost / libs / mpi / include / boost / mpi / collectives / all_to_all.hpp
CommitLineData
7c673cae
FG
1// Copyright (C) 2005, 2006 Douglas Gregor.
2
3// Use, modification and distribution is subject to the Boost Software
4// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
5// http://www.boost.org/LICENSE_1_0.txt)
6
7// Message Passing Interface 1.1 -- Section 4.8. All-to-all
8#ifndef BOOST_MPI_ALL_TO_ALL_HPP
9#define BOOST_MPI_ALL_TO_ALL_HPP
10
11#include <boost/mpi/exception.hpp>
12#include <boost/mpi/datatype.hpp>
13#include <vector>
14#include <boost/mpi/packed_oarchive.hpp>
15#include <boost/mpi/packed_iarchive.hpp>
16#include <boost/mpi/communicator.hpp>
17#include <boost/mpi/environment.hpp>
18#include <boost/assert.hpp>
19#include <boost/mpi/collectives_fwd.hpp>
20#include <boost/mpi/allocator.hpp>
21
22namespace boost { namespace mpi {
23
24namespace detail {
25 // We're performaing an all-to-all with a type that has an
26 // associated MPI datatype, so we'll use MPI_Alltoall to do all of
27 // the work.
28 template<typename T>
29 void
30 all_to_all_impl(const communicator& comm, const T* in_values, int n,
31 T* out_values, mpl::true_)
32 {
33 MPI_Datatype type = get_mpi_datatype<T>(*in_values);
34 BOOST_MPI_CHECK_RESULT(MPI_Alltoall,
35 (const_cast<T*>(in_values), n, type,
36 out_values, n, type, comm));
37 }
38
  // We're performing an all-to-all with a type that does not have an
  // associated MPI datatype, so we'll need to serialize
  // it. Unfortunately, this means that we cannot use MPI_Alltoall, so
  // we'll just have to send individual messages to the other
  // processes.
  //
  // Strategy: serialize each destination's n values into one shared
  // byte buffer, exchange the per-destination byte counts with a
  // datatype-based all_to_all, then move the bytes with MPI_Alltoallv
  // (MPI_PACKED) and deserialize on arrival. The local rank's values
  // are copied directly and never serialized or transmitted.
  template<typename T>
  void
  all_to_all_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, mpl::false_)
  {
    int size = comm.size();
    int rank = comm.rank();

    // The amount of data to be sent to each process
    std::vector<int> send_sizes(size);

    // The displacements for each outgoing value.
    std::vector<int> send_disps(size);

    // The buffer that will store all of the outgoing values
    std::vector<char, allocator<char> > outgoing;

    // Pack the buffer with all of the outgoing values.
    for (int dest = 0; dest < size; ++dest) {
      // Keep track of the displacements
      // (byte offset of this destination's data within `outgoing`)
      send_disps[dest] = outgoing.size();

      // Our own value will never be transmitted, so don't pack it.
      if (dest != rank) {
        // The archive appends its serialized bytes onto `outgoing`.
        packed_oarchive oa(comm, outgoing);
        for (int i = 0; i < n; ++i)
          oa << in_values[dest * n + i];
      }

      // Keep track of the sizes
      // (bytes appended for this destination; 0 for our own rank)
      send_sizes[dest] = outgoing.size() - send_disps[dest];
    }

    // Determine how much data each process will receive.
    // This recursive call dispatches to the mpl::true_ overload,
    // since int has an MPI datatype.
    std::vector<int> recv_sizes(size);
    all_to_all(comm, send_sizes, recv_sizes);

    // Prepare a buffer to receive the incoming data.
    // recv_disps[src] is the byte offset where src's data will land.
    std::vector<int> recv_disps(size);
    int sum = 0;
    for (int src = 0; src < size; ++src) {
      recv_disps[src] = sum;
      sum += recv_sizes[src];
    }
    // Allocate at least one byte so &incoming[0] is always valid,
    // even when nothing will be received.
    std::vector<char, allocator<char> > incoming(sum > 0? sum : 1);

    // Make sure we don't try to reference an empty vector
    // (&outgoing[0] below would be undefined on an empty vector).
    if (outgoing.empty())
      outgoing.push_back(0);

    // Transmit the actual data as raw packed bytes; the counts and
    // displacements computed above describe both sides of the exchange.
    BOOST_MPI_CHECK_RESULT(MPI_Alltoallv,
                           (&outgoing[0], &send_sizes[0],
                            &send_disps[0], MPI_PACKED,
                            &incoming[0], &recv_sizes[0],
                            &recv_disps[0], MPI_PACKED,
                            comm));

    // Deserialize data from the iarchive
    for (int src = 0; src < size; ++src) {
      if (src == rank)
        // Our own values were never serialized; copy them directly.
        std::copy(in_values + src * n, in_values + (src + 1) * n,
                  out_values + src * n);
      else {
        // Start reading at this source's displacement; no_header
        // matches the headerless packing done by packed_oarchive.
        packed_iarchive ia(comm, incoming, boost::archive::no_header,
                           recv_disps[src]);
        for (int i = 0; i < n; ++i)
          ia >> out_values[src * n + i];
      }
    }
  }
115} // end namespace detail
116
117template<typename T>
118inline void
119all_to_all(const communicator& comm, const T* in_values, T* out_values)
120{
121 detail::all_to_all_impl(comm, in_values, 1, out_values, is_mpi_datatype<T>());
122}
123
124template<typename T>
125void
126all_to_all(const communicator& comm, const std::vector<T>& in_values,
127 std::vector<T>& out_values)
128{
129 BOOST_ASSERT((int)in_values.size() == comm.size());
130 out_values.resize(comm.size());
131 ::boost::mpi::all_to_all(comm, &in_values[0], &out_values[0]);
132}
133
134template<typename T>
135inline void
136all_to_all(const communicator& comm, const T* in_values, int n, T* out_values)
137{
138 detail::all_to_all_impl(comm, in_values, n, out_values, is_mpi_datatype<T>());
139}
140
141template<typename T>
142void
143all_to_all(const communicator& comm, const std::vector<T>& in_values, int n,
144 std::vector<T>& out_values)
145{
146 BOOST_ASSERT((int)in_values.size() == comm.size() * n);
147 out_values.resize(comm.size() * n);
148 ::boost::mpi::all_to_all(comm, &in_values[0], n, &out_values[0]);
149}
150
151} } // end namespace boost::mpi
152
153#endif // BOOST_MPI_ALL_TO_ALL_HPP