// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor -at- gmail.com>
// Copyright (C) 2004 The Trustees of Indiana University

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Authors: Douglas Gregor
//          Andrew Lumsdaine

// Message Passing Interface 1.1 -- Section 4.9.5. All-Reduce
#ifndef BOOST_MPI_ALL_REDUCE_HPP
#define BOOST_MPI_ALL_REDUCE_HPP

#include <vector>

#include <boost/mpi/inplace.hpp>

// All-reduce falls back to reduce() + broadcast() in some cases.
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/reduce.hpp>

namespace boost { namespace mpi {
namespace detail {
  /**********************************************************************
   * Simple reduction with MPI_Allreduce                                *
   **********************************************************************/
  // We are reducing for a type that has an associated MPI
  // datatype and operation, so we'll use MPI_Allreduce directly.
  template<typename T, typename Op>
  void
  all_reduce_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, Op /*op*/, mpl::true_ /*is_mpi_op*/,
                  mpl::true_ /*is_mpi_datatype*/)
  {
    BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            (is_mpi_op<Op, T>::op()), comm));
  }
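
  // Example (sketch): a call such as
  //   int local = ..., sum = 0;
  //   all_reduce(comm, &local, 1, &sum, std::plus<int>());
  // dispatches here, because is_mpi_datatype<int> and
  // is_mpi_op<std::plus<int>, int> are both true, so the reduction maps
  // directly onto MPI_INT / MPI_SUM.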

  /**********************************************************************
   * User-defined reduction with MPI_Allreduce                          *
   **********************************************************************/
  // We are reducing a type that has an associated MPI datatype but a
  // custom operation. We'll still use MPI_Allreduce directly, but we
  // need to create an MPI_Op for the operation manually.
  template<typename T, typename Op>
  void
  all_reduce_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
                  mpl::true_ /*is_mpi_datatype*/)
  {
    user_op<Op, T> mpi_op(op);
    BOOST_MPI_CHECK_RESULT(MPI_Allreduce,
                           (const_cast<T*>(in_values), out_values, n,
                            boost::mpi::get_mpi_datatype<T>(*in_values),
                            mpi_op.get_mpi_op(), comm));
  }
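
  // Example (sketch): pairing an MPI datatype with a user-defined functor,
  // e.g. a hypothetical
  //   struct gcd_op { int operator()(int a, int b) const; };
  //   all_reduce(comm, &x, 1, &y, gcd_op());
  // takes this path; user_op registers a temporary MPI_Op for the functor
  // for the duration of the call.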

  /**********************************************************************
   * User-defined, tree-based reduction for non-MPI data types          *
   **********************************************************************/
  // We are reducing a type that has no associated MPI datatype or
  // operation, so we fall back to a reduce() to rank 0 followed by a
  // broadcast() of the result.
  template<typename T, typename Op>
  void
  all_reduce_impl(const communicator& comm, const T* in_values, int n,
                  T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
                  mpl::false_ /*is_mpi_datatype*/)
  {
    if (in_values == MPI_IN_PLACE) {
      // If in_values is the in-place tag, the output buffer actually
      // contains the input data, so copy it aside and fall back to the
      // out-of-place implementation. It's not clear how (or whether) the
      // copy can be avoided.
      std::vector<T> tmp_in(out_values, out_values + n);
      reduce(comm, &(tmp_in[0]), n, out_values, op, 0);
    } else {
      reduce(comm, in_values, n, out_values, op, 0);
    }
    broadcast(comm, out_values, n, 0);
  }
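
  // Example (sketch): a type without an MPI datatype, e.g. string
  // concatenation via
  //   all_reduce(comm, &s, 1, &result, std::plus<std::string>());
  // lands here: the values are reduced to rank 0 with reduce() and the
  // result is then broadcast to every rank.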
} // end namespace detail

template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, const T* in_values, int n, T* out_values,
           Op op)
{
  detail::all_reduce_impl(comm, in_values, n, out_values, op,
                          is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, inplace_t<T*> inout_values, int n, Op op)
{
  all_reduce(comm, static_cast<const T*>(MPI_IN_PLACE), n, inout_values.buffer, op);
}

template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, inplace_t<T> inout_values, Op op)
{
  all_reduce(comm, static_cast<const T*>(MPI_IN_PLACE), 1, &(inout_values.buffer), op);
}
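
// Usage sketch for the in-place form (`value` is a placeholder for whatever
// each rank contributes):
//   int value = ...;
//   all_reduce(comm, inplace(value), std::plus<int>());
// After the call, every rank holds the global sum in `value`.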

template<typename T, typename Op>
inline void
all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op)
{
  detail::all_reduce_impl(comm, &in_value, 1, &out_value, op,
                          is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}

template<typename T, typename Op>
T all_reduce(const communicator& comm, const T& in_value, Op op)
{
  T result;
  ::boost::mpi::all_reduce(comm, in_value, result, op);
  return result;
}
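
// Usage sketch for the value-returning form, assuming the maximum<>
// operation from <boost/mpi/operations.hpp>:
//   double global_max = all_reduce(comm, local_max, maximum<double>());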

} } // end namespace boost::mpi

#endif // BOOST_MPI_ALL_REDUCE_HPP