1 // Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
2 // Copyright (C) 2016 K. Noel Belcourt <kbelco -at- sandia.gov>.
3
4 // Use, modification and distribution is subject to the Boost Software
5 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
6 // http://www.boost.org/LICENSE_1_0.txt)
7
8 /** @file communicator.hpp
9 *
10 * This header defines the @c communicator class, which is the basis
11 * of all communication within Boost.MPI, and provides point-to-point
12 * communication operations.
13 */
14 #ifndef BOOST_MPI_COMMUNICATOR_HPP
15 #define BOOST_MPI_COMMUNICATOR_HPP
16
17 #include <boost/assert.hpp>
18 #include <boost/mpi/config.hpp>
19 #include <boost/mpi/exception.hpp>
20 #include <boost/optional.hpp>
21 #include <boost/shared_ptr.hpp>
22 #include <boost/mpi/datatype.hpp>
23 #include <boost/mpi/nonblocking.hpp>
24 #include <utility>
25 #include <iterator>
26 #include <stdexcept> // for std::range_error
27 #include <vector>
28
29 // For (de-)serializing sends and receives
30 #include <boost/mpi/packed_oarchive.hpp>
31 #include <boost/mpi/packed_iarchive.hpp>
32
33 // For (de-)serializing skeletons and content
34 #include <boost/mpi/skeleton_and_content_fwd.hpp>
35
36 // For (de-)serializing arrays
37 #include <boost/serialization/array.hpp>
38
39 #include <boost/mpi/detail/point_to_point.hpp>
40 #include <boost/mpi/status.hpp>
41 #include <boost/mpi/request.hpp>
42
43 #ifdef BOOST_MSVC
44 # pragma warning(push)
45 # pragma warning(disable : 4800) // forcing to bool 'true' or 'false'
46 #endif
47
48 namespace boost { namespace mpi {
49
50 /**
51 * @brief A constant representing "any process."
52 *
53 * This constant may be used for the @c source parameter of @c receive
54 * operations to indicate that a message may be received from any
55 * source.
56 */
57 const int any_source = MPI_ANY_SOURCE;
58
59 /**
60 * @brief A constant representing "any tag."
61 *
62 * This constant may be used for the @c tag parameter of @c receive
63 * operations to indicate that a @c send with any tag will be matched
64 * by the receive.
65 */
66 const int any_tag = MPI_ANY_TAG;
67
68 /**
69 * @brief Enumeration used to describe how to adopt a C @c MPI_Comm into
70 * a Boost.MPI communicator.
71 *
72 * The values for this enumeration determine how a Boost.MPI
73 * communicator will behave when constructed with an MPI
74 * communicator. The options are:
75 *
76 * - @c comm_duplicate: Duplicate the MPI_Comm communicator to
77 * create a new communicator (e.g., with MPI_Comm_dup). This new
78 * MPI_Comm communicator will be automatically freed when the
79 * Boost.MPI communicator (and all copies of it) is destroyed.
80 *
81 * - @c comm_take_ownership: Take ownership of the communicator. It
82 * will be freed automatically when all of the Boost.MPI
83 * communicators go out of scope. This option must not be used with
84 * MPI_COMM_WORLD.
85 *
86 * - @c comm_attach: The Boost.MPI communicator will reference the
87 * existing MPI communicator but will not free it when the Boost.MPI
88 * communicator goes out of scope. This option should only be used
89 * when the communicator is managed by the user or MPI library
90 * (e.g., MPI_COMM_WORLD).
91 */
92 enum comm_create_kind { comm_duplicate, comm_take_ownership, comm_attach };
93
94 /**
95 * INTERNAL ONLY
96 *
97 * Forward declaration of @c group needed for the @c group
98 * constructor and accessor.
99 */
100 class group;
101
102 /**
103 * INTERNAL ONLY
104 *
105 * Forward declaration of @c intercommunicator needed for the "cast"
106 * from a communicator to an intercommunicator.
107 */
108 class intercommunicator;
109
110 /**
111 * INTERNAL ONLY
112 *
113 * Forward declaration of @c graph_communicator needed for the "cast"
114 * from a communicator to a graph communicator.
115 */
116 class graph_communicator;
117
118 /**
119 * INTERNAL ONLY
120 *
121 * Forward declaration of @c cartesian_communicator needed for the "cast"
122 * from a communicator to a cartesian communicator.
123 */
124 class cartesian_communicator;
125
126 /**
127 * @brief A communicator that permits communication and
128 * synchronization among a set of processes.
129 *
130 * The @c communicator class abstracts a set of communicating
131 * processes in MPI. All of the processes that belong to a certain
132 * communicator can determine the size of the communicator, their rank
133 * within the communicator, and communicate with any other processes
134 * in the communicator.
135 */
136 class BOOST_MPI_DECL communicator
137 {
138 public:
139 /**
140 * Build a new Boost.MPI communicator for @c MPI_COMM_WORLD.
141 *
142 * Constructs a Boost.MPI communicator that attaches to @c
143 * MPI_COMM_WORLD. This is the equivalent of constructing with
144 * @c (MPI_COMM_WORLD, comm_attach).
145 */
146 communicator();
147
148 /**
149 * Build a new Boost.MPI communicator based on the MPI communicator
150 * @p comm.
151 *
152 * @p comm may be any valid MPI communicator. If @p comm is
153 * MPI_COMM_NULL, an empty communicator (that cannot be used for
154 * communication) is created and the @p kind parameter is
155  * ignored. Otherwise, the @p kind parameter determines how the
156 * Boost.MPI communicator will be related to @p comm:
157 *
158 * - If @p kind is @c comm_duplicate, duplicate @c comm to create
159 * a new communicator. This new communicator will be freed when
160 * the Boost.MPI communicator (and all copies of it) is destroyed.
161 * This option is only permitted if @p comm is a valid MPI
162 * intracommunicator or if the underlying MPI implementation
163 * supports MPI 2.0 (which supports duplication of
164 * intercommunicators).
165 *
166 * - If @p kind is @c comm_take_ownership, take ownership of @c
167 * comm. It will be freed automatically when all of the Boost.MPI
168 * communicators go out of scope. This option must not be used
169 * when @c comm is MPI_COMM_WORLD.
170 *
171 * - If @p kind is @c comm_attach, this Boost.MPI communicator
172 * will reference the existing MPI communicator @p comm but will
173 * not free @p comm when the Boost.MPI communicator goes out of
174 * scope. This option should only be used when the communicator is
175 * managed by the user or MPI library (e.g., MPI_COMM_WORLD).
176 */
177 communicator(const MPI_Comm& comm, comm_create_kind kind);
178
179 /**
180 * Build a new Boost.MPI communicator based on a subgroup of another
181 * MPI communicator.
182 *
183 * This routine will construct a new communicator containing all of
184 * the processes from communicator @c comm that are listed within
185 * the group @c subgroup. Equivalent to @c MPI_Comm_create.
186 *
187 * @param comm An MPI communicator.
188 *
189 * @param subgroup A subgroup of the MPI communicator, @p comm, for
190 * which we will construct a new communicator.
191 */
192 communicator(const communicator& comm, const boost::mpi::group& subgroup);
193
194 /**
195 * @brief Determine the rank of the executing process in a
196 * communicator.
197 *
198 * This routine is equivalent to @c MPI_Comm_rank.
199 *
200 * @returns The rank of the process in the communicator, which
201 * will be a value in [0, size())
202 */
203 int rank() const;
204
205 /**
206 * @brief Determine the number of processes in a communicator.
207 *
208 * This routine is equivalent to @c MPI_Comm_size.
209 *
210 * @returns The number of processes in the communicator.
211 */
212 int size() const;
213
214 /**
215 * This routine constructs a new group whose members are the
216 * processes within this communicator. Equivalent to
217 * calling @c MPI_Comm_group.
218 */
219 boost::mpi::group group() const;
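  // Illustrative usage (a sketch, not part of the interface): building a
  // communicator that contains only the even-ranked processes of an
  // existing communicator named "world" (the name and the even/odd split
  // are assumptions for the example).
  //
  //   std::vector<int> evens;
  //   for (int r = 0; r < world.size(); r += 2)
  //     evens.push_back(r);
  //   boost::mpi::group g = world.group().include(evens.begin(), evens.end());
  //   boost::mpi::communicator even_comm(world, g);   // MPI_Comm_create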
220
221 // ----------------------------------------------------------------
222 // Point-to-point communication
223 // ----------------------------------------------------------------
224
225 /**
226 * @brief Send data to another process.
227 *
228 * This routine executes a potentially blocking send with tag @p tag
229 * to the process with rank @p dest. It can be received by the
230 * destination process with a matching @c recv call.
231 *
232 * The given @p value must be suitable for transmission over
233 * MPI. There are several classes of types that meet these
234 * requirements:
235 *
236 * - Types with mappings to MPI data types: If @c
237 * is_mpi_datatype<T> is convertible to @c mpl::true_, then @p
238 * value will be transmitted using the MPI data type
239 * @c get_mpi_datatype<T>(). All primitive C++ data types that have
240 * MPI equivalents, e.g., @c int, @c float, @c char, @c double,
241 * etc., have built-in mappings to MPI data types. You may turn a
242 * Serializable type with fixed structure into an MPI data type by
243 * specializing @c is_mpi_datatype for your type.
244 *
245 * - Serializable types: Any type that provides the @c serialize()
246 * functionality required by the Boost.Serialization library can be
247 * transmitted and received.
248 *
249 * - Packed archives and skeletons: Data that has been packed into
250 * an @c mpi::packed_oarchive or the skeletons of data that have
251  * been packed into an @c mpi::packed_skeleton_oarchive can be
252 * transmitted, but will be received as @c mpi::packed_iarchive and
253 * @c mpi::packed_skeleton_iarchive, respectively, to allow the
254 * values (or skeletons) to be extracted by the destination process.
255 *
256 * - Content: Content associated with a previously-transmitted
257 * skeleton can be transmitted by @c send and received by @c
258 * recv. The receiving process may only receive content into the
259 * content of a value that has been constructed with the matching
260 * skeleton.
261 *
262 * For types that have mappings to an MPI data type (including the
263  * content of a type), an invocation of this routine will result in
264 * a single MPI_Send call. For variable-length data, e.g.,
265 * serialized types and packed archives, two messages will be sent
266 * via MPI_Send: one containing the length of the data and the
267 * second containing the data itself. Note that the transmission
268 * mode for variable-length data is an implementation detail that
269 * is subject to change.
270 *
271 * @param dest The rank of the remote process to which the data
272 * will be sent.
273 *
274 * @param tag The tag that will be associated with this message. Tags
275 * may be any integer between zero and an implementation-defined
276 * upper limit. This limit is accessible via @c environment::max_tag().
277 *
278 * @param value The value that will be transmitted to the
279 * receiver. The type @c T of this value must meet the aforementioned
280 * criteria for transmission.
281 */
282 template<typename T>
283 void send(int dest, int tag, const T& value) const;
284
285 template<typename T, typename A>
286 void send(int dest, int tag, const std::vector<T,A>& value) const;
287
288 template<typename T, typename A>
289 void send_vector(int dest, int tag, const std::vector<T,A>& value,
290 mpl::true_) const;
291
292 template<typename T, typename A>
293 void send_vector(int dest, int tag, const std::vector<T,A>& value,
294 mpl::false_) const;
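  // Illustrative usage (a sketch, not part of the interface): a minimal
  // blocking exchange between ranks 0 and 1. The tag value 0 and the
  // payload type are assumptions; std::string additionally needs
  // <boost/serialization/string.hpp> to be serializable.
  //
  //   boost::mpi::environment env;
  //   boost::mpi::communicator world;
  //   if (world.rank() == 0) {
  //     std::string msg("hello");
  //     world.send(1, 0, msg);        // serialized, sent as MPI_PACKED
  //   } else if (world.rank() == 1) {
  //     std::string msg;
  //     world.recv(0, 0, msg);
  //   }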
295
296 /**
297 * @brief Send the skeleton of an object.
298 *
299 * This routine executes a potentially blocking send with tag @p
300 * tag to the process with rank @p dest. It can be received by the
301 * destination process with a matching @c recv call. This variation
302 * on @c send will be used when a send of a skeleton is explicitly
303 * requested via code such as:
304 *
305 * @code
306 * comm.send(dest, tag, skeleton(object));
307 * @endcode
308 *
309 * The semantics of this routine are equivalent to that of sending
310 * a @c packed_skeleton_oarchive storing the skeleton of the @c
311 * object.
312 *
313 * @param dest The rank of the remote process to which the skeleton
314 * will be sent.
315 *
316 * @param tag The tag that will be associated with this message. Tags
317 * may be any integer between zero and an implementation-defined
318 * upper limit. This limit is accessible via @c environment::max_tag().
319 *
320 * @param proxy The @c skeleton_proxy containing a reference to the
321 * object whose skeleton will be transmitted.
322 *
323 */
324 template<typename T>
325 void send(int dest, int tag, const skeleton_proxy<T>& proxy) const;
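  // Illustrative usage (a sketch): transmitting a structure's skeleton once
  // and then re-sending only its content as the values change. Requires
  // <boost/mpi/skeleton_and_content.hpp> (and <boost/serialization/list.hpp>
  // for std::list); the ranks, tag, and the more_steps()/update() helpers
  // are assumptions for the example.
  //
  //   std::list<int> data(100, 0);
  //   world.send(1, 0, boost::mpi::skeleton(data));
  //   while (more_steps()) {                              // hypothetical
  //     update(data);                                     // hypothetical
  //     world.send(1, 0, boost::mpi::get_content(data));
  //   }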
326
327 /**
328 * @brief Send an array of values to another process.
329 *
330 * This routine executes a potentially blocking send of an array of
331 * data with tag @p tag to the process with rank @p dest. It can be
332 * received by the destination process with a matching array @c
333 * recv call.
334 *
335 * If @c T is an MPI datatype, an invocation of this routine will
336 * be mapped to a single call to MPI_Send, using the datatype @c
337 * get_mpi_datatype<T>().
338 *
339 * @param dest The process rank of the remote process to which
340 * the data will be sent.
341 *
342 * @param tag The tag that will be associated with this message. Tags
343 * may be any integer between zero and an implementation-defined
344 * upper limit. This limit is accessible via @c environment::max_tag().
345 *
346 * @param values The array of values that will be transmitted to the
347 * receiver. The type @c T of these values must be mapped to an MPI
348 * data type.
349 *
350 * @param n The number of values stored in the array. The destination
351 * process must call receive with at least this many elements to
352 * correctly receive the message.
353 */
354 template<typename T>
355 void send(int dest, int tag, const T* values, int n) const;
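  // Illustrative usage (a sketch): sending a fixed-size buffer of a type
  // with an MPI datatype; this maps to a single MPI_Send / MPI_Recv pair.
  // The communicator name "world", ranks, tag, and buffer length are
  // assumptions for the example.
  //
  //   double buf[4] = {1.0, 2.0, 3.0, 4.0};
  //   if (world.rank() == 0)
  //     world.send(1, 0, buf, 4);
  //   else if (world.rank() == 1)
  //     world.recv(0, 0, buf, 4);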
356
357 /**
358 * @brief Send a message to another process without any data.
359 *
360 * This routine executes a potentially blocking send of a message
361 * to another process. The message contains no extra data, and can
362 * therefore only be received by a matching call to @c recv().
363 *
364 * @param dest The process rank of the remote process to which
365 * the message will be sent.
366 *
367 * @param tag The tag that will be associated with this message. Tags
368 * may be any integer between zero and an implementation-defined
369 * upper limit. This limit is accessible via @c environment::max_tag().
370 *
371 */
372 void send(int dest, int tag) const;
373
374 /**
375 * @brief Receive data from a remote process.
376 *
377 * This routine blocks until it receives a message from the process @p
378 * source with the given @p tag. The type @c T of the @p value must be
379 * suitable for transmission over MPI, which includes serializable
380 * types, types that can be mapped to MPI data types (including most
381 * built-in C++ types), packed MPI archives, skeletons, and content
382 * associated with skeletons; see the documentation of @c send for a
383 * complete description.
384 *
385 * @param source The process that will be sending data. This will
386 * either be a process rank within the communicator or the
387 * constant @c any_source, indicating that we can receive the
388 * message from any process.
389 *
390 * @param tag The tag that matches a particular kind of message sent
391 * by the source process. This may be any tag value permitted by @c
392 * send. Alternatively, the argument may be the constant @c any_tag,
393 * indicating that this receive matches a message with any tag.
394 *
395 * @param value Will contain the value of the message after a
396 * successful receive. The type of this value must match the value
397 * transmitted by the sender, unless the sender transmitted a packed
398 * archive or skeleton: in these cases, the sender transmits a @c
399 * packed_oarchive or @c packed_skeleton_oarchive and the
400 * destination receives a @c packed_iarchive or @c
401 * packed_skeleton_iarchive, respectively.
402 *
403 * @returns Information about the received message.
404 */
405 template<typename T>
406 status recv(int source, int tag, T& value) const;
407
408 template<typename T, typename A>
409 status recv(int source, int tag, std::vector<T,A>& value) const;
410
411 template<typename T, typename A>
412 status recv_vector(int source, int tag, std::vector<T,A>& value,
413 mpl::true_) const;
414
415 template<typename T, typename A>
416 status recv_vector(int source, int tag, std::vector<T,A>& value,
417 mpl::false_) const;
418
419 /**
420 * @brief Receive a skeleton from a remote process.
421 *
422 * This routine blocks until it receives a message from the process @p
423 * source with the given @p tag containing a skeleton.
424 *
425 * @param source The process that will be sending data. This will
426 * either be a process rank within the communicator or the constant
427 * @c any_source, indicating that we can receive the message from
428 * any process.
429 *
430 * @param tag The tag that matches a particular kind of message
431 * sent by the source process. This may be any tag value permitted
432 * by @c send. Alternatively, the argument may be the constant @c
433 * any_tag, indicating that this receive matches a message with any
434 * tag.
435 *
436 * @param proxy The @c skeleton_proxy containing a reference to the
437 * object that will be reshaped to match the received skeleton.
438 *
439 * @returns Information about the received message.
440 */
441 template<typename T>
442 status recv(int source, int tag, const skeleton_proxy<T>& proxy) const;
443
444 /**
445 * @brief Receive a skeleton from a remote process.
446 *
447 * This routine blocks until it receives a message from the process @p
448 * source with the given @p tag containing a skeleton.
449 *
450 * @param source The process that will be sending data. This will
451 * either be a process rank within the communicator or the constant
452 * @c any_source, indicating that we can receive the message from
453 * any process.
454 *
455 * @param tag The tag that matches a particular kind of message
456 * sent by the source process. This may be any tag value permitted
457 * by @c send. Alternatively, the argument may be the constant @c
458 * any_tag, indicating that this receive matches a message with any
459 * tag.
460 *
461 * @param proxy The @c skeleton_proxy containing a reference to the
462 * object that will be reshaped to match the received skeleton.
463 *
464 * @returns Information about the received message.
465 */
466 template<typename T>
467 status recv(int source, int tag, skeleton_proxy<T>& proxy) const;
468
469 /**
470 * @brief Receive an array of values from a remote process.
471 *
472 * This routine blocks until it receives an array of values from the
473  * process @p source with the given @p tag. If @c T is an MPI datatype,
474  * this call maps to a single @c MPI_Recv using @c get_mpi_datatype<T>().
     *
475 * @param source The process that will be sending data. This will
476 * either be a process rank within the communicator or the
477 * constant @c any_source, indicating that we can receive the
478 * message from any process.
479 *
480 * @param tag The tag that matches a particular kind of message sent
481 * by the source process. This may be any tag value permitted by @c
482 * send. Alternatively, the argument may be the constant @c any_tag,
483 * indicating that this receive matches a message with any tag.
484 *
485 * @param values Will contain the values in the message after a
486 * successful receive. The type of these elements must match the
487 * type of the elements transmitted by the sender.
488 *
489 * @param n The number of values that can be stored into the @p
490 * values array. This shall not be smaller than the number of
491 * elements transmitted by the sender.
492 *
493 * @throws std::range_error if the message to be received contains
494 * more than @p n values.
495 *
496 * @returns Information about the received message.
497 */
498 template<typename T>
499 status recv(int source, int tag, T* values, int n) const;
500
501 /**
502 * @brief Receive a message from a remote process without any data.
503 *
504 * This routine blocks until it receives a message from the process
505 * @p source with the given @p tag.
506 *
507 * @param source The process that will be sending the message. This
508 * will either be a process rank within the communicator or the
509 * constant @c any_source, indicating that we can receive the
510 * message from any process.
511 *
512 * @param tag The tag that matches a particular kind of message
513 * sent by the source process. This may be any tag value permitted
514 * by @c send. Alternatively, the argument may be the constant @c
515 * any_tag, indicating that this receive matches a message with any
516 * tag.
517 *
518 * @returns Information about the received message.
519 */
520 status recv(int source, int tag) const;
521
522   /** @brief Send a message to a remote process and receive another
523    * message from another process.
524 */
525 template<typename T>
526 status sendrecv(int dest, int stag, const T& sval, int src, int rtag, T& rval) const;
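  // Illustrative usage (a sketch): a ring shift with sendrecv(), which
  // avoids the deadlock that two matching blocking send()/recv() pairs
  // could produce. The communicator name "world", the neighbor arithmetic,
  // and the tag are assumptions for the example.
  //
  //   int right = (world.rank() + 1) % world.size();
  //   int left  = (world.rank() + world.size() - 1) % world.size();
  //   int out = world.rank(), in = -1;
  //   world.sendrecv(right, 0, out, left, 0, in);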
527
528 /**
529 * @brief Send a message to a remote process without blocking.
530 *
531  * The @c isend method is functionally identical to the @c send
532 * method and transmits data in the same way, except that @c isend
533 * will not block while waiting for the data to be
534 * transmitted. Instead, a request object will be immediately
535 * returned, allowing one to query the status of the communication
536 * or wait until it has completed.
537 *
538 * @param dest The rank of the remote process to which the data
539 * will be sent.
540 *
541 * @param tag The tag that will be associated with this message. Tags
542 * may be any integer between zero and an implementation-defined
543 * upper limit. This limit is accessible via @c environment::max_tag().
544 *
545 * @param value The value that will be transmitted to the
546 * receiver. The type @c T of this value must meet the aforementioned
547 * criteria for transmission.
548 *
549 * @returns a @c request object that describes this communication.
550 */
551 template<typename T>
552 request isend(int dest, int tag, const T& value) const;
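  // Illustrative usage (a sketch): overlapping computation with a
  // non-blocking send; the request must be completed with wait() or test()
  // before the transfer can be relied upon. The ranks, tag, and the
  // do_other_work() helper are assumptions for the example.
  //
  //   int payload = 42;
  //   boost::mpi::request req = world.isend(1, 0, payload);
  //   do_other_work();                                    // hypothetical
  //   req.wait();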
553
554 /**
555 * @brief Send the skeleton of an object without blocking.
556 *
557 * This routine is functionally identical to the @c send method for
558 * @c skeleton_proxy objects except that @c isend will not block
559 * while waiting for the data to be transmitted. Instead, a request
560 * object will be immediately returned, allowing one to query the
561 * status of the communication or wait until it has completed.
562 *
563 * The semantics of this routine are equivalent to a non-blocking
564 * send of a @c packed_skeleton_oarchive storing the skeleton of
565 * the @c object.
566 *
567 * @param dest The rank of the remote process to which the skeleton
568 * will be sent.
569 *
570 * @param tag The tag that will be associated with this message. Tags
571 * may be any integer between zero and an implementation-defined
572 * upper limit. This limit is accessible via @c environment::max_tag().
573 *
574 * @param proxy The @c skeleton_proxy containing a reference to the
575 * object whose skeleton will be transmitted.
576 *
577 * @returns a @c request object that describes this communication.
578 */
579 template<typename T>
580 request isend(int dest, int tag, const skeleton_proxy<T>& proxy) const;
581
582 /**
583 * @brief Send an array of values to another process without
584 * blocking.
585 *
586 * This routine is functionally identical to the @c send method for
587 * arrays except that @c isend will not block while waiting for the
588 * data to be transmitted. Instead, a request object will be
589 * immediately returned, allowing one to query the status of the
590 * communication or wait until it has completed.
591 *
592 * @param dest The process rank of the remote process to which
593 * the data will be sent.
594 *
595 * @param tag The tag that will be associated with this message. Tags
596 * may be any integer between zero and an implementation-defined
597 * upper limit. This limit is accessible via @c environment::max_tag().
598 *
599 * @param values The array of values that will be transmitted to the
600 * receiver. The type @c T of these values must be mapped to an MPI
601 * data type.
602 *
603 * @param n The number of values stored in the array. The destination
604 * process must call receive with at least this many elements to
605 * correctly receive the message.
606 *
607 * @returns a @c request object that describes this communication.
608 */
609 template<typename T>
610 request isend(int dest, int tag, const T* values, int n) const;
611
612 /**
613 * @brief Send a message to another process without any data
614 * without blocking.
615 *
616 * This routine is functionally identical to the @c send method for
617 * sends with no data, except that @c isend will not block while
618 * waiting for the message to be transmitted. Instead, a request
619 * object will be immediately returned, allowing one to query the
620 * status of the communication or wait until it has completed.
621 *
622 * @param dest The process rank of the remote process to which
623 * the message will be sent.
624 *
625 * @param tag The tag that will be associated with this message. Tags
626 * may be any integer between zero and an implementation-defined
627 * upper limit. This limit is accessible via @c environment::max_tag().
628 *
629 *
630 * @returns a @c request object that describes this communication.
631 */
632 request isend(int dest, int tag) const;
633
634 /**
635 * @brief Prepare to receive a message from a remote process.
636 *
637 * The @c irecv method is functionally identical to the @c recv
638  * method and receives data in the same way, except that @c irecv
639 * will not block while waiting for data to be
640 * transmitted. Instead, it immediately returns a request object
641 * that allows one to query the status of the receive or wait until
642 * it has completed.
643 *
644 * @param source The process that will be sending data. This will
645 * either be a process rank within the communicator or the
646 * constant @c any_source, indicating that we can receive the
647 * message from any process.
648 *
649 * @param tag The tag that matches a particular kind of message sent
650 * by the source process. This may be any tag value permitted by @c
651 * send. Alternatively, the argument may be the constant @c any_tag,
652 * indicating that this receive matches a message with any tag.
653 *
654 * @param value Will contain the value of the message after a
655 * successful receive. The type of this value must match the value
656 * transmitted by the sender, unless the sender transmitted a packed
657 * archive or skeleton: in these cases, the sender transmits a @c
658 * packed_oarchive or @c packed_skeleton_oarchive and the
659 * destination receives a @c packed_iarchive or @c
660 * packed_skeleton_iarchive, respectively.
661 *
662 * @returns a @c request object that describes this communication.
663 */
664 template<typename T>
665 request irecv(int source, int tag, T& value) const;
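  // Illustrative usage (a sketch): posting a non-blocking receive and send
  // to a peer, then completing both with wait_all() from
  // <boost/mpi/nonblocking.hpp>. The peer rank and tag are assumptions for
  // the example.
  //
  //   int in = 0, out = world.rank();
  //   boost::mpi::request reqs[2] = { world.irecv(peer, 0, in),
  //                                   world.isend(peer, 0, out) };
  //   boost::mpi::wait_all(reqs, reqs + 2);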
666
667 /**
668 * @brief Initiate receipt of an array of values from a remote process.
669 *
670 * This routine initiates a receive operation for an array of values
671 * transmitted by process @p source with the given @p tag.
672 *
673 * @param source The process that will be sending data. This will
674 * either be a process rank within the communicator or the
675 * constant @c any_source, indicating that we can receive the
676 * message from any process.
677 *
678 * @param tag The tag that matches a particular kind of message sent
679 * by the source process. This may be any tag value permitted by @c
680 * send. Alternatively, the argument may be the constant @c any_tag,
681 * indicating that this receive matches a message with any tag.
682 *
683 * @param values Will contain the values in the message after a
684 * successful receive. The type of these elements must match the
685 * type of the elements transmitted by the sender.
686 *
687 * @param n The number of values that can be stored into the @p
688 * values array. This shall not be smaller than the number of
689 * elements transmitted by the sender.
690 *
691 * @returns a @c request object that describes this communication.
692 */
693 template<typename T>
694 request irecv(int source, int tag, T* values, int n) const;
695
696 /**
697 * @brief Initiate receipt of a message from a remote process that
698 * carries no data.
699 *
700 * This routine initiates a receive operation for a message from
701 * process @p source with the given @p tag that carries no data.
702 *
703 * @param source The process that will be sending the message. This
704 * will either be a process rank within the communicator or the
705 * constant @c any_source, indicating that we can receive the
706 * message from any process.
707 *
708 * @param tag The tag that matches a particular kind of message
709 * sent by the source process. This may be any tag value permitted
710 * by @c send. Alternatively, the argument may be the constant @c
711 * any_tag, indicating that this receive matches a message with any
712 * tag.
713 *
714 * @returns a @c request object that describes this communication.
715 */
716 request irecv(int source, int tag) const;
717
718 /**
719 * @brief Waits until a message is available to be received.
720 *
721 * This operation waits until a message matching (@p source, @p tag)
722 * is available to be received. It then returns information about
723 * that message. The functionality is equivalent to @c MPI_Probe. To
724 * check if a message is available without blocking, use @c iprobe.
725 *
726 * @param source Determine if there is a message available from
727 * this rank. If @c any_source, then the message returned may come
728 * from any source.
729 *
730 * @param tag Determine if there is a message available with the
731 * given tag. If @c any_tag, then the message returned may have any
732 * tag.
733 *
734 * @returns Returns information about the first message that
735 * matches the given criteria.
736 */
737 status probe(int source = any_source, int tag = any_tag) const;
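  // Illustrative usage (a sketch): probing for a pending message to learn
  // its source, tag, and element count before allocating the receive
  // buffer. The element type double and tag 0 are assumptions.
  //
  //   boost::mpi::status s = world.probe(boost::mpi::any_source, 0);
  //   std::vector<double> buf(*s.count<double>());
  //   world.recv(s.source(), s.tag(), buf.data(),
  //              static_cast<int>(buf.size()));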
738
739 /**
740 * @brief Determine if a message is available to be received.
741 *
742 * This operation determines if a message matching (@p source, @p
743 * tag) is available to be received. If so, it returns information
744 * about that message; otherwise, it returns immediately with an
745 * empty optional. The functionality is equivalent to @c
746  * MPI_Iprobe. To wait until a message is available, use @c probe.
747 *
748 * @param source Determine if there is a message available from
749 * this rank. If @c any_source, then the message returned may come
750 * from any source.
751 *
752 * @param tag Determine if there is a message available with the
753 * given tag. If @c any_tag, then the message returned may have any
754 * tag.
755 *
756 * @returns If a matching message is available, returns
757 * information about that message. Otherwise, returns an empty
758 * @c boost::optional.
759 */
760 optional<status>
761 iprobe(int source = any_source, int tag = any_tag) const;
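  // Illustrative usage (a sketch): draining any pending messages without
  // blocking, interleaved with other work. The payload type is an
  // assumption for the example.
  //
  //   while (boost::optional<boost::mpi::status> s = world.iprobe()) {
  //     int value;
  //     world.recv(s->source(), s->tag(), value);
  //   }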
762
763 #ifdef barrier
764 // Linux defines a function-like macro named "barrier". So, we need
765 // to avoid expanding the macro when we define our barrier()
766 // function. However, some C++ parsers (Doxygen, for instance) can't
767 // handle this syntax, so we only use it when necessary.
768 void (barrier)() const;
769 #else
770 /**
771 * @brief Wait for all processes within a communicator to reach the
772 * barrier.
773 *
774 * This routine is a collective operation that blocks each process
775 * until all processes have entered it, then releases all of the
776 * processes "simultaneously". It is equivalent to @c MPI_Barrier.
777 */
778 void barrier() const;
779 #endif
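  // Illustrative usage (a sketch): synchronizing all ranks around a timed
  // section; boost::mpi::timer comes from <boost/mpi/timer.hpp>, and
  // compute() is a hypothetical workload.
  //
  //   world.barrier();
  //   boost::mpi::timer t;
  //   compute();                                          // hypothetical
  //   world.barrier();
  //   double seconds = t.elapsed();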
780
781 /** @brief Determine if this communicator is valid for
782 * communication.
783 *
784 * Evaluates @c true in a boolean context if this communicator is
785 * valid for communication, i.e., does not represent
786 * MPI_COMM_NULL. Otherwise, evaluates @c false.
787 */
788 operator bool() const { return (bool)comm_ptr; }
789
790 /**
791 * @brief Access the MPI communicator associated with a Boost.MPI
792 * communicator.
793 *
794 * This routine permits the implicit conversion from a Boost.MPI
795 * communicator to an MPI communicator.
796 *
797 * @returns The associated MPI communicator.
798 */
799 operator MPI_Comm() const;
800
801 /**
802 * Split the communicator into multiple, disjoint communicators
803 * each of which is based on a particular color. This is a
804 * collective operation that returns a new communicator that is a
805 * subgroup of @p this. This routine is functionally equivalent to
806 * @c MPI_Comm_split.
807 *
808 * @param color The color of this process. All processes with the
809 * same @p color value will be placed into the same group.
810 *
811 * @returns A new communicator containing all of the processes in
812 * @p this that have the same @p color.
813 */
814 communicator split(int color) const;
815
816 /**
817 * Split the communicator into multiple, disjoint communicators
818 * each of which is based on a particular color. This is a
819 * collective operation that returns a new communicator that is a
820 * subgroup of @p this. This routine is functionally equivalent to
821 * @c MPI_Comm_split.
822 *
823 * @param color The color of this process. All processes with the
824 * same @p color value will be placed into the same group.
825 *
826 * @param key A key value that will be used to determine the
827 * ordering of processes with the same color in the resulting
828 * communicator. If omitted, the rank of the processes in @p this
829 * will determine the ordering of processes in the resulting
830 * group.
831 *
832 * @returns A new communicator containing all of the processes in
833 * @p this that have the same @p color.
834 */
835 communicator split(int color, int key) const;
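  // Illustrative usage (a sketch): partitioning the communicator into
  // "row" sub-communicators of four consecutive ranks; the grouping factor
  // of four is an assumption for the example.
  //
  //   boost::mpi::communicator row = world.split(world.rank() / 4);
  //   int row_rank = row.rank();       // rank within the 4-process group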
836
837 /**
838 * Determine if the communicator is in fact an intercommunicator
839 * and, if so, return that intercommunicator.
840 *
841 * @returns an @c optional containing the intercommunicator, if this
842 * communicator is in fact an intercommunicator. Otherwise, returns
843 * an empty @c optional.
844 */
845 optional<intercommunicator> as_intercommunicator() const;
846
847 /**
848 * Determine if the communicator has a graph topology and, if so,
849 * return that @c graph_communicator. Even though the communicators
850 * have different types, they refer to the same underlying
851 * communication space and can be used interchangeably for
852 * communication.
853 *
854 * @returns an @c optional containing the graph communicator, if this
855 * communicator does in fact have a graph topology. Otherwise, returns
856 * an empty @c optional.
857 */
858 optional<graph_communicator> as_graph_communicator() const;
859
860 /**
861 * Determines whether this communicator has a Graph topology.
862 */
863 bool has_graph_topology() const;
864
865 /**
866 * Determine if the communicator has a cartesian topology and, if so,
867 * return that @c cartesian_communicator. Even though the communicators
868 * have different types, they refer to the same underlying
869 * communication space and can be used interchangeably for
870 * communication.
871 *
872 * @returns an @c optional containing the cartesian communicator, if this
873 * communicator does in fact have a cartesian topology. Otherwise, returns
874 * an empty @c optional.
875 */
876 optional<cartesian_communicator> as_cartesian_communicator() const;
877
878 /**
879 * Determines whether this communicator has a Cartesian topology.
880 */
881 bool has_cartesian_topology() const;
882
883 /** Abort all tasks in the group of this communicator.
884 *
885 * Makes a "best attempt" to abort all of the tasks in the group of
886 * this communicator. Depending on the underlying MPI
887 * implementation, this may either abort the entire program (and
888 * possibly return @p errcode to the environment) or only abort
889 * some processes, allowing the others to continue. Consult the
890 * documentation for your MPI implementation. This is equivalent to
891  * a call to @c MPI_Abort.
892 *
893 * @param errcode The error code to return from aborted processes.
894 * @returns Will not return.
895 */
896 void abort(int errcode) const;
897
898 protected:
899
900 /**
901 * INTERNAL ONLY
902 *
903 * Implementation of sendrecv for mpi type.
904 */
905 template<typename T>
906 status sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
907 mpl::true_) const;
908
909 /**
910 * INTERNAL ONLY
911 *
912 * Implementation of sendrecv for complex types, which must be passed as archives.
913 */
914 template<typename T>
915 status sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
916 mpl::false_) const;
917
918 /**
919 * INTERNAL ONLY
920 *
921 * Function object that frees an MPI communicator and deletes the
922 * memory associated with it. Intended to be used as a deleter with
923 * shared_ptr.
924 */
925 struct comm_free
926 {
927 void operator()(MPI_Comm* comm) const
928 {
929 BOOST_ASSERT( comm != 0 );
930 BOOST_ASSERT(*comm != MPI_COMM_NULL);
931 int finalized;
932 BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&finalized));
933 if (!finalized)
934 BOOST_MPI_CHECK_RESULT(MPI_Comm_free, (comm));
935 delete comm;
936 }
937 };
938
939
940 /**
941 * INTERNAL ONLY
942 *
943 * We're sending a type that has an associated MPI datatype, so we
944 * map directly to that datatype.
945 */
946 template<typename T>
947 void send_impl(int dest, int tag, const T& value, mpl::true_) const;
948
949 /**
950 * INTERNAL ONLY
951 *
952 * We're sending a type that does not have an associated MPI
953 * datatype, so it must be serialized then sent as MPI_PACKED data,
954 * to be deserialized on the receiver side.
955 */
956 template<typename T>
957 void send_impl(int dest, int tag, const T& value, mpl::false_) const;
958
959 /**
960 * INTERNAL ONLY
961 *
962 * We're sending an array of a type that has an associated MPI
963 * datatype, so we map directly to that datatype.
964 */
965 template<typename T>
966 void
967 array_send_impl(int dest, int tag, const T* values, int n, mpl::true_) const;
968
969 /**
970 * INTERNAL ONLY
971 *
972 * We're sending an array of a type that does not have an associated
973 * MPI datatype, so it must be serialized then sent as MPI_PACKED
974 * data, to be deserialized on the receiver side.
975 */
976 template<typename T>
977 void
978 array_send_impl(int dest, int tag, const T* values, int n,
979 mpl::false_) const;
980
981 /**
982 * INTERNAL ONLY
983 *
984 * We're sending a type that has an associated MPI datatype, so we
985 * map directly to that datatype.
986 */
987 template<typename T>
988 request isend_impl(int dest, int tag, const T& value, mpl::true_) const;
989
990 /**
991 * INTERNAL ONLY
992 *
993 * We're sending a type that does not have an associated MPI
994 * datatype, so it must be serialized then sent as MPI_PACKED data,
995 * to be deserialized on the receiver side.
996 */
997 template<typename T>
998 request isend_impl(int dest, int tag, const T& value, mpl::false_) const;
999
1000 /**
1001 * INTERNAL ONLY
1002 *
1003 * We're sending an array of a type that has an associated MPI
1004 * datatype, so we map directly to that datatype.
1005 */
1006 template<typename T>
1007 request
1008 array_isend_impl(int dest, int tag, const T* values, int n,
1009 mpl::true_) const;
1010
1011 /**
1012 * INTERNAL ONLY
1013 *
1014 * We're sending an array of a type that does not have an associated
1015 * MPI datatype, so it must be serialized then sent as MPI_PACKED
1016 * data, to be deserialized on the receiver side.
1017 */
1018 template<typename T>
1019 request
1020 array_isend_impl(int dest, int tag, const T* values, int n,
1021 mpl::false_) const;
1022
1023 /**
1024 * INTERNAL ONLY
1025 *
1026 * We're receiving a type that has an associated MPI datatype, so we
1027 * map directly to that datatype.
1028 */
1029 template<typename T>
1030 status recv_impl(int source, int tag, T& value, mpl::true_) const;
1031
1032 /**
1033 * INTERNAL ONLY
1034 *
1035 * We're receiving a type that does not have an associated MPI
1036 * datatype, so it must have been serialized then sent as
1037 * MPI_PACKED. We'll receive it and then deserialize.
1038 */
1039 template<typename T>
1040 status recv_impl(int source, int tag, T& value, mpl::false_) const;
1041
1042 /**
1043 * INTERNAL ONLY
1044 *
1045 * We're receiving an array of a type that has an associated MPI
1046 * datatype, so we map directly to that datatype.
1047 */
1048 template<typename T>
1049 status
1050 array_recv_impl(int source, int tag, T* values, int n, mpl::true_) const;
1051
1052 /**
1053 * INTERNAL ONLY
1054 *
1055 * We're receiving a type that does not have an associated MPI
1056 * datatype, so it must have been serialized then sent as
1057 * MPI_PACKED. We'll receive it and then deserialize.
1058 */
1059 template<typename T>
1060 status
1061 array_recv_impl(int source, int tag, T* values, int n, mpl::false_) const;
1062
1063 /**
1064 * INTERNAL ONLY
1065 *
1066 * We're receiving a type that has an associated MPI datatype, so we
1067 * map directly to that datatype.
1068 */
1069 template<typename T>
1070 request irecv_impl(int source, int tag, T& value, mpl::true_) const;
1071
1072 /**
1073 * INTERNAL ONLY
1074 *
1075 * We're receiving a type that does not have an associated MPI
1076 * datatype, so it must have been serialized then sent as
1077 * MPI_PACKED. We'll receive it and then deserialize.
1078 */
1079 template<typename T>
1080 request irecv_impl(int source, int tag, T& value, mpl::false_) const;
1081
1082 /**
1083 * INTERNAL ONLY
1084 *
1085 * We're receiving a type that has an associated MPI datatype, so we
1086 * map directly to that datatype.
1087 */
1088 template<typename T>
1089 request
1090 array_irecv_impl(int source, int tag, T* values, int n, mpl::true_) const;
1091
1092 /**
1093 * INTERNAL ONLY
1094 *
1095 * We're receiving a type that does not have an associated MPI
1096 * datatype, so it must have been serialized then sent as
1097 * MPI_PACKED. We'll receive it and then deserialize.
1098 */
1099 template<typename T>
1100 request
1101 array_irecv_impl(int source, int tag, T* values, int n, mpl::false_) const;
1102
1103 shared_ptr<MPI_Comm> comm_ptr;
1104 };
1105
1106 /**
1107 * @brief Determines whether two communicators are identical.
1108 *
1109 * Equivalent to calling @c MPI_Comm_compare and checking whether the
1110 * result is @c MPI_IDENT.
1111 *
1112 * @returns True when the two communicators refer to the same
1113 * underlying MPI communicator.
1114 */
1115 BOOST_MPI_DECL bool operator==(const communicator& comm1, const communicator& comm2);
1116
1117 /**
1118 * @brief Determines whether two communicators are different.
1119 *
1120 * @returns @c !(comm1 == comm2)
1121 */
1122 inline bool operator!=(const communicator& comm1, const communicator& comm2)
1123 {
1124 return !(comm1 == comm2);
1125 }
1126
1127
1128 /************************************************************************
1129 * Implementation details *
1130 ************************************************************************/
1131
1132 /**
1133  * INTERNAL ONLY (using the same 'send' name might be considered unfortunate)
1134 */
1135 template<>
1136 BOOST_MPI_DECL void
1137 communicator::send<packed_oarchive>(int dest, int tag,
1138 const packed_oarchive& ar) const;
1139
1140 /**
1141 * INTERNAL ONLY
1142 */
1143 template<>
1144 BOOST_MPI_DECL void
1145 communicator::send<packed_skeleton_oarchive>
1146 (int dest, int tag, const packed_skeleton_oarchive& ar) const;
1147
1148 /**
1149 * INTERNAL ONLY
1150 */
1151 template<>
1152 BOOST_MPI_DECL void
1153 communicator::send<content>(int dest, int tag, const content& c) const;
1154
1155 /**
1156 * INTERNAL ONLY
1157 */
1158 template<>
1159 BOOST_MPI_DECL status
1160 communicator::recv<packed_iarchive>(int source, int tag,
1161 packed_iarchive& ar) const;
1162
1163 /**
1164 * INTERNAL ONLY
1165 */
1166 template<>
1167 BOOST_MPI_DECL status
1168 communicator::recv<packed_skeleton_iarchive>
1169 (int source, int tag, packed_skeleton_iarchive& ar) const;
1170
1171 /**
1172 * INTERNAL ONLY
1173 */
1174 template<>
1175 BOOST_MPI_DECL status
1176 communicator::recv<const content>(int source, int tag,
1177 const content& c) const;
1178
1179 /**
1180 * INTERNAL ONLY
1181 */
1182 template<>
1183 inline status
1184 communicator::recv<content>(int source, int tag,
1185 content& c) const
1186 {
1187 return recv<const content>(source,tag,c);
1188 }
1189
1190 /**
1191 * INTERNAL ONLY
1192 */
1193 template<>
1194 BOOST_MPI_DECL request
1195 communicator::isend<packed_oarchive>(int dest, int tag,
1196 const packed_oarchive& ar) const;
1197
1198 /**
1199 * INTERNAL ONLY
1200 */
1201 template<>
1202 BOOST_MPI_DECL request
1203 communicator::isend<packed_skeleton_oarchive>
1204 (int dest, int tag, const packed_skeleton_oarchive& ar) const;
1205
1206 /**
1207 * INTERNAL ONLY
1208 */
1209 template<>
1210 BOOST_MPI_DECL request
1211 communicator::isend<content>(int dest, int tag, const content& c) const;
1212
1213 /**
1214 * INTERNAL ONLY
1215 */
1216 template<>
1217 BOOST_MPI_DECL request
1218 communicator::irecv<packed_skeleton_iarchive>
1219 (int source, int tag, packed_skeleton_iarchive& ar) const;
1220
1221 /**
1222 * INTERNAL ONLY
1223 */
1224 template<>
1225 BOOST_MPI_DECL request
1226 communicator::irecv<const content>(int source, int tag,
1227 const content& c) const;
1228
1229 /**
1230 * INTERNAL ONLY
1231 */
1232 template<>
1233 inline request
1234 communicator::irecv<content>(int source, int tag,
1235 content& c) const
1236 {
1237 return irecv<const content>(source, tag, c);
1238 }
1239
1240 // Count elements in a message
1241 template<typename T>
1242 inline optional<int> status::count() const
1243 {
1244 return count_impl<T>(is_mpi_datatype<T>());
1245 }
1246
1247 template<typename T>
1248 optional<int> status::count_impl(mpl::true_) const
1249 {
1250 if (m_count != -1)
1251 return m_count;
1252
1253 int return_value;
1254 BOOST_MPI_CHECK_RESULT(MPI_Get_count,
1255 (&m_status, get_mpi_datatype<T>(T()), &return_value));
1256 if (return_value == MPI_UNDEFINED)
1257 return optional<int>();
1258 else
1259 /* Cache the result. */
1260 return m_count = return_value;
1261 }
1262
1263 template<typename T>
1264 inline optional<int> status::count_impl(mpl::false_) const
1265 {
1266 if (m_count == -1)
1267 return optional<int>();
1268 else
1269 return m_count;
1270 }
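// Illustrative usage (a sketch): recovering how many elements actually
// arrived from the status returned by an array receive. The communicator
// name "world", the buffer size, and the use() consumer are assumptions
// for the example.
//
//   double buf[16];
//   boost::mpi::status s =
//     world.recv(boost::mpi::any_source, boost::mpi::any_tag, buf, 16);
//   if (boost::optional<int> n = s.count<double>())
//     use(buf, *n);                                       // hypothetical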
1271
1272 // We're sending a type that has an associated MPI datatype, so we
1273 // map directly to that datatype.
1274 template<typename T>
1275 void
1276 communicator::send_impl(int dest, int tag, const T& value, mpl::true_) const
1277 {
1278 BOOST_MPI_CHECK_RESULT(MPI_Send,
1279 (const_cast<T*>(&value), 1, get_mpi_datatype<T>(value),
1280 dest, tag, MPI_Comm(*this)));
1281 }
1282
1283 // We're sending a type that does not have an associated MPI
1284 // datatype, so it must be serialized then sent as MPI_PACKED data,
1285 // to be deserialized on the receiver side.
1286 template<typename T>
1287 void
1288 communicator::send_impl(int dest, int tag, const T& value, mpl::false_) const
1289 {
1290 packed_oarchive oa(*this);
1291 oa << value;
1292 send(dest, tag, oa);
1293 }
1294
1295 // Single-element send may either send the element directly or
1296 // serialize it via a buffer.
1297 template<typename T>
1298 void communicator::send(int dest, int tag, const T& value) const
1299 {
1300 this->send_impl(dest, tag, value, is_mpi_datatype<T>());
1301 }
1302
1303 // We're sending an array of a type that has an associated MPI
1304 // datatype, so we map directly to that datatype.
1305 template<typename T>
1306 void
1307 communicator::array_send_impl(int dest, int tag, const T* values, int n,
1308 mpl::true_) const
1309 {
1310 BOOST_MPI_CHECK_RESULT(MPI_Send,
1311 (const_cast<T*>(values), n,
1312 get_mpi_datatype<T>(*values),
1313 dest, tag, MPI_Comm(*this)));
1314 }
1315
1316 // We're sending an array of a type that does not have an associated
1317 // MPI datatype, so it must be serialized then sent as MPI_PACKED
1318 // data, to be deserialized on the receiver side.
1319 template<typename T>
1320 void
1321 communicator::array_send_impl(int dest, int tag, const T* values, int n,
1322 mpl::false_) const
1323 {
1324 packed_oarchive oa(*this);
1325 oa << n << boost::serialization::make_array(values, n);
1326 send(dest, tag, oa);
1327 }
1328
1329 template<typename T, typename A>
1330 void communicator::send_vector(int dest, int tag,
1331 const std::vector<T,A>& value, mpl::true_ true_type) const
1332 {
1333 // send the vector size
1334 typename std::vector<T,A>::size_type size = value.size();
1335 send(dest, tag, size);
1336 // send the data
1337 this->array_send_impl(dest, tag, value.data(), size, true_type);
1338 }
1339
1340 template<typename T, typename A>
1341 void communicator::send_vector(int dest, int tag,
1342 const std::vector<T,A>& value, mpl::false_ false_type) const
1343 {
1344 this->send_impl(dest, tag, value, false_type);
1345 }
1346
1347 template<typename T, typename A>
1348 void communicator::send(int dest, int tag, const std::vector<T,A>& value) const
1349 {
1350 send_vector(dest, tag, value, is_mpi_datatype<T>());
1351 }
1352
1353 // Array send must send the elements directly
1354 template<typename T>
1355 void communicator::send(int dest, int tag, const T* values, int n) const
1356 {
1357 this->array_send_impl(dest, tag, values, n, is_mpi_datatype<T>());
1358 }
1359
1360 // We're receiving a type that has an associated MPI datatype, so we
1361 // map directly to that datatype.
1362 template<typename T>
1363 status communicator::recv_impl(int source, int tag, T& value, mpl::true_) const
1364 {
1365 status stat;
1366
1367 BOOST_MPI_CHECK_RESULT(MPI_Recv,
1368 (const_cast<T*>(&value), 1,
1369 get_mpi_datatype<T>(value),
1370 source, tag, MPI_Comm(*this), &stat.m_status));
1371 return stat;
1372 }
1373
1374 template<typename T>
1375 status
1376 communicator::recv_impl(int source, int tag, T& value, mpl::false_) const
1377 {
1378 // Receive the message
1379 packed_iarchive ia(*this);
1380 status stat = recv(source, tag, ia);
1381
1382 // Deserialize the data in the message
1383 ia >> value;
1384
1385 return stat;
1386 }
1387
1388 // Single-element receive may either receive the element directly or
1389 // deserialize it from a buffer.
1390 template<typename T>
1391 status communicator::recv(int source, int tag, T& value) const
1392 {
1393 return this->recv_impl(source, tag, value, is_mpi_datatype<T>());
1394 }
1395
1396 template<typename T>
1397 status
1398 communicator::array_recv_impl(int source, int tag, T* values, int n,
1399 mpl::true_) const
1400 {
1401 status stat;
1402 BOOST_MPI_CHECK_RESULT(MPI_Recv,
1403 (const_cast<T*>(values), n,
1404 get_mpi_datatype<T>(*values),
1405 source, tag, MPI_Comm(*this), &stat.m_status));
1406 return stat;
1407 }
1408
1409 template<typename T>
1410 status
1411 communicator::array_recv_impl(int source, int tag, T* values, int n,
1412 mpl::false_) const
1413 {
1414 // Receive the message
1415 packed_iarchive ia(*this);
1416 status stat = recv(source, tag, ia);
1417
1418 // Determine how much data we are going to receive
1419 int count;
1420 ia >> count;
1421
1422 // Deserialize the data in the message
1423 boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
1424 ia >> arr;
1425
1426 if (count > n) {
1427 boost::throw_exception(
1428 std::range_error("communicator::recv: message receive overflow"));
1429 }
1430
1431 stat.m_count = count;
1432 return stat;
1433 }
1434
1435 template<typename T, typename A>
1436 status communicator::recv_vector(int source, int tag,
1437 std::vector<T,A>& value, mpl::true_ true_type) const
1438 {
1439 // receive the vector size
1440 typename std::vector<T,A>::size_type size = 0;
1441 recv(source, tag, size);
1442 // size the vector
1443 value.resize(size);
1444 // receive the data
1445 return this->array_recv_impl(source, tag, value.data(), size, true_type);
1446 }
1447
1448 template<typename T, typename A>
1449 status communicator::recv_vector(int source, int tag,
1450 std::vector<T,A>& value, mpl::false_ false_type) const
1451 {
1452 return this->recv_impl(source, tag, value, false_type);
1453 }
1454
1455 template<typename T, typename A>
1456 status communicator::recv(int source, int tag, std::vector<T,A>& value) const
1457 {
1458 return recv_vector(source, tag, value, is_mpi_datatype<T>());
1459 }
1460
1461 // Array receive must receive the elements directly into a buffer.
1462 template<typename T>
1463 status communicator::recv(int source, int tag, T* values, int n) const
1464 {
1465 return this->array_recv_impl(source, tag, values, n, is_mpi_datatype<T>());
1466 }
1467
1468
1469 template<typename T>
1470 status communicator::sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
1471 mpl::true_) const
1472 {
1473 status stat;
1474 BOOST_MPI_CHECK_RESULT(MPI_Sendrecv,
1475 (const_cast<T*>(&sval), 1,
1476 get_mpi_datatype<T>(sval),
1477 dest, stag,
1478 &rval, 1,
1479 get_mpi_datatype<T>(rval),
1480 src, rtag,
1481 MPI_Comm(*this), &stat.m_status));
1482 return stat;
1483 }
1484
1485 template<typename T>
1486 status communicator::sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
1487 mpl::false_) const
1488 {
1489 int const SEND = 0;
1490 int const RECV = 1;
1491 request srrequests[2];
1492 srrequests[SEND] = this->isend_impl(dest, stag, sval, mpl::false_());
1493 srrequests[RECV] = this->irecv_impl(src, rtag, rval, mpl::false_());
1494 status srstatuses[2];
1495 wait_all(srrequests, srrequests + 2, srstatuses);
1496 return srstatuses[RECV];
1497 }
1498
1499 template<typename T>
1500 status communicator::sendrecv(int dest, int stag, const T& sval, int src, int rtag, T& rval) const
1501 {
1502 return this->sendrecv_impl(dest, stag, sval, src, rtag, rval, is_mpi_datatype<T>());
1503 }
1504
1505
1506 // We're sending a type that has an associated MPI datatype, so we
1507 // map directly to that datatype.
1508 template<typename T>
1509 request
1510 communicator::isend_impl(int dest, int tag, const T& value, mpl::true_) const
1511 {
1512 request req;
1513 BOOST_MPI_CHECK_RESULT(MPI_Isend,
1514 (const_cast<T*>(&value), 1,
1515 get_mpi_datatype<T>(value),
1516 dest, tag, MPI_Comm(*this), &req.m_requests[0]));
1517 return req;
1518 }
1519
1520 // We're sending a type that does not have an associated MPI
1521 // datatype, so it must be serialized then sent as MPI_PACKED data,
1522 // to be deserialized on the receiver side.
1523 template<typename T>
1524 request
1525 communicator::isend_impl(int dest, int tag, const T& value, mpl::false_) const
1526 {
1527 shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
1528 *archive << value;
1529 request result = isend(dest, tag, *archive);
1530 result.m_data = archive;
1531 return result;
1532 }
1533
1534 // Single-element isend may either send the element directly or
1535 // serialize it via a buffer.
1536 template<typename T>
1537 request communicator::isend(int dest, int tag, const T& value) const
1538 {
1539 return this->isend_impl(dest, tag, value, is_mpi_datatype<T>());
1540 }
1541
1542 template<typename T>
1543 request
1544 communicator::array_isend_impl(int dest, int tag, const T* values, int n,
1545 mpl::true_) const
1546 {
1547 request req;
1548 BOOST_MPI_CHECK_RESULT(MPI_Isend,
1549 (const_cast<T*>(values), n,
1550 get_mpi_datatype<T>(*values),
1551 dest, tag, MPI_Comm(*this), &req.m_requests[0]));
1552 return req;
1553 }
1554
1555 template<typename T>
1556 request
1557 communicator::array_isend_impl(int dest, int tag, const T* values, int n,
1558 mpl::false_) const
1559 {
1560 shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
1561 *archive << n << boost::serialization::make_array(values, n);
1562 request result = isend(dest, tag, *archive);
1563 result.m_data = archive;
1564 return result;
1565 }
1566
1567
1568 // Array isend must send the elements directly
1569 template<typename T>
1570 request communicator::isend(int dest, int tag, const T* values, int n) const
1571 {
1572 return array_isend_impl(dest, tag, values, n, is_mpi_datatype<T>());
1573 }
1574
1575 namespace detail {
1576 /**
1577 * Internal data structure that stores everything required to manage
1578 * the receipt of serialized data via a request object.
1579 */
1580 template<typename T>
1581 struct serialized_irecv_data
1582 {
1583 serialized_irecv_data(const communicator& comm, int source, int tag,
1584 T& value)
1585 : comm(comm), source(source), tag(tag), ia(comm), value(value)
1586 {
1587 }
1588
1589 void deserialize(status& stat)
1590 {
1591 ia >> value;
1592 stat.m_count = 1;
1593 }
1594
1595 communicator comm;
1596 int source;
1597 int tag;
1598 std::size_t count;
1599 packed_iarchive ia;
1600 T& value;
1601 };
1602
1603 template<>
1604 struct serialized_irecv_data<packed_iarchive>
1605 {
1606 serialized_irecv_data(const communicator& comm, int source, int tag,
1607 packed_iarchive& ia)
1608 : comm(comm), source(source), tag(tag), ia(ia) { }
1609
1610 void deserialize(status&) { /* Do nothing. */ }
1611
1612 communicator comm;
1613 int source;
1614 int tag;
1615 std::size_t count;
1616 packed_iarchive& ia;
1617 };
1618
1619 /**
1620 * Internal data structure that stores everything required to manage
1621 * the receipt of an array of serialized data via a request object.
1622 */
1623 template<typename T>
1624 struct serialized_array_irecv_data
1625 {
1626 serialized_array_irecv_data(const communicator& comm, int source, int tag,
1627 T* values, int n)
1628 : comm(comm), source(source), tag(tag), ia(comm), values(values), n(n)
1629 {
1630 }
1631
1632 void deserialize(status& stat);
1633
1634 communicator comm;
1635 int source;
1636 int tag;
1637 std::size_t count;
1638 packed_iarchive ia;
1639 T* values;
1640 int n;
1641 };
1642
1643 template<typename T>
1644 void serialized_array_irecv_data<T>::deserialize(status& stat)
1645 {
1646 // Determine how much data we are going to receive
1647 int count;
1648 ia >> count;
1649
1650 // Deserialize the data in the message
1651 boost::serialization::array_wrapper<T> arr(values, count > n ? n : count);
1652 ia >> arr;
1653
1654 if (count > n) {
1655 boost::throw_exception(
1656 std::range_error("communicator::recv: message receive overflow"));
1657 }
1658
1659 stat.m_count = count;
1660 }
1661 } // namespace detail
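
// The two handlers below drive the two-message protocol used for serialized
// receives: m_requests[0] receives the payload size (a std::size_t posted by
// irecv_impl / array_irecv_impl further down); once it completes, the handler
// resizes the packed buffer and posts m_requests[1] for the MPI_PACKED
// payload. wait() completes both stages, while test() reports "not finished"
// until both have completed.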
1662
1663 template<typename T>
1664 optional<status>
1665 request::handle_serialized_irecv(request* self, request_action action)
1666 {
1667 typedef detail::serialized_irecv_data<T> data_t;
1668 shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
1669
1670 if (action == ra_wait) {
1671 status stat;
1672 if (self->m_requests[1] == MPI_REQUEST_NULL) {
1673 // Wait for the count message to complete
1674 BOOST_MPI_CHECK_RESULT(MPI_Wait,
1675 (self->m_requests, &stat.m_status));
1676 // Resize our buffer and get ready to receive its data
1677 data->ia.resize(data->count);
1678 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1679 (data->ia.address(), data->ia.size(), MPI_PACKED,
1680 stat.source(), stat.tag(),
1681 MPI_Comm(data->comm), self->m_requests + 1));
1682 }
1683
1684 // Wait until we have received the entire message
1685 BOOST_MPI_CHECK_RESULT(MPI_Wait,
1686 (self->m_requests + 1, &stat.m_status));
1687
1688 data->deserialize(stat);
1689 return stat;
1690 } else if (action == ra_test) {
1691 status stat;
1692 int flag = 0;
1693
1694 if (self->m_requests[1] == MPI_REQUEST_NULL) {
1695 // Check if the count message has completed
1696 BOOST_MPI_CHECK_RESULT(MPI_Test,
1697 (self->m_requests, &flag, &stat.m_status));
1698 if (flag) {
1699 // Resize our buffer and get ready to receive its data
1700 data->ia.resize(data->count);
1701 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1702 (data->ia.address(), data->ia.size(), MPI_PACKED,
1703 stat.source(), stat.tag(),
1704 MPI_Comm(data->comm), self->m_requests + 1));
1705 } else
1706 return optional<status>(); // We have not finished yet
1707 }
1708
1709 // Check if we have received the message data
1710 BOOST_MPI_CHECK_RESULT(MPI_Test,
1711 (self->m_requests + 1, &flag, &stat.m_status));
1712 if (flag) {
1713 data->deserialize(stat);
1714 return stat;
1715 } else
1716 return optional<status>();
1717 } else {
1718 return optional<status>();
1719 }
1720 }
1721
1722 template<typename T>
1723 optional<status>
1724 request::handle_serialized_array_irecv(request* self, request_action action)
1725 {
1726 typedef detail::serialized_array_irecv_data<T> data_t;
1727 shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);
1728
1729 if (action == ra_wait) {
1730 status stat;
1731 if (self->m_requests[1] == MPI_REQUEST_NULL) {
1732 // Wait for the count message to complete
1733 BOOST_MPI_CHECK_RESULT(MPI_Wait,
1734 (self->m_requests, &stat.m_status));
1735 // Resize our buffer and get ready to receive its data
1736 data->ia.resize(data->count);
1737 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1738 (data->ia.address(), data->ia.size(), MPI_PACKED,
1739 stat.source(), stat.tag(),
1740 MPI_Comm(data->comm), self->m_requests + 1));
1741 }
1742
1743 // Wait until we have received the entire message
1744 BOOST_MPI_CHECK_RESULT(MPI_Wait,
1745 (self->m_requests + 1, &stat.m_status));
1746
1747 data->deserialize(stat);
1748 return stat;
1749 } else if (action == ra_test) {
1750 status stat;
1751 int flag = 0;
1752
1753 if (self->m_requests[1] == MPI_REQUEST_NULL) {
1754 // Check if the count message has completed
1755 BOOST_MPI_CHECK_RESULT(MPI_Test,
1756 (self->m_requests, &flag, &stat.m_status));
1757 if (flag) {
1758 // Resize our buffer and get ready to receive its data
1759 data->ia.resize(data->count);
1760 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1761 (data->ia.address(), data->ia.size(), MPI_PACKED,
1762 stat.source(), stat.tag(),
1763 MPI_Comm(data->comm), self->m_requests + 1));
1764 } else
1765 return optional<status>(); // We have not finished yet
1766 }
1767
1768 // Check if we have received the message data
1769 BOOST_MPI_CHECK_RESULT(MPI_Test,
1770 (self->m_requests + 1, &flag, &stat.m_status));
1771 if (flag) {
1772 data->deserialize(stat);
1773 return stat;
1774 } else
1775 return optional<status>();
1776 } else {
1777 return optional<status>();
1778 }
1779 }
1780
1781 // We're receiving a type that has an associated MPI datatype, so we
1782 // map directly to that datatype.
1783 template<typename T>
1784 request
1785 communicator::irecv_impl(int source, int tag, T& value, mpl::true_) const
1786 {
1787 request req;
1788 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1789 (&value, 1,
1790 get_mpi_datatype<T>(value),
1791 source, tag, MPI_Comm(*this), &req.m_requests[0]));
1792 return req;
1793 }
1794
1795 template<typename T>
1796 request
1797 communicator::irecv_impl(int source, int tag, T& value, mpl::false_) const
1798 {
1799 typedef detail::serialized_irecv_data<T> data_t;
1800 shared_ptr<data_t> data(new data_t(*this, source, tag, value));
1801 request req;
1802 req.m_data = data;
1803 req.m_handler = request::handle_serialized_irecv<T>;
1804
1805 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1806 (&data->count, 1,
1807 get_mpi_datatype<std::size_t>(data->count),
1808 source, tag, MPI_Comm(*this), &req.m_requests[0]));
1809
1810 return req;
1811 }
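// Usage sketch (illustrative only; `world` is an assumption, and receiving a
// std::vector needs <boost/serialization/vector.hpp>): the returned request
// runs the two-stage count/payload protocol whenever it is waited on or
// tested:
//
//   std::vector<double> v;                        // no fixed MPI datatype
//   boost::mpi::request r = world.irecv(0, 17, v);
//   boost::mpi::status s = r.wait();              // count, then packed payload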
1812
1813 template<typename T>
1814 request
1815 communicator::irecv(int source, int tag, T& value) const
1816 {
1817 return this->irecv_impl(source, tag, value, is_mpi_datatype<T>());
1818 }
1819
1820 template<typename T>
1821 request
1822 communicator::array_irecv_impl(int source, int tag, T* values, int n,
1823 mpl::true_) const
1824 {
1825 request req;
1826 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1827 (values, n,
1828 get_mpi_datatype<T>(*values),
1829 source, tag, MPI_Comm(*this), &req.m_requests[0]));
1830 return req;
1831 }
1832
1833 template<typename T>
1834 request
1835 communicator::array_irecv_impl(int source, int tag, T* values, int n,
1836 mpl::false_) const
1837 {
1838 typedef detail::serialized_array_irecv_data<T> data_t;
1839 shared_ptr<data_t> data(new data_t(*this, source, tag, values, n));
1840 request req;
1841 req.m_data = data;
1842 req.m_handler = request::handle_serialized_array_irecv<T>;
1843
1844 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1845 (&data->count, 1,
1846 get_mpi_datatype<std::size_t>(data->count),
1847 source, tag, MPI_Comm(*this), &req.m_requests[0]));
1848
1849 return req;
1850 }
1851
1852
1853 // Array irecv either receives the elements directly into the caller's buffer or deserializes them from a packed message.
1854 template<typename T>
1855 request communicator::irecv(int source, int tag, T* values, int n) const
1856 {
1857 return this->array_irecv_impl(source, tag, values, n, is_mpi_datatype<T>());
1858 }
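// Usage sketch (illustrative only; `world` and the source rank are
// assumptions): the receive-side counterpart of the array isend above. On the
// serialized path, deserialize() throws std::range_error if the incoming
// array holds more than n elements:
//
//   double buf[3];
//   boost::mpi::request r = world.irecv(1, 0, buf, 3);
//   r.wait();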
1859
1860 } } // end namespace boost::mpi
1861
1862 // If the user has already included skeleton_and_content.hpp, include
1863 // the code to send/receive skeletons and content.
1864 #ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
1865 # include <boost/mpi/detail/communicator_sc.hpp>
1866 #endif
1867
1868 #ifdef BOOST_MSVC
1869 # pragma warning(pop)
1870 #endif
1871
1872 #endif // BOOST_MPI_COMMUNICATOR_HPP