]> git.proxmox.com Git - ceph.git/blob - ceph/src/boost/boost/mpi/communicator.hpp
update sources to v12.2.3
[ceph.git] / ceph / src / boost / boost / mpi / communicator.hpp
1 // Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
2 // Copyright (C) 2016 K. Noel Belcourt <kbelco -at- sandia.gov>.
3
4 // Use, modification and distribution is subject to the Boost Software
5 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
6 // http://www.boost.org/LICENSE_1_0.txt)
7
8 /** @file communicator.hpp
9 *
10 * This header defines the @c communicator class, which is the basis
11 * of all communication within Boost.MPI, and provides point-to-point
12 * communication operations.
13 */
14 #ifndef BOOST_MPI_COMMUNICATOR_HPP
15 #define BOOST_MPI_COMMUNICATOR_HPP
16
17 #include <boost/assert.hpp>
18 #include <boost/mpi/config.hpp>
19 #include <boost/mpi/exception.hpp>
20 #include <boost/optional.hpp>
21 #include <boost/shared_ptr.hpp>
22 #include <boost/mpi/datatype.hpp>
23 #include <boost/mpi/nonblocking.hpp>
24 #include <utility>
25 #include <iterator>
26 #include <stdexcept> // for std::range_error
27 #include <vector>
28
29 // For (de-)serializing sends and receives
30 #include <boost/mpi/packed_oarchive.hpp>
31 #include <boost/mpi/packed_iarchive.hpp>
32
33 // For (de-)serializing skeletons and content
34 #include <boost/mpi/skeleton_and_content_fwd.hpp>
35
36 // For (de-)serializing arrays
37 #include <boost/serialization/array.hpp>
38
39 #include <boost/mpi/detail/point_to_point.hpp>
40 #include <boost/mpi/status.hpp>
41 #include <boost/mpi/request.hpp>
42
43 #ifdef BOOST_MSVC
44 # pragma warning(push)
45 # pragma warning(disable : 4800) // forcing to bool 'true' or 'false'
46 #endif
47
48 namespace boost { namespace mpi {
49
/**
 * @brief A constant representing "any process."
 *
 * This constant may be used for the @c source parameter of @c receive
 * operations to indicate that a message may be received from any
 * source. It forwards the underlying MPI constant @c MPI_ANY_SOURCE.
 */
const int any_source = MPI_ANY_SOURCE;
58
/**
 * @brief A constant representing "any tag."
 *
 * This constant may be used for the @c tag parameter of @c receive
 * operations to indicate that a @c send with any tag will be matched
 * by the receive. It forwards the underlying MPI constant @c MPI_ANY_TAG.
 */
const int any_tag = MPI_ANY_TAG;
67
/**
 * @brief Enumeration used to describe how to adopt a C @c MPI_Comm into
 * a Boost.MPI communicator.
 *
 * When a Boost.MPI communicator is constructed from a raw MPI
 * communicator, the chosen enumerator determines who owns the
 * underlying @c MPI_Comm handle and when (if ever) it is freed.
 */
enum comm_create_kind {
  /** Duplicate the @c MPI_Comm communicator to create a new
   *  communicator (e.g., with MPI_Comm_dup). The duplicated handle
   *  is freed automatically once the Boost.MPI communicator (and
   *  every copy of it) has been destroyed. */
  comm_duplicate,

  /** Take ownership of the given communicator. The handle is freed
   *  automatically when all of the Boost.MPI communicators referring
   *  to it go out of scope. This option must not be used with
   *  MPI_COMM_WORLD. */
  comm_take_ownership,

  /** Reference the existing MPI communicator without ever freeing
   *  it. Use this only when the handle is managed by the user or by
   *  the MPI library itself (e.g., MPI_COMM_WORLD). */
  comm_attach
};
93
/**
 * INTERNAL ONLY
 *
 * Forward declaration of @c group needed for the @c group
 * constructor and accessor. The full definition is not required
 * here, so pulling in the complete header is avoided.
 */
class group;

/**
 * INTERNAL ONLY
 *
 * Forward declaration of @c intercommunicator needed for the "cast"
 * from a communicator to an intercommunicator
 * (see @c as_intercommunicator).
 */
class intercommunicator;

/**
 * INTERNAL ONLY
 *
 * Forward declaration of @c graph_communicator needed for the "cast"
 * from a communicator to a graph communicator.
 */
class graph_communicator;
117
118 /**
119 * @brief A communicator that permits communication and
120 * synchronization among a set of processes.
121 *
122 * The @c communicator class abstracts a set of communicating
123 * processes in MPI. All of the processes that belong to a certain
124 * communicator can determine the size of the communicator, their rank
125 * within the communicator, and communicate with any other processes
126 * in the communicator.
127 */
128 class BOOST_MPI_DECL communicator
129 {
130 public:
131 /**
132 * Build a new Boost.MPI communicator for @c MPI_COMM_WORLD.
133 *
134 * Constructs a Boost.MPI communicator that attaches to @c
135 * MPI_COMM_WORLD. This is the equivalent of constructing with
136 * @c (MPI_COMM_WORLD, comm_attach).
137 */
138 communicator();
139
140 /**
141 * Build a new Boost.MPI communicator based on the MPI communicator
142 * @p comm.
143 *
144 * @p comm may be any valid MPI communicator. If @p comm is
145 * MPI_COMM_NULL, an empty communicator (that cannot be used for
146 * communication) is created and the @p kind parameter is
147 * ignored. Otherwise, the @p kind parameters determines how the
148 * Boost.MPI communicator will be related to @p comm:
149 *
150 * - If @p kind is @c comm_duplicate, duplicate @c comm to create
151 * a new communicator. This new communicator will be freed when
152 * the Boost.MPI communicator (and all copies of it) is destroyed.
153 * This option is only permitted if @p comm is a valid MPI
154 * intracommunicator or if the underlying MPI implementation
155 * supports MPI 2.0 (which supports duplication of
156 * intercommunicators).
157 *
158 * - If @p kind is @c comm_take_ownership, take ownership of @c
159 * comm. It will be freed automatically when all of the Boost.MPI
160 * communicators go out of scope. This option must not be used
161 * when @c comm is MPI_COMM_WORLD.
162 *
163 * - If @p kind is @c comm_attach, this Boost.MPI communicator
164 * will reference the existing MPI communicator @p comm but will
165 * not free @p comm when the Boost.MPI communicator goes out of
166 * scope. This option should only be used when the communicator is
167 * managed by the user or MPI library (e.g., MPI_COMM_WORLD).
168 */
169 communicator(const MPI_Comm& comm, comm_create_kind kind);
170
171 /**
172 * Build a new Boost.MPI communicator based on a subgroup of another
173 * MPI communicator.
174 *
175 * This routine will construct a new communicator containing all of
176 * the processes from communicator @c comm that are listed within
177 * the group @c subgroup. Equivalent to @c MPI_Comm_create.
178 *
179 * @param comm An MPI communicator.
180 *
181 * @param subgroup A subgroup of the MPI communicator, @p comm, for
182 * which we will construct a new communicator.
183 */
184 communicator(const communicator& comm, const boost::mpi::group& subgroup);
185
186 /**
187 * @brief Determine the rank of the executing process in a
188 * communicator.
189 *
190 * This routine is equivalent to @c MPI_Comm_rank.
191 *
192 * @returns The rank of the process in the communicator, which
193 * will be a value in [0, size())
194 */
195 int rank() const;
196
197 /**
198 * @brief Determine the number of processes in a communicator.
199 *
200 * This routine is equivalent to @c MPI_Comm_size.
201 *
202 * @returns The number of processes in the communicator.
203 */
204 int size() const;
205
206 /**
207 * This routine constructs a new group whose members are the
208 * processes within this communicator. Equivalent to
209 * calling @c MPI_Comm_group.
210 */
211 boost::mpi::group group() const;
212
213 // ----------------------------------------------------------------
214 // Point-to-point communication
215 // ----------------------------------------------------------------
216
217 /**
218 * @brief Send data to another process.
219 *
220 * This routine executes a potentially blocking send with tag @p tag
221 * to the process with rank @p dest. It can be received by the
222 * destination process with a matching @c recv call.
223 *
224 * The given @p value must be suitable for transmission over
225 * MPI. There are several classes of types that meet these
226 * requirements:
227 *
228 * - Types with mappings to MPI data types: If @c
229 * is_mpi_datatype<T> is convertible to @c mpl::true_, then @p
230 * value will be transmitted using the MPI data type
231 * @c get_mpi_datatype<T>(). All primitive C++ data types that have
232 * MPI equivalents, e.g., @c int, @c float, @c char, @c double,
233 * etc., have built-in mappings to MPI data types. You may turn a
234 * Serializable type with fixed structure into an MPI data type by
235 * specializing @c is_mpi_datatype for your type.
236 *
237 * - Serializable types: Any type that provides the @c serialize()
238 * functionality required by the Boost.Serialization library can be
239 * transmitted and received.
240 *
241 * - Packed archives and skeletons: Data that has been packed into
242 * an @c mpi::packed_oarchive or the skeletons of data that have
243    *   been packed into an @c mpi::packed_skeleton_oarchive can be
244 * transmitted, but will be received as @c mpi::packed_iarchive and
245 * @c mpi::packed_skeleton_iarchive, respectively, to allow the
246 * values (or skeletons) to be extracted by the destination process.
247 *
248 * - Content: Content associated with a previously-transmitted
249 * skeleton can be transmitted by @c send and received by @c
250 * recv. The receiving process may only receive content into the
251 * content of a value that has been constructed with the matching
252 * skeleton.
253 *
254 * For types that have mappings to an MPI data type (including the
255    *  content of a type), an invocation of this routine will result in
256 * a single MPI_Send call. For variable-length data, e.g.,
257 * serialized types and packed archives, two messages will be sent
258 * via MPI_Send: one containing the length of the data and the
259 * second containing the data itself. Note that the transmission
260 * mode for variable-length data is an implementation detail that
261 * is subject to change.
262 *
263 * @param dest The rank of the remote process to which the data
264 * will be sent.
265 *
266 * @param tag The tag that will be associated with this message. Tags
267 * may be any integer between zero and an implementation-defined
268 * upper limit. This limit is accessible via @c environment::max_tag().
269 *
270 * @param value The value that will be transmitted to the
271 * receiver. The type @c T of this value must meet the aforementioned
272 * criteria for transmission.
273 */
274 template<typename T>
275 void send(int dest, int tag, const T& value) const;
276
277 template<typename T, typename A>
278 void send(int dest, int tag, const std::vector<T,A>& value) const;
279
280 template<typename T, typename A>
281 void send_vector(int dest, int tag, const std::vector<T,A>& value,
282 mpl::true_) const;
283
284 template<typename T, typename A>
285 void send_vector(int dest, int tag, const std::vector<T,A>& value,
286 mpl::false_) const;
287
288 /**
289 * @brief Send the skeleton of an object.
290 *
291 * This routine executes a potentially blocking send with tag @p
292 * tag to the process with rank @p dest. It can be received by the
293 * destination process with a matching @c recv call. This variation
294 * on @c send will be used when a send of a skeleton is explicitly
295 * requested via code such as:
296 *
297 * @code
298 * comm.send(dest, tag, skeleton(object));
299 * @endcode
300 *
301 * The semantics of this routine are equivalent to that of sending
302 * a @c packed_skeleton_oarchive storing the skeleton of the @c
303 * object.
304 *
305 * @param dest The rank of the remote process to which the skeleton
306 * will be sent.
307 *
308 * @param tag The tag that will be associated with this message. Tags
309 * may be any integer between zero and an implementation-defined
310 * upper limit. This limit is accessible via @c environment::max_tag().
311 *
312 * @param proxy The @c skeleton_proxy containing a reference to the
313 * object whose skeleton will be transmitted.
314 *
315 */
316 template<typename T>
317 void send(int dest, int tag, const skeleton_proxy<T>& proxy) const;
318
319 /**
320 * @brief Send an array of values to another process.
321 *
322 * This routine executes a potentially blocking send of an array of
323 * data with tag @p tag to the process with rank @p dest. It can be
324 * received by the destination process with a matching array @c
325 * recv call.
326 *
327 * If @c T is an MPI datatype, an invocation of this routine will
328 * be mapped to a single call to MPI_Send, using the datatype @c
329 * get_mpi_datatype<T>().
330 *
331 * @param dest The process rank of the remote process to which
332 * the data will be sent.
333 *
334 * @param tag The tag that will be associated with this message. Tags
335 * may be any integer between zero and an implementation-defined
336 * upper limit. This limit is accessible via @c environment::max_tag().
337 *
338 * @param values The array of values that will be transmitted to the
339 * receiver. The type @c T of these values must be mapped to an MPI
340 * data type.
341 *
342 * @param n The number of values stored in the array. The destination
343 * process must call receive with at least this many elements to
344 * correctly receive the message.
345 */
346 template<typename T>
347 void send(int dest, int tag, const T* values, int n) const;
348
349 /**
350 * @brief Send a message to another process without any data.
351 *
352 * This routine executes a potentially blocking send of a message
353 * to another process. The message contains no extra data, and can
354 * therefore only be received by a matching call to @c recv().
355 *
356 * @param dest The process rank of the remote process to which
357 * the message will be sent.
358 *
359 * @param tag The tag that will be associated with this message. Tags
360 * may be any integer between zero and an implementation-defined
361 * upper limit. This limit is accessible via @c environment::max_tag().
362 *
363 */
364 void send(int dest, int tag) const;
365
366 /**
367 * @brief Receive data from a remote process.
368 *
369 * This routine blocks until it receives a message from the process @p
370 * source with the given @p tag. The type @c T of the @p value must be
371 * suitable for transmission over MPI, which includes serializable
372 * types, types that can be mapped to MPI data types (including most
373 * built-in C++ types), packed MPI archives, skeletons, and content
374 * associated with skeletons; see the documentation of @c send for a
375 * complete description.
376 *
377 * @param source The process that will be sending data. This will
378 * either be a process rank within the communicator or the
379 * constant @c any_source, indicating that we can receive the
380 * message from any process.
381 *
382 * @param tag The tag that matches a particular kind of message sent
383 * by the source process. This may be any tag value permitted by @c
384 * send. Alternatively, the argument may be the constant @c any_tag,
385 * indicating that this receive matches a message with any tag.
386 *
387 * @param value Will contain the value of the message after a
388 * successful receive. The type of this value must match the value
389 * transmitted by the sender, unless the sender transmitted a packed
390 * archive or skeleton: in these cases, the sender transmits a @c
391 * packed_oarchive or @c packed_skeleton_oarchive and the
392 * destination receives a @c packed_iarchive or @c
393 * packed_skeleton_iarchive, respectively.
394 *
395 * @returns Information about the received message.
396 */
397 template<typename T>
398 status recv(int source, int tag, T& value) const;
399
400 template<typename T, typename A>
401 status recv(int source, int tag, std::vector<T,A>& value) const;
402
403 template<typename T, typename A>
404 status recv_vector(int source, int tag, std::vector<T,A>& value,
405 mpl::true_) const;
406
407 template<typename T, typename A>
408 status recv_vector(int source, int tag, std::vector<T,A>& value,
409 mpl::false_) const;
410
411 /**
412 * @brief Receive a skeleton from a remote process.
413 *
414 * This routine blocks until it receives a message from the process @p
415 * source with the given @p tag containing a skeleton.
416 *
417 * @param source The process that will be sending data. This will
418 * either be a process rank within the communicator or the constant
419 * @c any_source, indicating that we can receive the message from
420 * any process.
421 *
422 * @param tag The tag that matches a particular kind of message
423 * sent by the source process. This may be any tag value permitted
424 * by @c send. Alternatively, the argument may be the constant @c
425 * any_tag, indicating that this receive matches a message with any
426 * tag.
427 *
428 * @param proxy The @c skeleton_proxy containing a reference to the
429 * object that will be reshaped to match the received skeleton.
430 *
431 * @returns Information about the received message.
432 */
433 template<typename T>
434 status recv(int source, int tag, const skeleton_proxy<T>& proxy) const;
435
436 /**
437 * @brief Receive a skeleton from a remote process.
438 *
439 * This routine blocks until it receives a message from the process @p
440 * source with the given @p tag containing a skeleton.
441 *
442 * @param source The process that will be sending data. This will
443 * either be a process rank within the communicator or the constant
444 * @c any_source, indicating that we can receive the message from
445 * any process.
446 *
447 * @param tag The tag that matches a particular kind of message
448 * sent by the source process. This may be any tag value permitted
449 * by @c send. Alternatively, the argument may be the constant @c
450 * any_tag, indicating that this receive matches a message with any
451 * tag.
452 *
453 * @param proxy The @c skeleton_proxy containing a reference to the
454 * object that will be reshaped to match the received skeleton.
455 *
456 * @returns Information about the received message.
457 */
458 template<typename T>
459 status recv(int source, int tag, skeleton_proxy<T>& proxy) const;
460
461 /**
462 * @brief Receive an array of values from a remote process.
463 *
464 * This routine blocks until it receives an array of values from the
465    *  process @p source with the given @p tag. If the type @c T is an
466    *  MPI data type, the values are received via a single MPI_Recv call.
467 * @param source The process that will be sending data. This will
468 * either be a process rank within the communicator or the
469 * constant @c any_source, indicating that we can receive the
470 * message from any process.
471 *
472 * @param tag The tag that matches a particular kind of message sent
473 * by the source process. This may be any tag value permitted by @c
474 * send. Alternatively, the argument may be the constant @c any_tag,
475 * indicating that this receive matches a message with any tag.
476 *
477 * @param values Will contain the values in the message after a
478 * successful receive. The type of these elements must match the
479 * type of the elements transmitted by the sender.
480 *
481 * @param n The number of values that can be stored into the @p
482 * values array. This shall not be smaller than the number of
483 * elements transmitted by the sender.
484 *
485 * @throws std::range_error if the message to be received contains
486 * more than @p n values.
487 *
488 * @returns Information about the received message.
489 */
490 template<typename T>
491 status recv(int source, int tag, T* values, int n) const;
492
493 /**
494 * @brief Receive a message from a remote process without any data.
495 *
496 * This routine blocks until it receives a message from the process
497 * @p source with the given @p tag.
498 *
499 * @param source The process that will be sending the message. This
500 * will either be a process rank within the communicator or the
501 * constant @c any_source, indicating that we can receive the
502 * message from any process.
503 *
504 * @param tag The tag that matches a particular kind of message
505 * sent by the source process. This may be any tag value permitted
506 * by @c send. Alternatively, the argument may be the constant @c
507 * any_tag, indicating that this receive matches a message with any
508 * tag.
509 *
510 * @returns Information about the received message.
511 */
512 status recv(int source, int tag) const;
513
514    /** @brief Send a message to a remote process and receive another message
515 * from another process.
516 */
517 template<typename T>
518 status sendrecv(int dest, int stag, const T& sval, int src, int rtag, T& rval) const;
519
520 /**
521 * @brief Send a message to a remote process without blocking.
522 *
523    *  The @c isend method is functionally identical to the @c send
524 * method and transmits data in the same way, except that @c isend
525 * will not block while waiting for the data to be
526 * transmitted. Instead, a request object will be immediately
527 * returned, allowing one to query the status of the communication
528 * or wait until it has completed.
529 *
530 * @param dest The rank of the remote process to which the data
531 * will be sent.
532 *
533 * @param tag The tag that will be associated with this message. Tags
534 * may be any integer between zero and an implementation-defined
535 * upper limit. This limit is accessible via @c environment::max_tag().
536 *
537 * @param value The value that will be transmitted to the
538 * receiver. The type @c T of this value must meet the aforementioned
539 * criteria for transmission.
540 *
541 * @returns a @c request object that describes this communication.
542 */
543 template<typename T>
544 request isend(int dest, int tag, const T& value) const;
545
546 /**
547 * @brief Send the skeleton of an object without blocking.
548 *
549 * This routine is functionally identical to the @c send method for
550 * @c skeleton_proxy objects except that @c isend will not block
551 * while waiting for the data to be transmitted. Instead, a request
552 * object will be immediately returned, allowing one to query the
553 * status of the communication or wait until it has completed.
554 *
555 * The semantics of this routine are equivalent to a non-blocking
556 * send of a @c packed_skeleton_oarchive storing the skeleton of
557 * the @c object.
558 *
559 * @param dest The rank of the remote process to which the skeleton
560 * will be sent.
561 *
562 * @param tag The tag that will be associated with this message. Tags
563 * may be any integer between zero and an implementation-defined
564 * upper limit. This limit is accessible via @c environment::max_tag().
565 *
566 * @param proxy The @c skeleton_proxy containing a reference to the
567 * object whose skeleton will be transmitted.
568 *
569 * @returns a @c request object that describes this communication.
570 */
571 template<typename T>
572 request isend(int dest, int tag, const skeleton_proxy<T>& proxy) const;
573
574 /**
575 * @brief Send an array of values to another process without
576 * blocking.
577 *
578 * This routine is functionally identical to the @c send method for
579 * arrays except that @c isend will not block while waiting for the
580 * data to be transmitted. Instead, a request object will be
581 * immediately returned, allowing one to query the status of the
582 * communication or wait until it has completed.
583 *
584 * @param dest The process rank of the remote process to which
585 * the data will be sent.
586 *
587 * @param tag The tag that will be associated with this message. Tags
588 * may be any integer between zero and an implementation-defined
589 * upper limit. This limit is accessible via @c environment::max_tag().
590 *
591 * @param values The array of values that will be transmitted to the
592 * receiver. The type @c T of these values must be mapped to an MPI
593 * data type.
594 *
595 * @param n The number of values stored in the array. The destination
596 * process must call receive with at least this many elements to
597 * correctly receive the message.
598 *
599 * @returns a @c request object that describes this communication.
600 */
601 template<typename T>
602 request isend(int dest, int tag, const T* values, int n) const;
603
604 /**
605 * @brief Send a message to another process without any data
606 * without blocking.
607 *
608 * This routine is functionally identical to the @c send method for
609 * sends with no data, except that @c isend will not block while
610 * waiting for the message to be transmitted. Instead, a request
611 * object will be immediately returned, allowing one to query the
612 * status of the communication or wait until it has completed.
613 *
614 * @param dest The process rank of the remote process to which
615 * the message will be sent.
616 *
617 * @param tag The tag that will be associated with this message. Tags
618 * may be any integer between zero and an implementation-defined
619 * upper limit. This limit is accessible via @c environment::max_tag().
620 *
621 *
622 * @returns a @c request object that describes this communication.
623 */
624 request isend(int dest, int tag) const;
625
626 /**
627 * @brief Prepare to receive a message from a remote process.
628 *
629 * The @c irecv method is functionally identical to the @c recv
630    *  method and receives data in the same way, except that @c irecv
631 * will not block while waiting for data to be
632 * transmitted. Instead, it immediately returns a request object
633 * that allows one to query the status of the receive or wait until
634 * it has completed.
635 *
636 * @param source The process that will be sending data. This will
637 * either be a process rank within the communicator or the
638 * constant @c any_source, indicating that we can receive the
639 * message from any process.
640 *
641 * @param tag The tag that matches a particular kind of message sent
642 * by the source process. This may be any tag value permitted by @c
643 * send. Alternatively, the argument may be the constant @c any_tag,
644 * indicating that this receive matches a message with any tag.
645 *
646 * @param value Will contain the value of the message after a
647 * successful receive. The type of this value must match the value
648 * transmitted by the sender, unless the sender transmitted a packed
649 * archive or skeleton: in these cases, the sender transmits a @c
650 * packed_oarchive or @c packed_skeleton_oarchive and the
651 * destination receives a @c packed_iarchive or @c
652 * packed_skeleton_iarchive, respectively.
653 *
654 * @returns a @c request object that describes this communication.
655 */
656 template<typename T>
657 request irecv(int source, int tag, T& value) const;
658
659 /**
660 * @brief Initiate receipt of an array of values from a remote process.
661 *
662 * This routine initiates a receive operation for an array of values
663 * transmitted by process @p source with the given @p tag.
664 *
665 * @param source The process that will be sending data. This will
666 * either be a process rank within the communicator or the
667 * constant @c any_source, indicating that we can receive the
668 * message from any process.
669 *
670 * @param tag The tag that matches a particular kind of message sent
671 * by the source process. This may be any tag value permitted by @c
672 * send. Alternatively, the argument may be the constant @c any_tag,
673 * indicating that this receive matches a message with any tag.
674 *
675 * @param values Will contain the values in the message after a
676 * successful receive. The type of these elements must match the
677 * type of the elements transmitted by the sender.
678 *
679 * @param n The number of values that can be stored into the @p
680 * values array. This shall not be smaller than the number of
681 * elements transmitted by the sender.
682 *
683 * @returns a @c request object that describes this communication.
684 */
685 template<typename T>
686 request irecv(int source, int tag, T* values, int n) const;
687
688 /**
689 * @brief Initiate receipt of a message from a remote process that
690 * carries no data.
691 *
692 * This routine initiates a receive operation for a message from
693 * process @p source with the given @p tag that carries no data.
694 *
695 * @param source The process that will be sending the message. This
696 * will either be a process rank within the communicator or the
697 * constant @c any_source, indicating that we can receive the
698 * message from any process.
699 *
700 * @param tag The tag that matches a particular kind of message
701 * sent by the source process. This may be any tag value permitted
702 * by @c send. Alternatively, the argument may be the constant @c
703 * any_tag, indicating that this receive matches a message with any
704 * tag.
705 *
706 * @returns a @c request object that describes this communication.
707 */
708 request irecv(int source, int tag) const;
709
710 /**
711 * @brief Waits until a message is available to be received.
712 *
713 * This operation waits until a message matching (@p source, @p tag)
714 * is available to be received. It then returns information about
715 * that message. The functionality is equivalent to @c MPI_Probe. To
716 * check if a message is available without blocking, use @c iprobe.
717 *
718 * @param source Determine if there is a message available from
719 * this rank. If @c any_source, then the message returned may come
720 * from any source.
721 *
722 * @param tag Determine if there is a message available with the
723 * given tag. If @c any_tag, then the message returned may have any
724 * tag.
725 *
726 * @returns Returns information about the first message that
727 * matches the given criteria.
728 */
729 status probe(int source = any_source, int tag = any_tag) const;
730
731 /**
732 * @brief Determine if a message is available to be received.
733 *
734 * This operation determines if a message matching (@p source, @p
735 * tag) is available to be received. If so, it returns information
736 * about that message; otherwise, it returns immediately with an
737 * empty optional. The functionality is equivalent to @c
738    *  MPI_Iprobe. To wait until a message is available, use @c probe.
739 *
740 * @param source Determine if there is a message available from
741 * this rank. If @c any_source, then the message returned may come
742 * from any source.
743 *
744 * @param tag Determine if there is a message available with the
745 * given tag. If @c any_tag, then the message returned may have any
746 * tag.
747 *
748 * @returns If a matching message is available, returns
749 * information about that message. Otherwise, returns an empty
750 * @c boost::optional.
751 */
752 optional<status>
753 iprobe(int source = any_source, int tag = any_tag) const;
754
755 #ifdef barrier
756 // Linux defines a function-like macro named "barrier". So, we need
757 // to avoid expanding the macro when we define our barrier()
758 // function. However, some C++ parsers (Doxygen, for instance) can't
759 // handle this syntax, so we only use it when necessary.
760 void (barrier)() const;
761 #else
762 /**
763 * @brief Wait for all processes within a communicator to reach the
764 * barrier.
765 *
766 * This routine is a collective operation that blocks each process
767 * until all processes have entered it, then releases all of the
768 * processes "simultaneously". It is equivalent to @c MPI_Barrier.
769 */
770 void barrier() const;
771 #endif
772
773 /** @brief Determine if this communicator is valid for
774 * communication.
775 *
776 * Evaluates @c true in a boolean context if this communicator is
777 * valid for communication, i.e., does not represent
778 * MPI_COMM_NULL. Otherwise, evaluates @c false.
779 */
780 operator bool() const { return (bool)comm_ptr; }
781
782 /**
783 * @brief Access the MPI communicator associated with a Boost.MPI
784 * communicator.
785 *
786 * This routine permits the implicit conversion from a Boost.MPI
787 * communicator to an MPI communicator.
788 *
789 * @returns The associated MPI communicator.
790 */
791 operator MPI_Comm() const;
792
793 /**
794 * Split the communicator into multiple, disjoint communicators
795 * each of which is based on a particular color. This is a
796 * collective operation that returns a new communicator that is a
797 * subgroup of @p this. This routine is functionally equivalent to
798 * @c MPI_Comm_split.
799 *
800 * @param color The color of this process. All processes with the
801 * same @p color value will be placed into the same group.
802 *
803 * @returns A new communicator containing all of the processes in
804 * @p this that have the same @p color.
805 */
806 communicator split(int color) const;
807
808 /**
809 * Split the communicator into multiple, disjoint communicators
810 * each of which is based on a particular color. This is a
811 * collective operation that returns a new communicator that is a
812 * subgroup of @p this. This routine is functionally equivalent to
813 * @c MPI_Comm_split.
814 *
815 * @param color The color of this process. All processes with the
816 * same @p color value will be placed into the same group.
817 *
818 * @param key A key value that will be used to determine the
819 * ordering of processes with the same color in the resulting
820 * communicator. If omitted, the rank of the processes in @p this
821 * will determine the ordering of processes in the resulting
822 * group.
823 *
824 * @returns A new communicator containing all of the processes in
825 * @p this that have the same @p color.
826 */
827 communicator split(int color, int key) const;
828
829 /**
830 * Determine if the communicator is in fact an intercommunicator
831 * and, if so, return that intercommunicator.
832 *
833 * @returns an @c optional containing the intercommunicator, if this
834 * communicator is in fact an intercommunicator. Otherwise, returns
835 * an empty @c optional.
836 */
837 optional<intercommunicator> as_intercommunicator() const;
838
839 /**
840 * Determine if the communicator has a graph topology and, if so,
841 * return that @c graph_communicator. Even though the communicators
842 * have different types, they refer to the same underlying
843 * communication space and can be used interchangeably for
844 * communication.
845 *
846 * @returns an @c optional containing the graph communicator, if this
847 * communicator does in fact have a graph topology. Otherwise, returns
848 * an empty @c optional.
849 */
850 optional<graph_communicator> as_graph_communicator() const;
851
852 /**
853 * Determines whether this communicator has a Cartesian topology.
854 */
855 bool has_cartesian_topology() const;
856
857 #if 0
858 template<typename Extents>
859 communicator
860 with_cartesian_topology(const Extents& extents,
861 bool periodic = false,
862 bool reorder = false) const;
863
864 template<typename DimInputIterator, typename PeriodicInputIterator>
865 communicator
866 with_cartesian_topology(DimInputIterator first_dim,
867 DimInputIterator last_dim,
868 PeriodicInputIterator first_periodic,
869 bool reorder = false);
870
871 template<typename Allocator, std::size_t NumDims>
872 communicator
873 with_cartesian_topology(const multi_array<bool, NumDims, Allocator>& periods,
874 bool reorder = false);
875 #endif
876
877 /** Abort all tasks in the group of this communicator.
878 *
879 * Makes a "best attempt" to abort all of the tasks in the group of
880 * this communicator. Depending on the underlying MPI
881 * implementation, this may either abort the entire program (and
882 * possibly return @p errcode to the environment) or only abort
883 * some processes, allowing the others to continue. Consult the
884 * documentation for your MPI implementation. This is equivalent to
885 * a call to @c MPI_Abort
886 *
887 * @param errcode The error code to return from aborted processes.
888 * @returns Will not return.
889 */
890 void abort(int errcode) const;
891
892 protected:
893
894 /**
895 * INTERNAL ONLY
896 *
897 * Implementation of sendrecv for mpi type.
898 */
899 template<typename T>
900 status sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
901 mpl::true_) const;
902
903 /**
904 * INTERNAL ONLY
905 *
906 * Implementation of sendrecv for complex types, which must be passed as archives.
907 */
908 template<typename T>
909 status sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
910 mpl::false_) const;
911
912 /**
913 * INTERNAL ONLY
914 *
915 * Function object that frees an MPI communicator and deletes the
916 * memory associated with it. Intended to be used as a deleter with
917 * shared_ptr.
918 */
  struct comm_free
  {
    // Deleter invoked by shared_ptr when the last owner goes away.
    void operator()(MPI_Comm* comm) const
    {
      BOOST_ASSERT( comm != 0 );
      BOOST_ASSERT(*comm != MPI_COMM_NULL);
      // Calling MPI_Comm_free after MPI_Finalize is erroneous, so only
      // free the communicator while MPI is still active.
      int finalized;
      BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&finalized));
      if (!finalized)
        BOOST_MPI_CHECK_RESULT(MPI_Comm_free, (comm));
      // The heap-allocated handle itself is always reclaimed.
      delete comm;
    }
  };
932
933
934 /**
935 * INTERNAL ONLY
936 *
937 * We're sending a type that has an associated MPI datatype, so we
938 * map directly to that datatype.
939 */
940 template<typename T>
941 void send_impl(int dest, int tag, const T& value, mpl::true_) const;
942
943 /**
944 * INTERNAL ONLY
945 *
946 * We're sending a type that does not have an associated MPI
947 * datatype, so it must be serialized then sent as MPI_PACKED data,
948 * to be deserialized on the receiver side.
949 */
950 template<typename T>
951 void send_impl(int dest, int tag, const T& value, mpl::false_) const;
952
953 /**
954 * INTERNAL ONLY
955 *
956 * We're sending an array of a type that has an associated MPI
957 * datatype, so we map directly to that datatype.
958 */
959 template<typename T>
960 void
961 array_send_impl(int dest, int tag, const T* values, int n, mpl::true_) const;
962
963 /**
964 * INTERNAL ONLY
965 *
966 * We're sending an array of a type that does not have an associated
967 * MPI datatype, so it must be serialized then sent as MPI_PACKED
968 * data, to be deserialized on the receiver side.
969 */
970 template<typename T>
971 void
972 array_send_impl(int dest, int tag, const T* values, int n,
973 mpl::false_) const;
974
975 /**
976 * INTERNAL ONLY
977 *
978 * We're sending a type that has an associated MPI datatype, so we
979 * map directly to that datatype.
980 */
981 template<typename T>
982 request isend_impl(int dest, int tag, const T& value, mpl::true_) const;
983
984 /**
985 * INTERNAL ONLY
986 *
987 * We're sending a type that does not have an associated MPI
988 * datatype, so it must be serialized then sent as MPI_PACKED data,
989 * to be deserialized on the receiver side.
990 */
991 template<typename T>
992 request isend_impl(int dest, int tag, const T& value, mpl::false_) const;
993
994 /**
995 * INTERNAL ONLY
996 *
997 * We're sending an array of a type that has an associated MPI
998 * datatype, so we map directly to that datatype.
999 */
1000 template<typename T>
1001 request
1002 array_isend_impl(int dest, int tag, const T* values, int n,
1003 mpl::true_) const;
1004
1005 /**
1006 * INTERNAL ONLY
1007 *
1008 * We're sending an array of a type that does not have an associated
1009 * MPI datatype, so it must be serialized then sent as MPI_PACKED
1010 * data, to be deserialized on the receiver side.
1011 */
1012 template<typename T>
1013 request
1014 array_isend_impl(int dest, int tag, const T* values, int n,
1015 mpl::false_) const;
1016
1017 /**
1018 * INTERNAL ONLY
1019 *
1020 * We're receiving a type that has an associated MPI datatype, so we
1021 * map directly to that datatype.
1022 */
1023 template<typename T>
1024 status recv_impl(int source, int tag, T& value, mpl::true_) const;
1025
1026 /**
1027 * INTERNAL ONLY
1028 *
1029 * We're receiving a type that does not have an associated MPI
1030 * datatype, so it must have been serialized then sent as
1031 * MPI_PACKED. We'll receive it and then deserialize.
1032 */
1033 template<typename T>
1034 status recv_impl(int source, int tag, T& value, mpl::false_) const;
1035
1036 /**
1037 * INTERNAL ONLY
1038 *
1039 * We're receiving an array of a type that has an associated MPI
1040 * datatype, so we map directly to that datatype.
1041 */
1042 template<typename T>
1043 status
1044 array_recv_impl(int source, int tag, T* values, int n, mpl::true_) const;
1045
1046 /**
1047 * INTERNAL ONLY
1048 *
1049 * We're receiving a type that does not have an associated MPI
1050 * datatype, so it must have been serialized then sent as
1051 * MPI_PACKED. We'll receive it and then deserialize.
1052 */
1053 template<typename T>
1054 status
1055 array_recv_impl(int source, int tag, T* values, int n, mpl::false_) const;
1056
1057 /**
1058 * INTERNAL ONLY
1059 *
1060 * We're receiving a type that has an associated MPI datatype, so we
1061 * map directly to that datatype.
1062 */
1063 template<typename T>
1064 request irecv_impl(int source, int tag, T& value, mpl::true_) const;
1065
1066 /**
1067 * INTERNAL ONLY
1068 *
1069 * We're receiving a type that does not have an associated MPI
1070 * datatype, so it must have been serialized then sent as
1071 * MPI_PACKED. We'll receive it and then deserialize.
1072 */
1073 template<typename T>
1074 request irecv_impl(int source, int tag, T& value, mpl::false_) const;
1075
1076 /**
1077 * INTERNAL ONLY
1078 *
1079 * We're receiving a type that has an associated MPI datatype, so we
1080 * map directly to that datatype.
1081 */
1082 template<typename T>
1083 request
1084 array_irecv_impl(int source, int tag, T* values, int n, mpl::true_) const;
1085
1086 /**
1087 * INTERNAL ONLY
1088 *
1089 * We're receiving a type that does not have an associated MPI
1090 * datatype, so it must have been serialized then sent as
1091 * MPI_PACKED. We'll receive it and then deserialize.
1092 */
1093 template<typename T>
1094 request
1095 array_irecv_impl(int source, int tag, T* values, int n, mpl::false_) const;
1096
1097 shared_ptr<MPI_Comm> comm_ptr;
1098 };
1099
1100 /**
1101 * @brief Determines whether two communicators are identical.
1102 *
1103 * Equivalent to calling @c MPI_Comm_compare and checking whether the
1104 * result is @c MPI_IDENT.
1105 *
1106 * @returns True when the two communicators refer to the same
1107 * underlying MPI communicator.
1108 */
1109 BOOST_MPI_DECL bool operator==(const communicator& comm1, const communicator& comm2);
1110
1111 /**
1112 * @brief Determines whether two communicators are different.
1113 *
1114 * @returns @c !(comm1 == comm2)
1115 */
1116 inline bool operator!=(const communicator& comm1, const communicator& comm2)
1117 {
1118 return !(comm1 == comm2);
1119 }
1120
1121
1122 /************************************************************************
1123 * Implementation details *
1124 ************************************************************************/
// Count elements in a message.  Dispatches on whether T has a native
// MPI datatype (query MPI_Get_count) or was serialized (use the cached
// count, if any).
template<typename T>
inline optional<int> status::count() const
{
  return count_impl<T>(is_mpi_datatype<T>());
}
1131
// T has an MPI datatype: ask MPI how many elements the message held.
// The result is cached in m_count (mutable) so repeated queries do not
// re-invoke MPI_Get_count.
template<typename T>
optional<int> status::count_impl(mpl::true_) const
{
  // Already computed on a previous call.
  if (m_count != -1)
    return m_count;

  int return_value;
  // Note: MPI_Get_count needs a datatype; T() requires T to be
  // default-constructible here.
  BOOST_MPI_CHECK_RESULT(MPI_Get_count,
                         (&m_status, get_mpi_datatype<T>(T()), &return_value));
  if (return_value == MPI_UNDEFINED)
    // The received bytes do not form a whole number of T elements.
    return optional<int>();
  else
    /* Cache the result. */
    return m_count = return_value;
}
1147
1148 template<typename T>
1149 inline optional<int> status::count_impl(mpl::false_) const
1150 {
1151 if (m_count == -1)
1152 return optional<int>();
1153 else
1154 return m_count;
1155 }
1156
// We're sending a type that has an associated MPI datatype, so we
// map directly to that datatype.  The const_cast is required because
// pre-MPI-3 bindings declare the send buffer as non-const void*.
template<typename T>
void
communicator::send_impl(int dest, int tag, const T& value, mpl::true_) const
{
  BOOST_MPI_CHECK_RESULT(MPI_Send,
                         (const_cast<T*>(&value), 1, get_mpi_datatype<T>(value),
                          dest, tag, MPI_Comm(*this)));
}
1167
// We're sending a type that does not have an associated MPI
// datatype, so it must be serialized then sent as MPI_PACKED data,
// to be deserialized on the receiver side.  Delegates to the
// packed_oarchive overload of send() (specialized elsewhere).
template<typename T>
void
communicator::send_impl(int dest, int tag, const T& value, mpl::false_) const
{
  packed_oarchive oa(*this);
  oa << value;
  send(dest, tag, oa);
}
1179
1180 // Single-element receive may either send the element directly or
1181 // serialize it via a buffer.
1182 template<typename T>
1183 void communicator::send(int dest, int tag, const T& value) const
1184 {
1185 this->send_impl(dest, tag, value, is_mpi_datatype<T>());
1186 }
1187
// We're sending an array of a type that has an associated MPI
// datatype, so we map directly to that datatype.  const_cast: the
// classic MPI C bindings take a non-const send buffer.
template<typename T>
void
communicator::array_send_impl(int dest, int tag, const T* values, int n,
                              mpl::true_) const
{
  BOOST_MPI_CHECK_RESULT(MPI_Send,
                         (const_cast<T*>(values), n,
                          get_mpi_datatype<T>(*values),
                          dest, tag, MPI_Comm(*this)));
}
1200
// We're sending an array of a type that does not have an associated
// MPI datatype, so it must be serialized then sent as MPI_PACKED
// data, to be deserialized on the receiver side.  The element count n
// is serialized first so the receiver can check for overflow
// (see array_recv_impl).
template<typename T>
void
communicator::array_send_impl(int dest, int tag, const T* values, int n,
                              mpl::false_) const
{
  packed_oarchive oa(*this);
  oa << n << boost::serialization::make_array(values, n);
  send(dest, tag, oa);
}
1213
// Vector of an MPI-datatype element: sent as two messages — first the
// element count, then the raw contiguous data.  Pairs with
// recv_vector(..., mpl::true_), which receives the size and resizes
// before receiving the data.
template<typename T, typename A>
void communicator::send_vector(int dest, int tag,
  const std::vector<T,A>& value, mpl::true_ true_type) const
{
  // send the vector size
  typename std::vector<T,A>::size_type size = value.size();
  send(dest, tag, size);
  // send the data
  this->array_send_impl(dest, tag, value.data(), size, true_type);
}
1224
1225 template<typename T, typename A>
1226 void communicator::send_vector(int dest, int tag,
1227 const std::vector<T,A>& value, mpl::false_ false_type) const
1228 {
1229 this->send_impl(dest, tag, value, false_type);
1230 }
1231
1232 template<typename T, typename A>
1233 void communicator::send(int dest, int tag, const std::vector<T,A>& value) const
1234 {
1235 send_vector(dest, tag, value, is_mpi_datatype<T>());
1236 }
1237
1238 // Array send must send the elements directly
1239 template<typename T>
1240 void communicator::send(int dest, int tag, const T* values, int n) const
1241 {
1242 this->array_send_impl(dest, tag, values, n, is_mpi_datatype<T>());
1243 }
1244
1245 // We're receiving a type that has an associated MPI datatype, so we
1246 // map directly to that datatype.
1247 template<typename T>
1248 status communicator::recv_impl(int source, int tag, T& value, mpl::true_) const
1249 {
1250 status stat;
1251
1252 BOOST_MPI_CHECK_RESULT(MPI_Recv,
1253 (const_cast<T*>(&value), 1,
1254 get_mpi_datatype<T>(value),
1255 source, tag, MPI_Comm(*this), &stat.m_status));
1256 return stat;
1257 }
1258
// We're receiving a type without an MPI datatype: it arrives as a
// packed archive (via the packed_iarchive specialization of recv,
// defined elsewhere) and is deserialized into `value`.
template<typename T>
status
communicator::recv_impl(int source, int tag, T& value, mpl::false_) const
{
  // Receive the message
  packed_iarchive ia(*this);
  status stat = recv(source, tag, ia);

  // Deserialize the data in the message
  ia >> value;

  return stat;
}
1272
1273 // Single-element receive may either receive the element directly or
1274 // deserialize it from a buffer.
1275 template<typename T>
1276 status communicator::recv(int source, int tag, T& value) const
1277 {
1278 return this->recv_impl(source, tag, value, is_mpi_datatype<T>());
1279 }
1280
1281 template<typename T>
1282 status
1283 communicator::array_recv_impl(int source, int tag, T* values, int n,
1284 mpl::true_) const
1285 {
1286 status stat;
1287 BOOST_MPI_CHECK_RESULT(MPI_Recv,
1288 (const_cast<T*>(values), n,
1289 get_mpi_datatype<T>(*values),
1290 source, tag, MPI_Comm(*this), &stat.m_status));
1291 return stat;
1292 }
1293
// Serialized array receive: the packed message holds the sender's
// element count followed by the elements (see array_send_impl).
// If the sender shipped more elements than the caller's buffer holds,
// the first n elements are still deserialized (keeping the archive
// read consistent) before a range_error is thrown.
template<typename T>
status
communicator::array_recv_impl(int source, int tag, T* values, int n,
                              mpl::false_) const
{
  // Receive the message
  packed_iarchive ia(*this);
  status stat = recv(source, tag, ia);

  // Determine how much data we are going to receive
  int count;
  ia >> count;

  // Deserialize the data in the message, truncating to the caller's
  // capacity n if the sender sent more.
  boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
  ia >> arr;

  if (count > n) {
    boost::throw_exception(
      std::range_error("communicator::recv: message receive overflow"));
  }

  // Record the element count so status::count<T>() can report it.
  stat.m_count = count;
  return stat;
}
1319
// Vector of an MPI-datatype element: receives the size message first,
// resizes the vector, then receives the raw data directly into it.
// Mirrors send_vector(..., mpl::true_).
template<typename T, typename A>
status communicator::recv_vector(int source, int tag,
  std::vector<T,A>& value, mpl::true_ true_type) const
{
  // receive the vector size
  typename std::vector<T,A>::size_type size = 0;
  recv(source, tag, size);
  // size the vector
  value.resize(size);
  // receive the data
  return this->array_recv_impl(source, tag, value.data(), size, true_type);
}
1332
1333 template<typename T, typename A>
1334 status communicator::recv_vector(int source, int tag,
1335 std::vector<T,A>& value, mpl::false_ false_type) const
1336 {
1337 return this->recv_impl(source, tag, value, false_type);
1338 }
1339
1340 template<typename T, typename A>
1341 status communicator::recv(int source, int tag, std::vector<T,A>& value) const
1342 {
1343 return recv_vector(source, tag, value, is_mpi_datatype<T>());
1344 }
1345
1346 // Array receive must receive the elements directly into a buffer.
1347 template<typename T>
1348 status communicator::recv(int source, int tag, T* values, int n) const
1349 {
1350 return this->array_recv_impl(source, tag, values, n, is_mpi_datatype<T>());
1351 }
1352
1353
// Combined send/receive for a type with a native MPI datatype: maps
// directly onto MPI_Sendrecv.  The returned status describes the
// receive side.  const_cast on the send buffer: pre-MPI-3 bindings
// take a non-const pointer.
template<typename T>
status communicator::sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
                                   mpl::true_) const
{
  status stat;
  BOOST_MPI_CHECK_RESULT(MPI_Sendrecv,
                         (const_cast<T*>(&sval), 1,
                          get_mpi_datatype<T>(sval),
                          dest, stag,
                          &rval, 1,
                          get_mpi_datatype<T>(rval),
                          src, rtag,
                          MPI_Comm(*this), &stat.m_status));
  return stat;
}
1369
// Combined send/receive for serialized types: MPI_Sendrecv cannot be
// used because each side is a multi-message packed-archive protocol.
// Instead, post a non-blocking serialized send and receive and wait
// for both, which avoids the deadlock a blocking send+recv pair could
// cause.  Only the receive's status is returned, matching the
// MPI_Sendrecv-based overload.
template<typename T>
status communicator::sendrecv_impl(int dest, int stag, const T& sval, int src, int rtag, T& rval,
                                   mpl::false_) const
{
  int const SEND = 0;
  int const RECV = 1;
  request srrequests[2];
  srrequests[SEND] = this->isend_impl(dest, stag, sval, mpl::false_());
  srrequests[RECV] = this->irecv_impl(src, rtag, rval, mpl::false_());
  status srstatuses[2];
  wait_all(srrequests, srrequests + 2, srstatuses);
  return srstatuses[RECV];
}
1383
1384 template<typename T>
1385 status communicator::sendrecv(int dest, int stag, const T& sval, int src, int rtag, T& rval) const
1386 {
1387 return this->sendrecv_impl(dest, stag, sval, src, rtag, rval, is_mpi_datatype<T>());
1388 }
1389
1390
// We're sending a type that has an associated MPI datatype, so we
// map directly to that datatype with a single MPI_Isend.  NOTE: the
// caller must keep `value` alive until the request completes; the
// request does not copy it.
template<typename T>
request
communicator::isend_impl(int dest, int tag, const T& value, mpl::true_) const
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Isend,
                         (const_cast<T*>(&value), 1,
                          get_mpi_datatype<T>(value),
                          dest, tag, MPI_Comm(*this), &req.m_requests[0]));
  return req;
}
1404
// We're sending a type that does not have an associated MPI
// datatype, so it must be serialized then sent as MPI_PACKED data,
// to be deserialized on the receiver side.  The archive is heap-
// allocated and attached to the request (m_data) so the send buffer
// outlives this call and is freed when the request is destroyed.
template<typename T>
request
communicator::isend_impl(int dest, int tag, const T& value, mpl::false_) const
{
  shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
  *archive << value;
  request result = isend(dest, tag, *archive);
  result.m_data = archive;
  return result;
}
1418
1419 // Single-element receive may either send the element directly or
1420 // serialize it via a buffer.
1421 template<typename T>
1422 request communicator::isend(int dest, int tag, const T& value) const
1423 {
1424 return this->isend_impl(dest, tag, value, is_mpi_datatype<T>());
1425 }
1426
// Non-blocking send of an array whose element type has a native MPI
// datatype: one MPI_Isend over the caller's buffer.  The caller must
// keep `values` alive until the request completes.
template<typename T>
request
communicator::array_isend_impl(int dest, int tag, const T* values, int n,
                               mpl::true_) const
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Isend,
                         (const_cast<T*>(values), n,
                          get_mpi_datatype<T>(*values),
                          dest, tag, MPI_Comm(*this), &req.m_requests[0]));
  return req;
}
1439
// Non-blocking send of an array whose element type must be
// serialized: count and elements are packed into a heap-allocated
// archive that is attached to the request (m_data) so the buffer
// survives until completion.
template<typename T>
request
communicator::array_isend_impl(int dest, int tag, const T* values, int n,
                               mpl::false_) const
{
  shared_ptr<packed_oarchive> archive(new packed_oarchive(*this));
  *archive << n << boost::serialization::make_array(values, n);
  request result = isend(dest, tag, *archive);
  result.m_data = archive;
  return result;
}
1451
1452
1453 // Array isend must send the elements directly
1454 template<typename T>
1455 request communicator::isend(int dest, int tag, const T* values, int n) const
1456 {
1457 return array_isend_impl(dest, tag, values, n, is_mpi_datatype<T>());
1458 }
1459
1460 namespace detail {
1461 /**
1462 * Internal data structure that stores everything required to manage
1463 * the receipt of serialized data via a request object.
1464 */
1465 template<typename T>
1466 struct serialized_irecv_data
1467 {
1468 serialized_irecv_data(const communicator& comm, int source, int tag,
1469 T& value)
1470 : comm(comm), source(source), tag(tag), ia(comm), value(value)
1471 {
1472 }
1473
1474 void deserialize(status& stat)
1475 {
1476 ia >> value;
1477 stat.m_count = 1;
1478 }
1479
1480 communicator comm;
1481 int source;
1482 int tag;
1483 std::size_t count;
1484 packed_iarchive ia;
1485 T& value;
1486 };
1487
// Specialization for receiving directly into a caller-supplied
// packed_iarchive: the archive itself is the destination, so no
// deserialization step is needed and the archive is held by
// reference rather than owned.
template<>
struct serialized_irecv_data<packed_iarchive>
{
  serialized_irecv_data(const communicator& comm, int source, int tag,
                        packed_iarchive& ia)
    : comm(comm), source(source), tag(tag), ia(ia) { }

  void deserialize(status&) { /* Do nothing. */ }

  communicator comm;    // communicator the receive was posted on
  int source;           // rank the message is expected from
  int tag;              // message tag
  std::size_t count;    // payload size; filled in by the first (count) receive
  packed_iarchive& ia;  // caller's archive, received into directly
};
1503
1504 /**
1505 * Internal data structure that stores everything required to manage
1506 * the receipt of an array of serialized data via a request object.
1507 */
1508 template<typename T>
1509 struct serialized_array_irecv_data
1510 {
1511 serialized_array_irecv_data(const communicator& comm, int source, int tag,
1512 T* values, int n)
1513 : comm(comm), source(source), tag(tag), ia(comm), values(values), n(n)
1514 {
1515 }
1516
1517 void deserialize(status& stat);
1518
1519 communicator comm;
1520 int source;
1521 int tag;
1522 std::size_t count;
1523 packed_iarchive ia;
1524 T* values;
1525 int n;
1526 };
1527
// Unpack the received archive into the caller's array.  Mirrors
// communicator::array_recv_impl(mpl::false_): the first n elements
// are deserialized even on overflow (keeping the archive read
// consistent), then a range_error is thrown if the sender shipped
// more than fits.
template<typename T>
void serialized_array_irecv_data<T>::deserialize(status& stat)
{
  // Determine how much data we are going to receive
  int count;
  ia >> count;

  // Deserialize the data in the message, truncated to capacity n.
  boost::serialization::array_wrapper<T> arr(values, count > n? n : count);
  ia >> arr;

  if (count > n) {
    boost::throw_exception(
      std::range_error("communicator::recv: message receive overflow"));
  }

  // Record the element count so status::count<T>() can report it.
  stat.m_count = count;
}
1546 }
1547
// Handler driving the two-phase serialized receive for wait/test.
// m_requests[0] is the already-posted receive of the payload size;
// m_requests[1] starts as MPI_REQUEST_NULL and is filled in here once
// the size is known and the payload receive can be posted.  ra_wait
// blocks until both messages arrive; ra_test polls and returns an
// empty optional if either phase is still pending.
template<typename T>
optional<status>
request::handle_serialized_irecv(request* self, request_action action)
{
  typedef detail::serialized_irecv_data<T> data_t;
  shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);

  if (action == ra_wait) {
    status stat;
    if (self->m_requests[1] == MPI_REQUEST_NULL) {
      // Wait for the count message to complete
      BOOST_MPI_CHECK_RESULT(MPI_Wait,
                             (self->m_requests, &stat.m_status));
      // Resize our buffer and get ready to receive its data.  Use the
      // actual source/tag from the status so a wildcard receive is
      // matched with the same sender's payload.
      data->ia.resize(data->count);
      BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                             (data->ia.address(), data->ia.size(), MPI_PACKED,
                              stat.source(), stat.tag(),
                              MPI_Comm(data->comm), self->m_requests + 1));
    }

    // Wait until we have received the entire message
    BOOST_MPI_CHECK_RESULT(MPI_Wait,
                           (self->m_requests + 1, &stat.m_status));

    data->deserialize(stat);
    return stat;
  } else if (action == ra_test) {
    status stat;
    int flag = 0;

    if (self->m_requests[1] == MPI_REQUEST_NULL) {
      // Check if the count message has completed
      BOOST_MPI_CHECK_RESULT(MPI_Test,
                             (self->m_requests, &flag, &stat.m_status));
      if (flag) {
        // Resize our buffer and get ready to receive its data
        data->ia.resize(data->count);
        BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                               (data->ia.address(), data->ia.size(),MPI_PACKED,
                                stat.source(), stat.tag(),
                                MPI_Comm(data->comm), self->m_requests + 1));
      } else
        return optional<status>(); // We have not finished yet
    }

    // Check if we have received the message data
    BOOST_MPI_CHECK_RESULT(MPI_Test,
                           (self->m_requests + 1, &flag, &stat.m_status));
    if (flag) {
      data->deserialize(stat);
      return stat;
    } else
      return optional<status>();
  } else {
    // Unknown action: report no completed status.
    return optional<status>();
  }
}
1606
// Array variant of handle_serialized_irecv; identical two-phase
// protocol (size message in m_requests[0], payload in m_requests[1]),
// differing only in the data_t whose deserialize() unpacks an array
// instead of a single object.
template<typename T>
optional<status>
request::handle_serialized_array_irecv(request* self, request_action action)
{
  typedef detail::serialized_array_irecv_data<T> data_t;
  shared_ptr<data_t> data = static_pointer_cast<data_t>(self->m_data);

  if (action == ra_wait) {
    status stat;
    if (self->m_requests[1] == MPI_REQUEST_NULL) {
      // Wait for the count message to complete
      BOOST_MPI_CHECK_RESULT(MPI_Wait,
                             (self->m_requests, &stat.m_status));
      // Resize our buffer and get ready to receive its data.  The
      // payload receive targets the actual source/tag reported for
      // the count message, so wildcard receives stay matched.
      data->ia.resize(data->count);
      BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                             (data->ia.address(), data->ia.size(), MPI_PACKED,
                              stat.source(), stat.tag(),
                              MPI_Comm(data->comm), self->m_requests + 1));
    }

    // Wait until we have received the entire message
    BOOST_MPI_CHECK_RESULT(MPI_Wait,
                           (self->m_requests + 1, &stat.m_status));

    data->deserialize(stat);
    return stat;
  } else if (action == ra_test) {
    status stat;
    int flag = 0;

    if (self->m_requests[1] == MPI_REQUEST_NULL) {
      // Check if the count message has completed
      BOOST_MPI_CHECK_RESULT(MPI_Test,
                             (self->m_requests, &flag, &stat.m_status));
      if (flag) {
        // Resize our buffer and get ready to receive its data
        data->ia.resize(data->count);
        BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                               (data->ia.address(), data->ia.size(),MPI_PACKED,
                                stat.source(), stat.tag(),
                                MPI_Comm(data->comm), self->m_requests + 1));
      } else
        return optional<status>(); // We have not finished yet
    }

    // Check if we have received the message data
    BOOST_MPI_CHECK_RESULT(MPI_Test,
                           (self->m_requests + 1, &flag, &stat.m_status));
    if (flag) {
      data->deserialize(stat);
      return stat;
    } else
      return optional<status>();
  } else {
    // Unknown action: report no completed status.
    return optional<status>();
  }
}
1665
1666 // We're receiving a type that has an associated MPI datatype, so we
1667 // map directly to that datatype.
1668 template<typename T>
1669 request
1670 communicator::irecv_impl(int source, int tag, T& value, mpl::true_) const
1671 {
1672 request req;
1673 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1674 (const_cast<T*>(&value), 1,
1675 get_mpi_datatype<T>(value),
1676 source, tag, MPI_Comm(*this), &req.m_requests[0]));
1677 return req;
1678 }
1679
// Non-blocking receive of a serialized type: posts only the first
// phase of the two-message protocol — the receive of the payload
// size into data->count.  The handler (handle_serialized_irecv)
// posts the payload receive once the size arrives; the shared data_t
// attached to the request keeps all state alive meanwhile.
template<typename T>
request
communicator::irecv_impl(int source, int tag, T& value, mpl::false_) const
{
  typedef detail::serialized_irecv_data<T> data_t;
  shared_ptr<data_t> data(new data_t(*this, source, tag, value));
  request req;
  req.m_data = data;
  req.m_handler = request::handle_serialized_irecv<T>;

  BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                         (&data->count, 1,
                          get_mpi_datatype<std::size_t>(data->count),
                          source, tag, MPI_Comm(*this), &req.m_requests[0]));

  return req;
}
1697
1698 template<typename T>
1699 request
1700 communicator::irecv(int source, int tag, T& value) const
1701 {
1702 return this->irecv_impl(source, tag, value, is_mpi_datatype<T>());
1703 }
1704
1705 template<typename T>
1706 request
1707 communicator::array_irecv_impl(int source, int tag, T* values, int n,
1708 mpl::true_) const
1709 {
1710 request req;
1711 BOOST_MPI_CHECK_RESULT(MPI_Irecv,
1712 (const_cast<T*>(values), n,
1713 get_mpi_datatype<T>(*values),
1714 source, tag, MPI_Comm(*this), &req.m_requests[0]));
1715 return req;
1716 }
1717
// Non-blocking receive of a serialized array: posts the first phase
// (payload-size receive) of the two-message protocol; the handler
// (handle_serialized_array_irecv) posts the payload receive and
// deserializes into the caller's array when the data arrives.
template<typename T>
request
communicator::array_irecv_impl(int source, int tag, T* values, int n,
                               mpl::false_) const
{
  typedef detail::serialized_array_irecv_data<T> data_t;
  shared_ptr<data_t> data(new data_t(*this, source, tag, values, n));
  request req;
  req.m_data = data;
  req.m_handler = request::handle_serialized_array_irecv<T>;

  BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                         (&data->count, 1,
                          get_mpi_datatype<std::size_t>(data->count),
                          source, tag, MPI_Comm(*this), &req.m_requests[0]));

  return req;
}
1736
1737
1738 // Array receive must receive the elements directly into a buffer.
1739 template<typename T>
1740 request communicator::irecv(int source, int tag, T* values, int n) const
1741 {
1742 return this->array_irecv_impl(source, tag, values, n, is_mpi_datatype<T>());
1743 }
1744
1745 /**
1746 * INTERNAL ONLY
1747 */
1748 template<>
1749 BOOST_MPI_DECL void
1750 communicator::send<packed_oarchive>(int dest, int tag,
1751 const packed_oarchive& ar) const;
1752
1753 /**
1754 * INTERNAL ONLY
1755 */
1756 template<>
1757 BOOST_MPI_DECL void
1758 communicator::send<packed_skeleton_oarchive>
1759 (int dest, int tag, const packed_skeleton_oarchive& ar) const;
1760
1761 /**
1762 * INTERNAL ONLY
1763 */
1764 template<>
1765 BOOST_MPI_DECL void
1766 communicator::send<content>(int dest, int tag, const content& c) const;
1767
1768 /**
1769 * INTERNAL ONLY
1770 */
1771 template<>
1772 BOOST_MPI_DECL status
1773 communicator::recv<packed_iarchive>(int source, int tag,
1774 packed_iarchive& ar) const;
1775
1776 /**
1777 * INTERNAL ONLY
1778 */
1779 template<>
1780 BOOST_MPI_DECL status
1781 communicator::recv<packed_skeleton_iarchive>
1782 (int source, int tag, packed_skeleton_iarchive& ar) const;
1783
1784 /**
1785 * INTERNAL ONLY
1786 */
1787 template<>
1788 BOOST_MPI_DECL status
1789 communicator::recv<const content>(int source, int tag,
1790 const content& c) const;
1791
1792 /**
1793 * INTERNAL ONLY
1794 */
1795 template<>
1796 inline status
1797 communicator::recv<content>(int source, int tag,
1798 content& c) const
1799 {
1800 return recv<const content>(source,tag,c);
1801 }
1802
1803 /**
1804 * INTERNAL ONLY
1805 */
1806 template<>
1807 BOOST_MPI_DECL request
1808 communicator::isend<packed_oarchive>(int dest, int tag,
1809 const packed_oarchive& ar) const;
1810
1811 /**
1812 * INTERNAL ONLY
1813 */
1814 template<>
1815 BOOST_MPI_DECL request
1816 communicator::isend<packed_skeleton_oarchive>
1817 (int dest, int tag, const packed_skeleton_oarchive& ar) const;
1818
1819 /**
1820 * INTERNAL ONLY
1821 */
1822 template<>
1823 BOOST_MPI_DECL request
1824 communicator::isend<content>(int dest, int tag, const content& c) const;
1825
1826 /**
1827 * INTERNAL ONLY
1828 */
1829 template<>
1830 BOOST_MPI_DECL request
1831 communicator::irecv<packed_skeleton_iarchive>
1832 (int source, int tag, packed_skeleton_iarchive& ar) const;
1833
1834 /**
1835 * INTERNAL ONLY
1836 */
1837 template<>
1838 BOOST_MPI_DECL request
1839 communicator::irecv<const content>(int source, int tag,
1840 const content& c) const;
1841
1842 /**
1843 * INTERNAL ONLY
1844 */
1845 template<>
1846 inline request
1847 communicator::irecv<content>(int source, int tag,
1848 content& c) const
1849 {
1850 return irecv<const content>(source, tag, c);
1851 }
1852
1853
1854 } } // end namespace boost::mpi
1855
1856 // If the user has already included skeleton_and_content.hpp, include
1857 // the code to send/receive skeletons and content.
1858 #ifdef BOOST_MPI_SKELETON_AND_CONTENT_HPP
1859 # include <boost/mpi/detail/communicator_sc.hpp>
1860 #endif
1861
1862 #ifdef BOOST_MSVC
1863 # pragma warning(pop)
1864 #endif
1865
1866 #endif // BOOST_MPI_COMMUNICATOR_HPP