]> git.proxmox.com Git - ceph.git/blame - ceph/src/boost/libs/graph_parallel/doc/html/mpi_bsp_process_group.html
bump version to 12.2.2-pve1
[ceph.git] / ceph / src / boost / libs / graph_parallel / doc / html / mpi_bsp_process_group.html
CommitLineData
7c673cae
FG
1<?xml version="1.0" encoding="utf-8" ?>
2<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
3<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
4<head>
5<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
6<meta name="generator" content="Docutils 0.6: http://docutils.sourceforge.net/" />
7<title>Parallel BGL MPI BSP Process Group</title>
8<link rel="stylesheet" href="../../../../rst.css" type="text/css" />
9</head>
10<body>
11<div class="document" id="logo-mpi-bsp-process-group">
12<h1 class="title"><a class="reference external" href="http://www.osl.iu.edu/research/pbgl"><img align="middle" alt="Parallel BGL" class="align-middle" src="pbgl-logo.png" /></a> MPI BSP Process Group</h1>
13
14<!-- Copyright (C) 2004-2009 The Trustees of Indiana University.
15Use, modification and distribution is subject to the Boost Software
16License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
17http://www.boost.org/LICENSE_1_0.txt) -->
18<div class="contents topic" id="contents">
19<p class="topic-title first">Contents</p>
20<ul class="simple">
21<li><a class="reference internal" href="#introduction" id="id1">Introduction</a></li>
22<li><a class="reference internal" href="#where-defined" id="id2">Where Defined</a></li>
23<li><a class="reference internal" href="#reference" id="id3">Reference</a></li>
24</ul>
25</div>
26<div class="section" id="introduction">
27<h1><a class="toc-backref" href="#id1">Introduction</a></h1>
28<p>The MPI <tt class="docutils literal"><span class="pre">mpi_process_group</span></tt> is an implementation of the <a class="reference external" href="process_group.html">process
29group</a> interface using the Message Passing Interface (MPI). It is the
30primary process group used in the Parallel BGL at this time.</p>
31</div>
32<div class="section" id="where-defined">
33<h1><a class="toc-backref" href="#id2">Where Defined</a></h1>
34<p>Header <tt class="docutils literal"><span class="pre">&lt;boost/graph/distributed/mpi_process_group.hpp&gt;</span></tt></p>
35</div>
36<div class="section" id="reference">
37<h1><a class="toc-backref" href="#id3">Reference</a></h1>
38<pre class="literal-block">
39namespace boost { namespace graph { namespace distributed {
40
41class mpi_process_group
42{
43public:
44 typedef boost::mpi::communicator communicator_type;
45
46 // Process group constructors
47 mpi_process_group(communicator_type comm = communicator_type());
48 mpi_process_group(std::size_t num_headers, std::size_t buffer_size,
49 communicator_type comm = communicator_type());
50
51 mpi_process_group();
52 mpi_process_group(const mpi_process_group&amp;, boost::parallel::attach_distributed_object);
53
54 // Triggers
55 template&lt;typename Type, typename Handler&gt;
56 void trigger(int tag, const Handler&amp; handler);
57
58 template&lt;typename Type, typename Handler&gt;
59 void trigger_with_reply(int tag, const Handler&amp; handler);
60
61 trigger_receive_context trigger_context() const;
62
63 // Helper operations
64 void poll();
65 mpi_process_group base() const;
66};
67
68// Process query
69int process_id(const mpi_process_group&amp;);
70int num_processes(const mpi_process_group&amp;);
71
72// Message transmission
73template&lt;typename T&gt;
74 void send(const mpi_process_group&amp; pg, int dest, int tag, const T&amp; value);
75
76template&lt;typename T&gt;
77 void receive(const mpi_process_group&amp; pg, int source, int tag, T&amp; value);
78
79optional&lt;std::pair&lt;int, int&gt; &gt; probe(const mpi_process_group&amp; pg);
80
81// Synchronization
82void synchronize(const mpi_process_group&amp; pg);
83
84// Out-of-band communication
85template&lt;typename T&gt;
86 void send_oob(const mpi_process_group&amp; pg, int dest, int tag, const T&amp; value);
87
88template&lt;typename T, typename U&gt;
89 void
90 send_oob_with_reply(const mpi_process_group&amp; pg, int dest, int
91 tag, const T&amp; send_value, U&amp; receive_value);
92
93template&lt;typename T&gt;
94 void receive_oob(const mpi_process_group&amp; pg, int source, int tag, T&amp; value);
95
96} } }
97</pre>
98<p>Since the <tt class="docutils literal"><span class="pre">mpi_process_group</span></tt> is an implementation of the <a class="reference external" href="process_group.html">process
99group</a> interface, we omit the description of most of the functions in
100the prototype. Two constructors deserve special mention:</p>
101<pre class="literal-block">
102mpi_process_group(communicator_type comm = communicator_type());
103</pre>
104<p>The constructor can take an optional MPI communicator. By default, a communicator
105constructed from MPI_COMM_WORLD is used.</p>
106<pre class="literal-block">
107mpi_process_group(std::size_t num_headers, std::size_t buffer_size,
108 communicator_type comm = communicator_type());
109</pre>
110<p>For performance fine-tuning, the maximum number of headers in a message batch
111(num_headers) and the maximum combined size of batched messages (buffer_size)
112can be specified. The maximum message size of a batch is
11316*num_headers+buffer_size. Sensible default values have been found by optimizing
114a typical application on a cluster with an Ethernet network, and are num_headers=64k
115and buffer_size=1MB, for a total maximum batch message size of 2MB.</p>
116<hr class="docutils" />
117<p>Copyright (C) 2007 Douglas Gregor</p>
118<p>Copyright (C) 2007 Matthias Troyer</p>
119</div>
120</div>
121<div class="footer">
122<hr class="footer" />
123Generated on: 2009-05-31 00:22 UTC.
124Generated by <a class="reference external" href="http://docutils.sourceforge.net/">Docutils</a> from <a class="reference external" href="http://docutils.sourceforge.net/rst.html">reStructuredText</a> source.
125
126</div>
127</body>
128</html>