// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Copyright (C) 2002 Brad King and Douglas Gregor

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Authors: Douglas Gregor
//          Andrew Lumsdaine
//          Brian Barrett
#ifndef BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
#define BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP

#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif

#include <boost/assert.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/page_rank.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/container_traits.hpp>
// Define WANT_MPI_ONESIDED to build the experimental code path below, which
// pushes rank contributions with MPI-2 one-sided operations (MPI_Accumulate
// over MPI windows) instead of the distributed property map reduction.
// #define WANT_MPI_ONESIDED 1

namespace boost { namespace graph { namespace distributed {

namespace detail {
#ifdef WANT_MPI_ONESIDED
  template<typename Graph, typename RankMap, typename owner_map_t>
  void page_rank_step(const Graph& g, RankMap from_rank, MPI_Win to_win,
                      typename property_traits<RankMap>::value_type damping,
                      owner_map_t owner)
  {
    typedef typename property_traits<RankMap>::value_type rank_type;
    int me, ret;
    MPI_Comm_rank(MPI_COMM_WORLD, &me);

    // MPI_Accumulate is not required to store the value of the data
    // being sent, only the address. The value of the memory location
    // must not change until the end of the access epoch, meaning the
    // call to MPI_Fence. We therefore store the updated value back
    // into the from_rank map before the accumulate rather than using
    // a temporary. We're going to reset the values in the from_rank
    // before the end of page_rank_step() anyway, so this isn't a huge
    // deal. But MPI-2 One-sided is an abomination.
    BGL_FORALL_VERTICES_T(u, g, Graph) {
      put(from_rank, u, (damping * get(from_rank, u) / out_degree(u, g)));
      BGL_FORALL_ADJ_T(u, v, g, Graph) {
        ret = MPI_Accumulate(&(from_rank[u]),
                             1, MPI_DOUBLE,
                             get(owner, v), local(v),
                             1, MPI_DOUBLE, MPI_SUM, to_win);
        BOOST_ASSERT(MPI_SUCCESS == ret);
      }
    }
    MPI_Win_fence(0, to_win);

    // Set new rank maps for the other map. Do this now to get around
    // the stupid synchronization rules of MPI-2 One-sided
    BGL_FORALL_VERTICES_T(v, g, Graph) put(from_rank, v, rank_type(1 - damping));
  }
#endif

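  // Reducer installed on the distributed rank maps in the default
  // (non one-sided) path below: a value requested before anything has been
  // written to it reads as zero, and concurrent contributions for the same
  // vertex are combined by addition, which is exactly the accumulation
  // PageRank needs when several processes push rank to the same target.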
  template<typename T>
  struct rank_accumulate_reducer {
    BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

    template<typename K>
    T operator()(const K&) const { return T(0); }

    template<typename K>
    T operator()(const K&, const T& x, const T& y) const { return x + y; }
  };
} // end namespace detail

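// Driver shared by both communication strategies: two rank maps alternate
// roles from iteration to iteration (to_map_2 tracks which map holds the
// freshest ranks), each step pushes damped rank from every vertex along its
// out-edges, and the loop stops once the `done` functor is satisfied on the
// map that was written last.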
template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank_impl(const Graph& g, RankMap rank_map, Done done,
               typename property_traits<RankMap>::value_type damping,
               typename graph_traits<Graph>::vertices_size_type n,
               RankMap2 rank_map2)
{
  typedef typename property_traits<RankMap>::value_type rank_type;

  int me;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  typename property_map<Graph, vertex_owner_t>::const_type
    owner = get(vertex_owner, g);
  (void)owner;

  typedef typename boost::graph::parallel::process_group_type<Graph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;

  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);

  BOOST_ASSERT(me == id);

  rank_type initial_rank = rank_type(rank_type(1) / n);
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, initial_rank);

#ifdef WANT_MPI_ONESIDED

  BOOST_ASSERT(sizeof(rank_type) == sizeof(double));

  bool to_map_2 = true;
  MPI_Win win, win2;

  MPI_Win_create(&(rank_map[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win);
  MPI_Win_set_name(win, "rank_map_win");
  MPI_Win_create(&(rank_map2[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win2);
  MPI_Win_set_name(win2, "rank_map2_win");

  // set initial rank maps for the first iteration...
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map2, v, rank_type(1 - damping));

  MPI_Win_fence(0, win);
  MPI_Win_fence(0, win2);

  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    if (to_map_2) {
      graph::distributed::detail::page_rank_step(g, rank_map, win2, damping, owner);
      to_map_2 = false;
    } else {
      graph::distributed::detail::page_rank_step(g, rank_map2, win, damping, owner);
      to_map_2 = true;
    }
  }
  synchronize(boost::graph::parallel::process_group(g));

  MPI_Win_free(&win);
  MPI_Win_free(&win2);

#else
  // The ranks accumulate after each step: remote contributions to a vertex
  // are summed by rank_accumulate_reducer, and cm_flush | cm_reset makes
  // synchronize() deliver the locally written values and then reset the
  // ghost cells to the reducer's default (zero) for the next step.
  rank_map.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map2.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
  rank_map2.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);

  bool to_map_2 = true;
  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    /**
     * PageRank can be implemented slightly more efficiently on a
     * bidirectional graph than on an incidence graph. However,
     * distributed PageRank requires that we have the rank of the
     * source vertex available locally, so we force the incidence
     * graph implementation, which pushes rank from source to
     * target.
     */
    typedef incidence_graph_tag category;
    if (to_map_2) {
      graph::detail::page_rank_step(g, rank_map, rank_map2, damping,
                                    category());
      to_map_2 = false;
    } else {
      graph::detail::page_rank_step(g, rank_map2, rank_map, damping,
                                    category());
      to_map_2 = true;
    }
    using boost::graph::parallel::process_group;
    synchronize(process_group(g));
  }

  rank_map.reset();
#endif

  if (!to_map_2)
    BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, get(rank_map2, v));
}

template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank(const Graph& g, RankMap rank_map, Done done,
          typename property_traits<RankMap>::value_type damping,
          typename graph_traits<Graph>::vertices_size_type n,
          RankMap2 rank_map2
          BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
  (page_rank_impl)(g, rank_map, done, damping, n, rank_map2);
}

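// Rough usage sketch for the page_rank overload above (illustrative only).
// It assumes a graph distributed over an mpi_process_group, which requires
// <boost/graph/distributed/adjacency_list.hpp> and
// <boost/graph/distributed/mpi_process_group.hpp>, plus two distributed
// vertex rank maps `rank` and `rank2` over double and the global vertex
// count `n`, all supplied by the caller:
//
//   typedef boost::adjacency_list<
//       boost::vecS,
//       boost::distributedS<boost::graph::distributed::mpi_process_group,
//                           boost::vecS>,
//       boost::directedS> Digraph;
//   Digraph g;  // edges elided
//   ...
//   boost::graph::page_rank(g, rank,
//                           boost::graph::n_iterations(20),  // Done functor
//                           0.85,                            // damping
//                           n, rank2);
//
// graph::n_iterations comes from <boost/graph/page_rank.hpp>, already
// included here; any functor callable as done(rank_map, g) can serve as the
// termination test.
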
// Strip "dangling" vertices (those with no out-edges) before ranking.
// Removing a vertex can leave its predecessors dangling in turn, so the
// sweep repeats until no pass shrinks the vertex count.
template<typename MutableGraph>
void
remove_dangling_links(MutableGraph& g
                      BOOST_GRAPH_ENABLE_IF_MODELS_PARM(MutableGraph,
                                                        distributed_graph_tag))
{
  typename graph_traits<MutableGraph>::vertices_size_type old_n;
  do {
    old_n = num_vertices(g);

    typename graph_traits<MutableGraph>::vertex_iterator vi, vi_end;
    for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; /* in loop */) {
      typename graph_traits<MutableGraph>::vertex_descriptor v = *vi++;
      if (out_degree(v, g) == 0) {
        clear_vertex(v, g);
        remove_vertex(v, g);
      }
    }
  } while (num_vertices(g) < old_n);
}

} // end namespace distributed

using distributed::page_rank;
using distributed::remove_dangling_links;

} } // end namespace boost::graph

#endif // BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP