1 // Copyright (C) 2004-2008 The Trustees of Indiana University.
3 // Use, modification and distribution is subject to the Boost Software
4 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
5 // http://www.boost.org/LICENSE_1_0.txt)
7 // Authors: Douglas Gregor
11 // The placement of this #include probably looks very odd relative to
12 // the #ifndef/#define pair below. However, this placement is
13 // extremely important to allow the various property map headers to be
14 // included in any order.
15 #include <boost/property_map/property_map.hpp>
17 #ifndef BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP
18 #define BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP
20 #include <boost/assert.hpp>
21 #include <boost/type_traits/is_base_and_derived.hpp>
22 #include <boost/shared_ptr.hpp>
23 #include <boost/weak_ptr.hpp>
24 #include <boost/optional.hpp>
25 #include <boost/property_map/parallel/process_group.hpp>
26 #include <boost/function/function1.hpp>
29 #include <boost/property_map/parallel/basic_reduce.hpp>
30 #include <boost/property_map/parallel/detail/untracked_pair.hpp>
31 #include <boost/type_traits/is_same.hpp>
32 #include <boost/property_map/parallel/local_property_map.hpp>
34 #include <boost/version.hpp>
35 #include <boost/property_map/parallel/unsafe_serialize.hpp>
36 #include <boost/multi_index_container.hpp>
37 #include <boost/multi_index/hashed_index.hpp>
38 #include <boost/multi_index/member.hpp>
39 #include <boost/multi_index/sequenced_index.hpp>
41 // Serialization functions for constructs we use
42 #include <boost/serialization/utility.hpp>
44 namespace boost { namespace parallel {
47 /**************************************************************************
48 * Metafunction that degrades an Lvalue Property Map category tag to
49 * a Read Write Property Map category tag.
50 **************************************************************************/
51 template<bool IsLvaluePropertyMap>
52 struct make_nonlvalue_property_map
54 template<typename T> struct apply { typedef T type; };
58 struct make_nonlvalue_property_map<true>
63 typedef read_write_property_map_tag type;
67 /**************************************************************************
68 * Performs a "put" on a property map so long as the property map is
69 * a Writable Property Map or a mutable Lvalue Property Map. This
70 * is required because the distributed property map's message
71 * handler handles "put" messages even for a const property map,
72 * although receipt of a "put" message is ill-formed.
73 **************************************************************************/
74 template<bool IsLvaluePropertyMap>
75 struct maybe_put_in_lvalue_pm
77 template<typename PropertyMap, typename Key, typename Value>
79 do_put(PropertyMap, const Key&, const Value&)
80 { BOOST_ASSERT(false); }
84 struct maybe_put_in_lvalue_pm<true>
86 template<typename PropertyMap, typename Key, typename Value>
88 do_put(PropertyMap pm, const Key& key, const Value& value)
96 template<typename PropertyMap, typename Key, typename Value>
98 maybe_put_impl(PropertyMap pm, const Key& key, const Value& value,
99 writable_property_map_tag)
106 template<typename PropertyMap, typename Key, typename Value>
108 maybe_put_impl(PropertyMap pm, const Key& key, const Value& value,
109 lvalue_property_map_tag)
111 typedef typename property_traits<PropertyMap>::value_type value_type;
112 typedef typename property_traits<PropertyMap>::reference reference;
113 // DPG TBD: Some property maps are improperly characterized as
114 // lvalue_property_maps, when in fact they do not provide true
115 // references. The most typical example is those property maps
116 // built from vector<bool> and its iterators, which deal with
117 // proxies. We don't want to mischaracterize these as not having a
118 // "put" operation, so we only consider an lvalue_property_map as
119 // constant if its reference is const value_type&. In fact, this
120 // isn't even quite correct (think of a
121 // vector<bool>::const_iterator), but at present C++ doesn't
122 // provide us with any alternatives.
123 typedef is_same<const value_type&, reference> is_constant;
125 maybe_put_in_lvalue_pm<(!is_constant::value)>::do_put(pm, key, value);
128 template<typename PropertyMap, typename Key, typename Value>
130 maybe_put_impl(PropertyMap, const Key&, const Value&, ...)
131 { BOOST_ASSERT(false); }
133 template<typename PropertyMap, typename Key, typename Value>
135 maybe_put(PropertyMap pm, const Key& key, const Value& value)
137 maybe_put_impl(pm, key, value,
138 typename property_traits<PropertyMap>::category());
140 } // end namespace detail
142 /** The consistency model used by the distributed property map. */
enum consistency_model {
  // Push local writes to the owner of each key (cm_forward is referenced
  // by put(), which sends a "put" message only when this bit is set).
  cm_forward = 1 << 0,
  // Pull the owner's values back into local ghost cells.
  cm_backward = 1 << 1,
  cm_bidirectional = cm_forward | cm_backward,
  // Flush all values destined for remote processors at synchronization.
  cm_flush = 1 << 2,
  // Reset all ghost cells to their default values at synchronization.
  cm_reset = 1 << 3,
  // Clear out all ghost cells at synchronization.
  cm_clear = 1 << 4
};
152 /** Distributed property map adaptor.
154 * The distributed property map adaptor is a property map whose
155 * stored values are distributed across multiple non-overlapping
156 * memory spaces on different processes. Values local to the current
157 * process are stored within a local property map and may be
158 * immediately accessed via @c get and @c put. Values stored on
159 * remote processes may also be access via @c get and @c put, but the
160 * behavior differs slightly:
162 * - @c put operations update a local ghost cell and send a "put"
163 * message to the process that owns the value. The owner is free to
164 * update its own "official" value or may ignore the put request.
166 * - @c get operations returns the contents of the local ghost
167 * cell. If no ghost cell is available, one is created using the
168 * default value provided by the "reduce" operation. See, e.g.,
169 * @ref basic_reduce and @ref property_reduce.
171 * Using distributed property maps requires a bit more care than using
172 * local, sequential property maps. While the syntax and semantics are
173 * similar, distributed property maps may contain out-of-date
174 * information that can only be guaranteed to be synchronized by
175 * calling the @ref synchronize function in all processes.
177 * To address the issue of out-of-date values, distributed property
178 * maps are supplied with a reduction operation. The reduction
179 * operation has two roles:
181 * -# When a value is needed for a remote key but no value is
182 * immediately available, the reduction operation provides a
183 * suitable default. For instance, a distributed property map
184 * storing distances may have a reduction operation that returns
185 * an infinite value as the default, whereas a distributed
186 * property map for vertex colors may return white as the
 * default.
189 * -# When a value is received from a remote process, the process
190 * owning the key associated with that value must determine which
191 * value---the locally stored value, the value received from a
192 * remote process, or some combination of the two---will be
193 * stored as the "official" value in the property map. The
194 * reduction operation transforms the local and remote values
195 * into the "official" value to be stored.
197 * @tparam ProcessGroup the type of the process group over which the
198 * property map is distributed and is also the medium for
 * communication.
201 * @tparam StorageMap the type of the property map that will
202 * store values for keys local to this processor. The @c value_type of
203 * this property map will become the @c value_type of the distributed
204 * property map. The distributed property map models the same property
205 * map concepts as the @c LocalPropertyMap, with one exception: a
206 * distributed property map cannot be an LvaluePropertyMap (because
207 * remote values are not addressable), and is therefore limited to
208 * ReadWritePropertyMap.
// NOTE(review): This listing appears to be truncated -- the embedded
// original line numbers (210, 211, 214, ...) skip values, and several
// braces, access specifiers, enumerators, and member declarations are
// missing (e.g. the 'property_map_put' enumerator referenced by
// request_put(), the 'struct data_t' header, and the class's closing
// brace). The code below is preserved byte-for-byte; restore the missing
// lines from the canonical Boost source before attempting to compile.
210 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
211 class distributed_property_map
214 /// The key type of the property map.
215 typedef typename property_traits<GlobalMap>::key_type key_type;
217 /// The value type of the property map.
218 typedef typename property_traits<StorageMap>::value_type value_type;
219 typedef typename property_traits<StorageMap>::reference reference;
220 typedef ProcessGroup process_group_type;
223 typedef distributed_property_map self_type;
224 typedef typename property_traits<StorageMap>::category local_category;
225 typedef typename property_traits<StorageMap>::key_type local_key_type;
226 typedef typename property_traits<GlobalMap>::value_type owner_local_pair;
227 typedef typename ProcessGroup::process_id_type process_id_type;
// Message tags used when communicating over the process group;
// request_put() below sends with tag property_map_put.
229 enum property_map_messages {
230 /** A request to store a value in a property map. The message
231 * contains a std::pair<key, data>.
235 /** A request to retrieve a particular value in a property
236 * map. The message contains a key. The owner of that key will
237 * reply with a value.
241 /** A request to update values stored on a remote processor. The
242 * message contains a vector of keys for which the source
243 * requests updated values. This message will only be transmitted
244 * during synchronization.
246 property_map_multiget,
248 /** A request to store values in a ghost cell. This message
249 * contains a vector of key/value pairs corresponding to the
250 * sequence of keys sent to the source processor.
252 property_map_multiget_reply,
254 /** The payload containing a vector of local key-value pairs to be
255 * put into the remote property map. A key-value std::pair will be
256 * used to store each local key-value pair.
258 property_map_multiput
261 // Code from Joaquín M López Muñoz to work around unusual implementation of
262 // std::pair in VC++ 10:
263 template<typename First,typename Second>
264 class pair_first_extractor {
265 typedef std::pair<First,Second> value_type;
268 typedef First result_type;
269 const result_type& operator()(const value_type& x) const {
273 result_type& operator()(value_type& x) const {
279 /// The type of the ghost cells
280 typedef multi_index::multi_index_container<
281 std::pair<key_type, value_type>,
282 multi_index::indexed_by<
283 multi_index::sequenced<>,
284 multi_index::hashed_unique<
285 pair_first_extractor<key_type, value_type>
290 /// Iterator into the ghost cells
291 typedef typename ghost_cells_type::iterator iterator;
293 /// Key-based index into the ghost cells
294 typedef typename ghost_cells_type::template nth_index<1>::type
295 ghost_cells_key_index_type;
297 /// Iterator into the ghost cells (by key)
298 typedef typename ghost_cells_key_index_type::iterator key_iterator;
300 /** The property map category. A distributed property map cannot be
301 * an Lvalue Property Map, because values on remote processes cannot
304 typedef typename detail::make_nonlvalue_property_map<
305 (is_base_and_derived<lvalue_property_map_tag, local_category>::value
306 || is_same<lvalue_property_map_tag, local_category>::value)>
307 ::template apply<local_category>::type category;
309 /** Default-construct a distributed property map. This function
310 * creates an initialized property map that must be assigned to a
311 * valid value before being used. It is only provided here because
312 * property maps must be Default Constructible.
314 distributed_property_map() {}
316 /** Construct a distributed property map. Builds a distributed
317 * property map communicating over the given process group and using
318 * the given local property map for storage. Since no reduction
319 * operation is provided, the default reduction operation @c
320 * basic_reduce<value_type> is used.
322 distributed_property_map(const ProcessGroup& pg, const GlobalMap& global,
323 const StorageMap& pm)
324 : data(new data_t(pg, global, pm, basic_reduce<value_type>(), false))
326 typedef handle_message<basic_reduce<value_type> > Handler;
328 data->ghost_cells.reset(new ghost_cells_type());
329 Handler handler(data);
330 data->process_group.replace_handler(handler, true);
331 data->process_group.template get_receiver<Handler>()
332 ->setup_triggers(data->process_group);
335 /** Construct a distributed property map. Builds a distributed
336 * property map communicating over the given process group and using
337 * the given local property map for storage. The given @p reduce
338 * parameter is used as the reduction operation.
340 template<typename Reduce>
341 distributed_property_map(const ProcessGroup& pg, const GlobalMap& global,
342 const StorageMap& pm,
343 const Reduce& reduce);
345 ~distributed_property_map();
347 /// Set the reduce operation of the distributed property map.
348 template<typename Reduce>
349 void set_reduce(const Reduce& reduce);
351 // Set the consistency model for the distributed property map
352 void set_consistency_model(int model);
354 // Get the consistency model
355 int get_consistency_model() const { return data->model; }
357 // Set the maximum number of ghost cells that we are allowed to
358 // maintain. If 0, all ghost cells will be retained.
359 void set_max_ghost_cells(std::size_t max_ghost_cells);
361 // Clear out all ghost cells
364 // Reset the values in all ghost cells to the default value
367 // Flush all values destined for remote processors
// operator[] returns the local storage's reference for locally-owned
// keys; the remote-key branch is missing from this listing.
370 reference operator[](const key_type& key) const
372 owner_local_pair p = get(data->global, key);
374 if (p.first == process_id(data->process_group)) {
375 return data->storage[p.second];
381 process_group_type process_group() const
383 return data->process_group.base();
386 StorageMap& base() { return data->storage; }
387 const StorageMap& base() const { return data->storage; }
389 /** Sends a "put" request.
394 request_put(process_id_type p, const key_type& k, const value_type& v) const
396 send(data->process_group, p, property_map_put,
397 boost::parallel::detail::make_untracked_pair(k, v));
400 /** Access the ghost cell for the given key.
403 value_type& cell(const key_type& k, bool request_if_missing = true) const;
405 /** Perform synchronization
408 void do_synchronize();
410 const GlobalMap& global() const { return data->global; }
411 GlobalMap& global() { return data->global; }
// NOTE(review): the enclosing 'struct data_t {' declaration is missing
// from this listing; the constructor and members below belong to it
// (cf. the shared_ptr<data_t> data member further down).
415 data_t(const ProcessGroup& pg, const GlobalMap& global,
416 const StorageMap& pm, const function1<value_type, key_type>& dv,
417 bool has_default_resolver)
418 : process_group(pg), global(global), storage(pm),
419 ghost_cells(), max_ghost_cells(1000000), get_default_value(dv),
420 has_default_resolver(has_default_resolver), model(cm_forward) { }
422 /// The process group
423 ProcessGroup process_group;
425 /// A mapping from the keys of this property map to the global
429 /// Local property map
433 shared_ptr<ghost_cells_type> ghost_cells;
435 /// The maximum number of ghost cells we are permitted to hold. If
436 /// zero, we are permitted to have an infinite number of ghost
438 std::size_t max_ghost_cells;
440 /// Default value for remote ghost cells, as defined by the
441 /// reduction operation.
442 function1<value_type, key_type> get_default_value;
444 /// True if this resolver is the "default" resolver, meaning that
445 /// we should not be able to get() a default value; it needs to be
446 /// request()ed first.
447 bool has_default_resolver;
449 // Current consistency model
452 // Function that resets all of the ghost cells to their default
453 // values. It knows the type of the resolver, so we can eliminate
454 // a large number of calls through function pointers.
455 void (data_t::*reset)();
457 // Clear out all ghost cells
460 // Flush all values destined for remote processors
463 // Send out requests to "refresh" the values of ghost cells that
465 void refresh_ghost_cells();
468 template<typename Resolver> void do_reset();
470 friend class distributed_property_map;
472 friend struct data_t;
474 shared_ptr<data_t> data;
477 // Prunes the least recently used ghost cells until we have @c
478 // max_ghost_cells or fewer ghost cells.
479 void prune_ghost_cells() const;
481 /** Handles incoming messages.
483 * This function object is responsible for handling all incoming
484 * messages for the distributed property map.
486 template<typename Reduce>
487 struct handle_message
489 explicit handle_message(const shared_ptr<data_t>& data,
490 const Reduce& reduce = Reduce())
491 : data_ptr(data), reduce(reduce) { }
493 void operator()(process_id_type source, int tag);
495 /// Individual message handlers
497 handle_put(int source, int tag,
498 const boost::parallel::detail::untracked_pair<key_type, value_type>& data,
499 trigger_receive_context);
502 handle_get(int source, int tag, const key_type& data,
503 trigger_receive_context);
506 handle_multiget(int source, int tag,
507 const std::vector<key_type>& data,
508 trigger_receive_context);
511 handle_multiget_reply
512 (int source, int tag,
513 const std::vector<boost::parallel::detail::untracked_pair<key_type, value_type> >& msg,
514 trigger_receive_context);
518 (int source, int tag,
519 const std::vector<unsafe_pair<local_key_type, value_type> >& data,
520 trigger_receive_context);
522 void setup_triggers(process_group_type& pg);
525 weak_ptr<data_t> data_ptr;
529 /* Sets up the next stage in a multi-stage synchronization, for
530 bidirectional consistency. */
531 struct on_synchronize
533 explicit on_synchronize(const shared_ptr<data_t>& data) : data_ptr(data) { }
538 weak_ptr<data_t> data_ptr;
542 /* An implementation helper macro for the common case of naming
543 distributed property maps with all of the normal template
545 #define PBGL_DISTRIB_PMAP \
546 distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
548 /* Request that the value for the given remote key be retrieved in
549 the next synchronization round. */
550 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
552 request(const PBGL_DISTRIB_PMAP& pm,
553 typename PBGL_DISTRIB_PMAP::key_type const& key)
555 if (get(pm.data->global, key).first != process_id(pm.data->process_group))
559 /** Get the value associated with a particular key. Retrieves the
560 * value associated with the given key. If the key denotes a
561 * locally-owned object, it returns the value from the local property
562 * map; if the key denotes a remotely-owned object, retrieves the
563 * value of the ghost cell for that key, which may be the default
564 * value provided by the reduce operation.
566 * Complexity: For a local key, O(1) get operations on the underlying
567 * property map. For a non-local key, O(1) accesses to the ghost cells.
569 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
571 typename PBGL_DISTRIB_PMAP::value_type
572 get(const PBGL_DISTRIB_PMAP& pm,
573 typename PBGL_DISTRIB_PMAP::key_type const& key)
577 typename property_traits<GlobalMap>::value_type p =
578 get(pm.data->global, key);
580 if (p.first == process_id(pm.data->process_group)) {
581 return get(pm.data->storage, p.second);
587 /** Put a value associated with the given key into the property map.
588 * When the key denotes a locally-owned object, this operation updates
589 * the underlying local property map. Otherwise, the local ghost cell
590 * is updated and a "put" message is sent to the processor owning this
593 * Complexity: For a local key, O(1) put operations on the underlying
594 * property map. For a nonlocal key, O(1) accesses to the ghost cells
595 * and will send O(1) messages of size O(sizeof(key) + sizeof(value)).
597 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
599 put(const PBGL_DISTRIB_PMAP& pm,
600 typename PBGL_DISTRIB_PMAP::key_type const & key,
601 typename PBGL_DISTRIB_PMAP::value_type const & value)
605 typename property_traits<GlobalMap>::value_type p =
606 get(pm.data->global, key);
608 if (p.first == process_id(pm.data->process_group)) {
609 put(pm.data->storage, p.second, value);
611 if (pm.data->model & cm_forward)
612 pm.request_put(p.first, key, value);
614 pm.cell(key, false) = value;
618 /** Put a value associated with a given key into the local view of the
619 * property map. This operation is equivalent to @c put, but with one
620 * exception: no message will be sent to the owning processor in the
621 * case of a remote update. The effect is that any value written via
622 * @c local_put for a remote key may be overwritten in the next
623 * synchronization round.
625 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
627 local_put(const PBGL_DISTRIB_PMAP& pm,
628 typename PBGL_DISTRIB_PMAP::key_type const & key,
629 typename PBGL_DISTRIB_PMAP::value_type const & value)
633 typename property_traits<GlobalMap>::value_type p =
634 get(pm.data->global, key);
636 if (p.first == process_id(pm.data->process_group))
637 put(pm.data->storage, p.second, value);
638 else pm.cell(key, false) = value;
641 /** Cache the value associated with the given remote key. If the key
642 * is local, ignore the operation. */
643 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
645 cache(const PBGL_DISTRIB_PMAP& pm,
646 typename PBGL_DISTRIB_PMAP::key_type const & key,
647 typename PBGL_DISTRIB_PMAP::value_type const & value)
649 typename ProcessGroup::process_id_type id = get(pm.data->global, key).first;
651 if (id != process_id(pm.data->process_group)) pm.cell(key, false) = value;
654 /// Synchronize the property map.
655 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
657 synchronize(PBGL_DISTRIB_PMAP& pm)
662 /// Create a distributed property map.
663 template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
664 inline distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
665 make_distributed_property_map(const ProcessGroup& pg, GlobalMap global,
668 typedef distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
670 return result_type(pg, global, storage);
676 template<typename ProcessGroup, typename GlobalMap, typename StorageMap,
678 inline distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
679 make_distributed_property_map(const ProcessGroup& pg, GlobalMap global,
680 StorageMap storage, Reduce reduce)
682 typedef distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
684 return result_type(pg, global, storage, reduce);
687 } } // end namespace boost::parallel
689 #include <boost/property_map/parallel/impl/distributed_property_map.ipp>
691 #undef PBGL_DISTRIB_PMAP
693 #endif // BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP