1 //////////////////////////////////////////////////////////////////////////////
3 // (C) Copyright Ion Gaztanaga 2008-2012. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
7 // See http://www.boost.org/libs/interprocess for documentation.
9 //////////////////////////////////////////////////////////////////////////////
11 #ifndef BOOST_INTERPROCESS_ALLOCATOR_DETAIL_ALLOCATOR_COMMON_HPP
12 #define BOOST_INTERPROCESS_ALLOCATOR_DETAIL_ALLOCATOR_COMMON_HPP
14 #ifndef BOOST_CONFIG_HPP
15 # include <boost/config.hpp>
18 #if defined(BOOST_HAS_PRAGMA_ONCE)
22 #include <boost/interprocess/detail/config_begin.hpp>
23 #include <boost/interprocess/detail/workaround.hpp>
25 #include <boost/intrusive/pointer_traits.hpp>
27 #include <boost/interprocess/interprocess_fwd.hpp>
28 #include <boost/interprocess/detail/utilities.hpp> //to_raw_pointer
29 #include <boost/utility/addressof.hpp> //boost::addressof
30 #include <boost/assert.hpp> //BOOST_ASSERT
31 #include <boost/interprocess/exceptions.hpp> //bad_alloc
32 #include <boost/interprocess/sync/scoped_lock.hpp> //scoped_lock
33 #include <boost/interprocess/containers/allocation_type.hpp> //boost::interprocess::allocation_type
34 #include <boost/container/detail/multiallocation_chain.hpp>
35 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
36 #include <boost/interprocess/detail/segment_manager_helper.hpp>
37 #include <boost/move/utility_core.hpp>
38 #include <boost/interprocess/detail/type_traits.hpp>
39 #include <boost/interprocess/detail/utilities.hpp>
40 #include <boost/container/detail/placement_new.hpp>
41 #include <boost/move/adl_move_swap.hpp>
44 namespace interprocess {
//!Yields the size in bytes of a value of type T.
//!The (cv-qualified) void specializations report sizeof(void*) because
//!void itself has no size; allocators use this to size raw-byte requests.
template<class T>
struct sizeof_value
{
   static const std::size_t value = sizeof(T);
};

template<>
struct sizeof_value<void>
{
   static const std::size_t value = sizeof(void*);
};

template<>
struct sizeof_value<const void>
{
   static const std::size_t value = sizeof(void*);
};

template<>
struct sizeof_value<volatile void>
{
   static const std::size_t value = sizeof(void*);
};

template<>
struct sizeof_value<const volatile void>
{
   static const std::size_t value = sizeof(void*);
};
78 //!Object function that creates the node allocator if it is not created and
79 //!increments reference count if it is already created
80 template<class NodePool>
81 struct get_or_create_node_pool_func
84 //!This connects or constructs the unique instance of node_pool_t
85 //!Can throw boost::interprocess::bad_alloc
88 //Find or create the node_pool_t
89 mp_node_pool = mp_segment_manager->template find_or_construct
90 <NodePool>(boost::interprocess::unique_instance)(mp_segment_manager);
91 //If valid, increment link count
93 mp_node_pool->inc_ref_count();
96 //!Constructor. Initializes function
98 get_or_create_node_pool_func(typename NodePool::segment_manager *mngr)
99 : mp_segment_manager(mngr){}
101 NodePool *mp_node_pool;
102 typename NodePool::segment_manager *mp_segment_manager;
105 template<class NodePool>
106 inline NodePool *get_or_create_node_pool(typename NodePool::segment_manager *mgnr)
108 ipcdetail::get_or_create_node_pool_func<NodePool> func(mgnr);
109 mgnr->atomic_func(func);
110 return func.mp_node_pool;
113 //!Object function that decrements the reference count. If the count
114 //!reaches to zero destroys the node allocator from memory.
116 template<class NodePool>
117 struct destroy_if_last_link_func
119 //!Decrements reference count and destroys the object if there is no
120 //!more attached allocators. Never throws
123 //If not the last link return
124 if(mp_node_pool->dec_ref_count() != 0) return;
126 //Last link, let's destroy the segment_manager
127 mp_node_pool->get_segment_manager()->template destroy<NodePool>(boost::interprocess::unique_instance);
130 //!Constructor. Initializes function
132 destroy_if_last_link_func(NodePool *pool)
136 NodePool *mp_node_pool;
139 //!Destruction function, initializes and executes destruction function
140 //!object. Never throws
141 template<class NodePool>
142 inline void destroy_node_pool_if_last_link(NodePool *pool)
144 //Get segment manager
145 typename NodePool::segment_manager *mngr = pool->get_segment_manager();
146 //Execute destruction functor atomically
147 destroy_if_last_link_func<NodePool>func(pool);
148 mngr->atomic_func(func);
151 template<class NodePool>
154 typedef typename NodePool::segment_manager::
155 void_pointer void_pointer;
156 typedef typename boost::intrusive::
157 pointer_traits<void_pointer>::template
158 rebind_pointer<NodePool>::type node_pool_ptr;
159 typedef typename NodePool::multiallocation_chain multiallocation_chain;
160 typedef typename NodePool::segment_manager::size_type size_type;
161 node_pool_ptr mp_node_pool;
162 multiallocation_chain m_cached_nodes;
163 size_type m_max_cached_nodes;
166 typedef typename NodePool::segment_manager segment_manager;
168 cache_impl(segment_manager *segment_mngr, size_type max_cached_nodes)
169 : mp_node_pool(get_or_create_node_pool<NodePool>(segment_mngr))
170 , m_max_cached_nodes(max_cached_nodes)
173 cache_impl(const cache_impl &other)
174 : mp_node_pool(other.get_node_pool())
175 , m_max_cached_nodes(other.get_max_cached_nodes())
177 mp_node_pool->inc_ref_count();
182 this->deallocate_all_cached_nodes();
183 ipcdetail::destroy_node_pool_if_last_link(ipcdetail::to_raw_pointer(mp_node_pool));
186 NodePool *get_node_pool() const
187 { return ipcdetail::to_raw_pointer(mp_node_pool); }
189 segment_manager *get_segment_manager() const
190 { return mp_node_pool->get_segment_manager(); }
192 size_type get_max_cached_nodes() const
193 { return m_max_cached_nodes; }
195 void *cached_allocation()
197 //If don't have any cached node, we have to get a new list of free nodes from the pool
198 if(m_cached_nodes.empty()){
199 mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes);
201 void *ret = ipcdetail::to_raw_pointer(m_cached_nodes.pop_front());
205 void cached_allocation(size_type n, multiallocation_chain &chain)
207 size_type count = n, allocated(0);
209 //If don't have any cached node, we have to get a new list of free nodes from the pool
210 while(!m_cached_nodes.empty() && count--){
211 void *ret = ipcdetail::to_raw_pointer(m_cached_nodes.pop_front());
212 chain.push_back(ret);
217 mp_node_pool->allocate_nodes(n - allocated, chain);
221 this->cached_deallocation(chain);
227 void cached_deallocation(void *ptr)
229 //Check if cache is full
230 if(m_cached_nodes.size() >= m_max_cached_nodes){
231 //This only occurs if this allocator deallocate memory allocated
232 //with other equal allocator. Since the cache is full, and more
233 //deallocations are probably coming, we'll make some room in cache
234 //in a single, efficient multi node deallocation.
235 this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
237 m_cached_nodes.push_front(ptr);
240 void cached_deallocation(multiallocation_chain &chain)
242 m_cached_nodes.splice_after(m_cached_nodes.before_begin(), chain);
244 //Check if cache is full
245 if(m_cached_nodes.size() >= m_max_cached_nodes){
246 //This only occurs if this allocator deallocate memory allocated
247 //with other equal allocator. Since the cache is full, and more
248 //deallocations are probably coming, we'll make some room in cache
249 //in a single, efficient multi node deallocation.
250 this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
254 //!Sets the new max cached nodes value. This can provoke deallocations
255 //!if "newmax" is less than current cached nodes. Never throws
256 void set_max_cached_nodes(size_type newmax)
258 m_max_cached_nodes = newmax;
259 this->priv_deallocate_remaining_nodes();
262 //!Frees all cached nodes.
264 void deallocate_all_cached_nodes()
266 if(m_cached_nodes.empty()) return;
267 mp_node_pool->deallocate_nodes(m_cached_nodes);
271 //!Frees all cached nodes at once.
273 void priv_deallocate_remaining_nodes()
275 if(m_cached_nodes.size() > m_max_cached_nodes){
276 priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
280 //!Frees n cached nodes at once. Never throws
281 void priv_deallocate_n_nodes(size_type n)
283 //This only occurs if this allocator deallocate memory allocated
284 //with other equal allocator. Since the cache is full, and more
285 //deallocations are probably coming, we'll make some room in cache
286 //in a single, efficient multi node deallocation.
288 typename multiallocation_chain::iterator it(m_cached_nodes.before_begin());
292 multiallocation_chain chain;
293 chain.splice_after(chain.before_begin(), m_cached_nodes, m_cached_nodes.before_begin(), it, n);
294 //Deallocate all new linked list at once
295 mp_node_pool->deallocate_nodes(chain);
299 void swap(cache_impl &other)
301 ::boost::adl_move_swap(mp_node_pool, other.mp_node_pool);
302 ::boost::adl_move_swap(m_cached_nodes, other.m_cached_nodes);
303 ::boost::adl_move_swap(m_max_cached_nodes, other.m_max_cached_nodes);
307 template<class Derived, class T, class SegmentManager>
308 class array_allocation_impl
310 const Derived *derived() const
311 { return static_cast<const Derived*>(this); }
313 { return static_cast<Derived*>(this); }
315 typedef typename SegmentManager::void_pointer void_pointer;
318 typedef typename boost::intrusive::
319 pointer_traits<void_pointer>::template
320 rebind_pointer<T>::type pointer;
321 typedef typename boost::intrusive::
322 pointer_traits<void_pointer>::template
323 rebind_pointer<const T>::type const_pointer;
324 typedef T value_type;
325 typedef typename ipcdetail::add_reference
326 <value_type>::type reference;
327 typedef typename ipcdetail::add_reference
328 <const value_type>::type const_reference;
329 typedef typename SegmentManager::size_type size_type;
330 typedef typename SegmentManager::difference_type difference_type;
331 typedef boost::container::container_detail::transform_multiallocation_chain
332 <typename SegmentManager::multiallocation_chain, T>multiallocation_chain;
336 //!Returns maximum the number of objects the previously allocated memory
337 //!pointed by p can hold. This size only works for memory allocated with
338 //!allocate, allocation_command and allocate_many.
339 size_type size(const pointer &p) const
341 return (size_type)this->derived()->get_segment_manager()->size(ipcdetail::to_raw_pointer(p))/sizeof(T);
344 pointer allocation_command(boost::interprocess::allocation_type command,
345 size_type limit_size, size_type &prefer_in_recvd_out_size, pointer &reuse)
347 value_type *reuse_raw = ipcdetail::to_raw_pointer(reuse);
348 pointer const p = this->derived()->get_segment_manager()->allocation_command
349 (command, limit_size, prefer_in_recvd_out_size, reuse_raw);
354 //!Allocates many elements of size elem_size in a contiguous block
355 //!of memory. The minimum number to be allocated is min_elements,
356 //!the preferred and maximum number is
357 //!preferred_elements. The number of actually allocated elements is
358 //!will be assigned to received_size. The elements must be deallocated
359 //!with deallocate(...)
360 void allocate_many(size_type elem_size, size_type num_elements, multiallocation_chain &chain)
362 if(size_overflows<sizeof(T)>(elem_size)){
365 this->derived()->get_segment_manager()->allocate_many(elem_size*sizeof(T), num_elements, chain);
368 //!Allocates n_elements elements, each one of size elem_sizes[i]in a
370 //!of memory. The elements must be deallocated
371 void allocate_many(const size_type *elem_sizes, size_type n_elements, multiallocation_chain &chain)
373 this->derived()->get_segment_manager()->allocate_many(elem_sizes, n_elements, sizeof(T), chain);
376 //!Allocates many elements of size elem_size in a contiguous block
377 //!of memory. The minimum number to be allocated is min_elements,
378 //!the preferred and maximum number is
379 //!preferred_elements. The number of actually allocated elements is
380 //!will be assigned to received_size. The elements must be deallocated
381 //!with deallocate(...)
382 void deallocate_many(multiallocation_chain &chain)
383 { this->derived()->get_segment_manager()->deallocate_many(chain); }
385 //!Returns the number of elements that could be
386 //!allocated. Never throws
387 size_type max_size() const
388 { return this->derived()->get_segment_manager()->get_size()/sizeof(T); }
390 //!Returns address of mutable object.
392 pointer address(reference value) const
393 { return pointer(boost::addressof(value)); }
395 //!Returns address of non mutable object.
397 const_pointer address(const_reference value) const
398 { return const_pointer(boost::addressof(value)); }
400 //!Constructs an object
401 //!Throws if T's constructor throws
402 //!For backwards compatibility with libraries using C++03 allocators
404 void construct(const pointer &ptr, BOOST_FWD_REF(P) p)
405 { ::new((void*)ipcdetail::to_raw_pointer(ptr), boost_container_new_t()) value_type(::boost::forward<P>(p)); }
407 //!Destroys object. Throws if object's
409 void destroy(const pointer &ptr)
410 { BOOST_ASSERT(ptr != 0); (*ptr).~value_type(); }
414 template<class Derived, unsigned int Version, class T, class SegmentManager>
415 class node_pool_allocation_impl
416 : public array_allocation_impl
421 const Derived *derived() const
422 { return static_cast<const Derived*>(this); }
424 { return static_cast<Derived*>(this); }
426 typedef typename SegmentManager::void_pointer void_pointer;
427 typedef typename boost::intrusive::
428 pointer_traits<void_pointer>::template
429 rebind_pointer<const void>::type cvoid_pointer;
432 typedef typename boost::intrusive::
433 pointer_traits<void_pointer>::template
434 rebind_pointer<T>::type pointer;
435 typedef typename boost::intrusive::
436 pointer_traits<void_pointer>::template
437 rebind_pointer<const T>::type const_pointer;
438 typedef T value_type;
439 typedef typename ipcdetail::add_reference
440 <value_type>::type reference;
441 typedef typename ipcdetail::add_reference
442 <const value_type>::type const_reference;
443 typedef typename SegmentManager::size_type size_type;
444 typedef typename SegmentManager::difference_type difference_type;
445 typedef boost::container::container_detail::transform_multiallocation_chain
446 <typename SegmentManager::multiallocation_chain, T>multiallocation_chain;
452 typedef typename Derived::template node_pool<0>::type type;
453 static type *get(void *p)
454 { return static_cast<type*>(p); }
458 //!Allocate memory for an array of count elements.
459 //!Throws boost::interprocess::bad_alloc if there is no enough memory
460 pointer allocate(size_type count, cvoid_pointer hint = 0)
463 typedef typename node_pool<0>::type node_pool_t;
464 node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
465 if(size_overflows<sizeof(T)>(count)){
468 else if(Version == 1 && count == 1){
469 return pointer(static_cast<value_type*>
470 (pool->allocate_node()));
473 return pointer(static_cast<value_type*>
474 (pool->get_segment_manager()->allocate(count*sizeof(T))));
478 //!Deallocate allocated memory. Never throws
479 void deallocate(const pointer &ptr, size_type count)
482 typedef typename node_pool<0>::type node_pool_t;
483 node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
484 if(Version == 1 && count == 1)
485 pool->deallocate_node(ipcdetail::to_raw_pointer(ptr));
487 pool->get_segment_manager()->deallocate((void*)ipcdetail::to_raw_pointer(ptr));
490 //!Allocates just one object. Memory allocated with this function
491 //!must be deallocated only with deallocate_one().
492 //!Throws boost::interprocess::bad_alloc if there is no enough memory
493 pointer allocate_one()
495 typedef typename node_pool<0>::type node_pool_t;
496 node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
497 return pointer(static_cast<value_type*>(pool->allocate_node()));
500 //!Allocates many elements of size == 1 in a contiguous block
501 //!of memory. The minimum number to be allocated is min_elements,
502 //!the preferred and maximum number is
503 //!preferred_elements. The number of actually allocated elements is
504 //!will be assigned to received_size. Memory allocated with this function
505 //!must be deallocated only with deallocate_one().
506 void allocate_individual(size_type num_elements, multiallocation_chain &chain)
508 typedef typename node_pool<0>::type node_pool_t;
509 node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
510 pool->allocate_nodes(num_elements, chain);
513 //!Deallocates memory previously allocated with allocate_one().
514 //!You should never use deallocate_one to deallocate memory allocated
515 //!with other functions different from allocate_one(). Never throws
516 void deallocate_one(const pointer &p)
518 typedef typename node_pool<0>::type node_pool_t;
519 node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
520 pool->deallocate_node(ipcdetail::to_raw_pointer(p));
523 //!Allocates many elements of size == 1 in a contiguous block
524 //!of memory. The minimum number to be allocated is min_elements,
525 //!the preferred and maximum number is
526 //!preferred_elements. The number of actually allocated elements is
527 //!will be assigned to received_size. Memory allocated with this function
528 //!must be deallocated only with deallocate_one().
529 void deallocate_individual(multiallocation_chain &chain)
531 node_pool<0>::get(this->derived()->get_node_pool())->deallocate_nodes
535 //!Deallocates all free blocks of the pool
536 void deallocate_free_blocks()
537 { node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_blocks(); }
539 //!Deprecated, use deallocate_free_blocks.
540 //!Deallocates all free chunks of the pool.
541 void deallocate_free_chunks()
542 { node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_blocks(); }
545 template<class T, class NodePool, unsigned int Version>
546 class cached_allocator_impl
547 : public array_allocation_impl
548 <cached_allocator_impl<T, NodePool, Version>, T, typename NodePool::segment_manager>
550 cached_allocator_impl & operator=(const cached_allocator_impl& other);
551 typedef array_allocation_impl
552 < cached_allocator_impl
553 <T, NodePool, Version>
555 , typename NodePool::segment_manager> base_t;
558 typedef NodePool node_pool_t;
559 typedef typename NodePool::segment_manager segment_manager;
560 typedef typename segment_manager::void_pointer void_pointer;
561 typedef typename boost::intrusive::
562 pointer_traits<void_pointer>::template
563 rebind_pointer<const void>::type cvoid_pointer;
564 typedef typename base_t::pointer pointer;
565 typedef typename base_t::size_type size_type;
566 typedef typename base_t::multiallocation_chain multiallocation_chain;
567 typedef typename base_t::value_type value_type;
570 static const std::size_t DEFAULT_MAX_CACHED_NODES = 64;
572 cached_allocator_impl(segment_manager *segment_mngr, size_type max_cached_nodes)
573 : m_cache(segment_mngr, max_cached_nodes)
576 cached_allocator_impl(const cached_allocator_impl &other)
577 : m_cache(other.m_cache)
580 //!Copy constructor from related cached_adaptive_pool_base. If not present, constructs
581 //!a node pool. Increments the reference count of the associated node pool.
582 //!Can throw boost::interprocess::bad_alloc
583 template<class T2, class NodePool2>
584 cached_allocator_impl
585 (const cached_allocator_impl
586 <T2, NodePool2, Version> &other)
587 : m_cache(other.get_segment_manager(), other.get_max_cached_nodes())
590 //!Returns a pointer to the node pool.
592 node_pool_t* get_node_pool() const
593 { return m_cache.get_node_pool(); }
595 //!Returns the segment manager.
597 segment_manager* get_segment_manager()const
598 { return m_cache.get_segment_manager(); }
600 //!Sets the new max cached nodes value. This can provoke deallocations
601 //!if "newmax" is less than current cached nodes. Never throws
602 void set_max_cached_nodes(size_type newmax)
603 { m_cache.set_max_cached_nodes(newmax); }
605 //!Returns the max cached nodes parameter.
607 size_type get_max_cached_nodes() const
608 { return m_cache.get_max_cached_nodes(); }
610 //!Allocate memory for an array of count elements.
611 //!Throws boost::interprocess::bad_alloc if there is no enough memory
612 pointer allocate(size_type count, cvoid_pointer hint = 0)
616 if(size_overflows<sizeof(T)>(count)){
619 else if(Version == 1 && count == 1){
620 ret = m_cache.cached_allocation();
623 ret = this->get_segment_manager()->allocate(count*sizeof(T));
625 return pointer(static_cast<T*>(ret));
628 //!Deallocate allocated memory. Never throws
629 void deallocate(const pointer &ptr, size_type count)
632 if(Version == 1 && count == 1){
633 m_cache.cached_deallocation(ipcdetail::to_raw_pointer(ptr));
636 this->get_segment_manager()->deallocate((void*)ipcdetail::to_raw_pointer(ptr));
640 //!Allocates just one object. Memory allocated with this function
641 //!must be deallocated only with deallocate_one().
642 //!Throws boost::interprocess::bad_alloc if there is no enough memory
643 pointer allocate_one()
644 { return pointer(static_cast<value_type*>(this->m_cache.cached_allocation())); }
646 //!Allocates many elements of size == 1 in a contiguous block
647 //!of memory. The minimum number to be allocated is min_elements,
648 //!the preferred and maximum number is
649 //!preferred_elements. The number of actually allocated elements is
650 //!will be assigned to received_size. Memory allocated with this function
651 //!must be deallocated only with deallocate_one().
652 void allocate_individual(size_type num_elements, multiallocation_chain &chain)
653 { this->m_cache.cached_allocation(num_elements, chain); }
655 //!Deallocates memory previously allocated with allocate_one().
656 //!You should never use deallocate_one to deallocate memory allocated
657 //!with other functions different from allocate_one(). Never throws
658 void deallocate_one(const pointer &p)
659 { this->m_cache.cached_deallocation(ipcdetail::to_raw_pointer(p)); }
661 //!Allocates many elements of size == 1 in a contiguous block
662 //!of memory. The minimum number to be allocated is min_elements,
663 //!the preferred and maximum number is
664 //!preferred_elements. The number of actually allocated elements is
665 //!will be assigned to received_size. Memory allocated with this function
666 //!must be deallocated only with deallocate_one().
667 void deallocate_individual(multiallocation_chain &chain)
668 { m_cache.cached_deallocation(chain); }
670 //!Deallocates all free blocks of the pool
671 void deallocate_free_blocks()
672 { m_cache.get_node_pool()->deallocate_free_blocks(); }
674 //!Swaps allocators. Does not throw. If each allocator is placed in a
675 //!different shared memory segments, the result is undefined.
676 friend void swap(cached_allocator_impl &alloc1, cached_allocator_impl &alloc2)
677 { ::boost::adl_move_swap(alloc1.m_cache, alloc2.m_cache); }
679 void deallocate_cache()
680 { m_cache.deallocate_all_cached_nodes(); }
682 //!Deprecated use deallocate_free_blocks.
683 void deallocate_free_chunks()
684 { m_cache.get_node_pool()->deallocate_free_blocks(); }
686 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
688 cache_impl<node_pool_t> m_cache;
689 #endif //!defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
692 //!Equality test for same type of
693 //!cached_allocator_impl
694 template<class T, class N, unsigned int V> inline
695 bool operator==(const cached_allocator_impl<T, N, V> &alloc1,
696 const cached_allocator_impl<T, N, V> &alloc2)
697 { return alloc1.get_node_pool() == alloc2.get_node_pool(); }
699 //!Inequality test for same type of
700 //!cached_allocator_impl
701 template<class T, class N, unsigned int V> inline
702 bool operator!=(const cached_allocator_impl<T, N, V> &alloc1,
703 const cached_allocator_impl<T, N, V> &alloc2)
704 { return alloc1.get_node_pool() != alloc2.get_node_pool(); }
707 //!Pooled shared memory allocator using adaptive pool. Includes
708 //!a reference count but the class does not delete itself, this is
709 //!responsibility of user classes. Node size (NodeSize) and the number of
710 //!nodes allocated per block (NodesPerBlock) are known at compile time
711 template<class private_node_allocator_t>
712 class shared_pool_impl
713 : public private_node_allocator_t
716 //!Segment manager typedef
717 typedef typename private_node_allocator_t::
718 segment_manager segment_manager;
719 typedef typename private_node_allocator_t::
720 multiallocation_chain multiallocation_chain;
721 typedef typename private_node_allocator_t::
725 typedef typename segment_manager::mutex_family::mutex_type mutex_type;
728 //!Constructor from a segment manager. Never throws
729 shared_pool_impl(segment_manager *segment_mngr)
730 : private_node_allocator_t(segment_mngr)
733 //!Destructor. Deallocates all allocated blocks. Never throws
737 //!Allocates array of count elements. Can throw boost::interprocess::bad_alloc
738 void *allocate_node()
740 //-----------------------
741 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
742 //-----------------------
743 return private_node_allocator_t::allocate_node();
746 //!Deallocates an array pointed by ptr. Never throws
747 void deallocate_node(void *ptr)
749 //-----------------------
750 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
751 //-----------------------
752 private_node_allocator_t::deallocate_node(ptr);
755 //!Allocates n nodes.
756 //!Can throw boost::interprocess::bad_alloc
757 void allocate_nodes(const size_type n, multiallocation_chain &chain)
759 //-----------------------
760 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
761 //-----------------------
762 private_node_allocator_t::allocate_nodes(n, chain);
765 //!Deallocates a linked list of nodes ending in null pointer. Never throws
766 void deallocate_nodes(multiallocation_chain &nodes, size_type num)
768 //-----------------------
769 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
770 //-----------------------
771 private_node_allocator_t::deallocate_nodes(nodes, num);
774 //!Deallocates the nodes pointed by the multiallocation iterator. Never throws
775 void deallocate_nodes(multiallocation_chain &chain)
777 //-----------------------
778 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
779 //-----------------------
780 private_node_allocator_t::deallocate_nodes(chain);
783 //!Deallocates all the free blocks of memory. Never throws
784 void deallocate_free_blocks()
786 //-----------------------
787 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
788 //-----------------------
789 private_node_allocator_t::deallocate_free_blocks();
792 //!Deallocates all used memory from the common pool.
793 //!Precondition: all nodes allocated from this pool should
794 //!already be deallocated. Otherwise, undefined behavior. Never throws
797 //-----------------------
798 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
799 //-----------------------
800 private_node_allocator_t::purge_blocks();
803 //!Increments internal reference count and returns new count. Never throws
804 size_type inc_ref_count()
806 //-----------------------
807 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
808 //-----------------------
809 return ++m_header.m_usecount;
812 //!Decrements internal reference count and returns new count. Never throws
813 size_type dec_ref_count()
815 //-----------------------
816 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
817 //-----------------------
818 BOOST_ASSERT(m_header.m_usecount > 0);
819 return --m_header.m_usecount;
822 //!Deprecated, use deallocate_free_blocks.
823 void deallocate_free_chunks()
825 //-----------------------
826 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
827 //-----------------------
828 private_node_allocator_t::deallocate_free_blocks();
831 //!Deprecated, use purge_blocks.
834 //-----------------------
835 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
836 //-----------------------
837 private_node_allocator_t::purge_blocks();
841 //!This struct includes needed data and derives from
842 //!the mutex type to allow EBO when using null_mutex
843 struct header_t : mutex_type
845 size_type m_usecount; //Number of attached allocators
852 } //namespace ipcdetail {
853 } //namespace interprocess {
854 } //namespace boost {
856 #include <boost/interprocess/detail/config_end.hpp>
858 #endif //#ifndef BOOST_INTERPROCESS_ALLOCATOR_DETAIL_ALLOCATOR_COMMON_HPP