//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2008-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_ALLOCATOR_DETAIL_ALLOCATOR_COMMON_HPP
#define BOOST_INTERPROCESS_ALLOCATOR_DETAIL_ALLOCATOR_COMMON_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

#include <boost/intrusive/pointer_traits.hpp>

#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/detail/utilities.hpp>             //to_raw_pointer
#include <boost/utility/addressof.hpp>                         //boost::addressof
#include <boost/assert.hpp>                                    //BOOST_ASSERT
#include <boost/interprocess/exceptions.hpp>                   //bad_alloc
#include <boost/interprocess/sync/scoped_lock.hpp>             //scoped_lock
#include <boost/interprocess/containers/allocation_type.hpp>   //boost::interprocess::allocation_type
#include <boost/container/detail/multiallocation_chain.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/detail/segment_manager_helper.hpp>
#include <boost/move/utility_core.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
#include <boost/container/detail/placement_new.hpp>
#include <boost/move/adl_move_swap.hpp>

namespace boost {
namespace interprocess {

template <class T>
struct sizeof_value
{
   static const std::size_t value = sizeof(T);
};

template <>
struct sizeof_value<void>
{
   static const std::size_t value = sizeof(void*);
};

template <>
struct sizeof_value<const void>
{
   static const std::size_t value = sizeof(void*);
};

template <>
struct sizeof_value<volatile void>
{
   static const std::size_t value = sizeof(void*);
};

template <>
struct sizeof_value<const volatile void>
{
   static const std::size_t value = sizeof(void*);
};

namespace ipcdetail {

//!Object function that creates the node allocator if it is not created and
//!increments the reference count if it is already created
template<class NodePool>
struct get_or_create_node_pool_func
{

   //!This connects to or constructs the unique instance of node_pool_t.
   //!Can throw boost::interprocess::bad_alloc
   void operator()()
   {
      //Find or create the node_pool_t
      mp_node_pool = mp_segment_manager->template find_or_construct
         <NodePool>(boost::interprocess::unique_instance)(mp_segment_manager);
      //If valid, increment link count
      if(mp_node_pool != 0)
         mp_node_pool->inc_ref_count();
   }

   //!Constructor. Initializes function
   //!object parameters
   get_or_create_node_pool_func(typename NodePool::segment_manager *mngr)
      : mp_segment_manager(mngr){}

   NodePool *mp_node_pool;
   typename NodePool::segment_manager *mp_segment_manager;
};

template<class NodePool>
inline NodePool *get_or_create_node_pool(typename NodePool::segment_manager *mgnr)
{
   ipcdetail::get_or_create_node_pool_func<NodePool> func(mgnr);
   mgnr->atomic_func(func);
   return func.mp_node_pool;
}
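
//A minimal sketch of how the helper above is typically used by an allocator
//(illustrative only; "node_pool_t" and "segment_mngr" stand for whatever pool
//type and segment manager the calling allocator defines):
//
//   //Atomically finds the unique_instance pool or constructs it, and
//   //increments its reference count inside the same atomic_func call.
//   node_pool_t *pool =
//      ipcdetail::get_or_create_node_pool<node_pool_t>(segment_mngr);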

//!Object function that decrements the reference count. If the count
//!reaches zero, it destroys the node allocator from memory.
//!Never throws
template<class NodePool>
struct destroy_if_last_link_func
{
   //!Decrements the reference count and destroys the object if there are no
   //!more attached allocators. Never throws
   void operator()()
   {
      //If not the last link, return
      if(mp_node_pool->dec_ref_count() != 0) return;

      //Last link, let's destroy the segment_manager
      mp_node_pool->get_segment_manager()->template destroy<NodePool>(boost::interprocess::unique_instance);
   }

   //!Constructor. Initializes function
   //!object parameters
   destroy_if_last_link_func(NodePool *pool)
      : mp_node_pool(pool)
   {}

   NodePool *mp_node_pool;
};

//!Destruction function, initializes and executes the destruction function
//!object. Never throws
template<class NodePool>
inline void destroy_node_pool_if_last_link(NodePool *pool)
{
   //Get segment manager
   typename NodePool::segment_manager *mngr = pool->get_segment_manager();
   //Execute destruction functor atomically
   destroy_if_last_link_func<NodePool> func(pool);
   mngr->atomic_func(func);
}
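
//Matching teardown sketch for the helper above (illustrative only; "pool" is
//assumed to have been obtained with get_or_create_node_pool):
//
//   //Atomically decrements the reference count and destroys the
//   //unique_instance pool once the last attached allocator detaches.
//   ipcdetail::destroy_node_pool_if_last_link(pool);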

template<class NodePool>
class cache_impl
{
   typedef typename NodePool::segment_manager::void_pointer   void_pointer;
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<NodePool>::type                        node_pool_ptr;
   typedef typename NodePool::multiallocation_chain            multiallocation_chain;
   typedef typename NodePool::segment_manager::size_type       size_type;

   node_pool_ptr           mp_node_pool;
   multiallocation_chain   m_cached_nodes;
   size_type               m_max_cached_nodes;

   public:
   typedef typename NodePool::segment_manager segment_manager;

   cache_impl(segment_manager *segment_mngr, size_type max_cached_nodes)
      : mp_node_pool(get_or_create_node_pool<NodePool>(segment_mngr))
      , m_max_cached_nodes(max_cached_nodes)
   {}

   cache_impl(const cache_impl &other)
      : mp_node_pool(other.get_node_pool())
      , m_max_cached_nodes(other.get_max_cached_nodes())
   {
      mp_node_pool->inc_ref_count();
   }

   ~cache_impl()
   {
      this->deallocate_all_cached_nodes();
      ipcdetail::destroy_node_pool_if_last_link(ipcdetail::to_raw_pointer(mp_node_pool));
   }

   NodePool *get_node_pool() const
   {  return ipcdetail::to_raw_pointer(mp_node_pool);  }

   segment_manager *get_segment_manager() const
   {  return mp_node_pool->get_segment_manager();  }

   size_type get_max_cached_nodes() const
   {  return m_max_cached_nodes;  }

   void *cached_allocation()
   {
      //If we don't have any cached node, get a new list of free nodes from the pool
      if(m_cached_nodes.empty()){
         mp_node_pool->allocate_nodes(m_max_cached_nodes/2, m_cached_nodes);
      }
      void *ret = ipcdetail::to_raw_pointer(m_cached_nodes.pop_front());
      return ret;
   }

   void cached_allocation(size_type n, multiallocation_chain &chain)
   {
      size_type count = n, allocated(0);
      BOOST_TRY{
         //Take as many nodes as possible from the cache, then get the rest from the pool
         while(!m_cached_nodes.empty() && count--){
            void *ret = ipcdetail::to_raw_pointer(m_cached_nodes.pop_front());
            chain.push_back(ret);
            ++allocated;
         }

         if(allocated != n){
            mp_node_pool->allocate_nodes(n - allocated, chain);
         }
      }
      BOOST_CATCH(...){
         this->cached_deallocation(chain);
         BOOST_RETHROW
      }
      BOOST_CATCH_END
   }

   void cached_deallocation(void *ptr)
   {
      //Check if cache is full
      if(m_cached_nodes.size() >= m_max_cached_nodes){
         //This only occurs if this allocator deallocates memory allocated
         //with another equal allocator. Since the cache is full, and more
         //deallocations are probably coming, we'll make some room in the cache
         //with a single, efficient multi-node deallocation.
         this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
      }
      m_cached_nodes.push_front(ptr);
   }

   void cached_deallocation(multiallocation_chain &chain)
   {
      m_cached_nodes.splice_after(m_cached_nodes.before_begin(), chain);

      //Check if cache is full
      if(m_cached_nodes.size() >= m_max_cached_nodes){
         //This only occurs if this allocator deallocates memory allocated
         //with another equal allocator. Since the cache is full, and more
         //deallocations are probably coming, we'll make some room in the cache
         //with a single, efficient multi-node deallocation.
         this->priv_deallocate_n_nodes(m_cached_nodes.size() - m_max_cached_nodes/2);
      }
   }

   //!Sets the new max cached nodes value. This can provoke deallocations
   //!if "newmax" is less than the current number of cached nodes. Never throws
   void set_max_cached_nodes(size_type newmax)
   {
      m_max_cached_nodes = newmax;
      this->priv_deallocate_remaining_nodes();
   }

   //!Frees all cached nodes.
   //!Never throws
   void deallocate_all_cached_nodes()
   {
      if(m_cached_nodes.empty()) return;
      mp_node_pool->deallocate_nodes(m_cached_nodes);
   }

   private:
   //!Frees the cached nodes that exceed the maximum, all at once.
   //!Never throws
   void priv_deallocate_remaining_nodes()
   {
      if(m_cached_nodes.size() > m_max_cached_nodes){
         priv_deallocate_n_nodes(m_cached_nodes.size()-m_max_cached_nodes);
      }
   }

   //!Frees n cached nodes at once. Never throws
   void priv_deallocate_n_nodes(size_type n)
   {
      //This only occurs if this allocator deallocates memory allocated
      //with another equal allocator. Since the cache is full, and more
      //deallocations are probably coming, we'll make some room in the cache
      //with a single, efficient multi-node deallocation.
      size_type count(n);
      typename multiallocation_chain::iterator it(m_cached_nodes.before_begin());
      while(count--){
         ++it;
      }
      multiallocation_chain chain;
      chain.splice_after(chain.before_begin(), m_cached_nodes, m_cached_nodes.before_begin(), it, n);
      //Deallocate the whole detached linked list at once
      mp_node_pool->deallocate_nodes(chain);
   }

   public:
   void swap(cache_impl &other)
   {
      ::boost::adl_move_swap(mp_node_pool, other.mp_node_pool);
      ::boost::adl_move_swap(m_cached_nodes, other.m_cached_nodes);
      ::boost::adl_move_swap(m_max_cached_nodes, other.m_max_cached_nodes);
   }
};
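
//Sketch of the cache_impl caching policy (illustrative only; "NodePool" stands
//for one of the shared node pools defined elsewhere and "mngr" for its segment
//manager):
//
//   ipcdetail::cache_impl<NodePool> cache(mngr, 64);  //keep at most 64 free nodes
//   void *n = cache.cached_allocation();  //refills with max/2 nodes when the cache is empty
//   cache.cached_deallocation(n);         //trims back to max/2 nodes when the cache is full
//   //any remaining cached nodes are returned to the pool by ~cache_impl()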

template<class Derived, class T, class SegmentManager>
class array_allocation_impl
{
   const Derived *derived() const
   {  return static_cast<const Derived*>(this);  }
   Derived *derived()
   {  return static_cast<Derived*>(this);  }

   typedef typename SegmentManager::void_pointer          void_pointer;

   public:
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<T>::type                           pointer;
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<const T>::type                     const_pointer;
   typedef T                                               value_type;
   typedef typename ipcdetail::add_reference
      <value_type>::type                                   reference;
   typedef typename ipcdetail::add_reference
      <const value_type>::type                             const_reference;
   typedef typename SegmentManager::size_type              size_type;
   typedef typename SegmentManager::difference_type        difference_type;
   typedef boost::container::container_detail::transform_multiallocation_chain
      <typename SegmentManager::multiallocation_chain, T>  multiallocation_chain;

   public:
   //!Returns the maximum number of objects that the memory previously allocated
   //!and pointed to by p can hold. This only works for memory allocated with
   //!allocate, allocation_command and allocate_many.
   size_type size(const pointer &p) const
   {
      return (size_type)this->derived()->get_segment_manager()->size(ipcdetail::to_raw_pointer(p))/sizeof(T);
   }

   pointer allocation_command(boost::interprocess::allocation_type command,
                              size_type limit_size, size_type &prefer_in_recvd_out_size, pointer &reuse)
   {
      value_type *reuse_raw = ipcdetail::to_raw_pointer(reuse);
      pointer const p = this->derived()->get_segment_manager()->allocation_command
         (command, limit_size, prefer_in_recvd_out_size, reuse_raw);
      reuse = reuse_raw;
      return p;
   }

   //!Allocates num_elements buffers, each one holding elem_size elements of
   //!value_type. The allocated buffers are linked into chain and must be
   //!deallocated with deallocate(...) or deallocate_many(...)
   void allocate_many(size_type elem_size, size_type num_elements, multiallocation_chain &chain)
   {
      if(size_overflows<sizeof(T)>(elem_size)){
         throw bad_alloc();
      }
      this->derived()->get_segment_manager()->allocate_many(elem_size*sizeof(T), num_elements, chain);
   }

   //!Allocates n_elements buffers, each one holding elem_sizes[i] elements of
   //!value_type. The allocated buffers are linked into chain and must be
   //!deallocated with deallocate(...) or deallocate_many(...)
   void allocate_many(const size_type *elem_sizes, size_type n_elements, multiallocation_chain &chain)
   {
      this->derived()->get_segment_manager()->allocate_many(elem_sizes, n_elements, sizeof(T), chain);
   }

   //!Deallocates the buffers linked into chain, which must have been
   //!allocated with allocate_many(...)
   void deallocate_many(multiallocation_chain &chain)
   {  this->derived()->get_segment_manager()->deallocate_many(chain);  }

   //!Returns the number of elements that could be
   //!allocated. Never throws
   size_type max_size() const
   {  return this->derived()->get_segment_manager()->get_size()/sizeof(T);  }

   //!Returns the address of a mutable object.
   //!Never throws
   pointer address(reference value) const
   {  return pointer(boost::addressof(value));  }

   //!Returns the address of a non-mutable object.
   //!Never throws
   const_pointer address(const_reference value) const
   {  return const_pointer(boost::addressof(value));  }

   //!Constructs an object.
   //!Throws if T's constructor throws.
   //!For backwards compatibility with libraries using C++03 allocators
   template<class P>
   void construct(const pointer &ptr, BOOST_FWD_REF(P) p)
   {  ::new((void*)ipcdetail::to_raw_pointer(ptr), boost_container_new_t()) value_type(::boost::forward<P>(p));  }

   //!Destroys an object. Throws if the object's
   //!destructor throws
   void destroy(const pointer &ptr)
   {  BOOST_ASSERT(ptr != 0); (*ptr).~value_type();  }
};
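
//array_allocation_impl is a CRTP mixin: the only requirement it places on
//Derived is a get_segment_manager() member. A skeletal (hypothetical) derived
//allocator could plug in like this:
//
//   template<class T, class SegmentManager>
//   class my_raw_allocator
//      : public ipcdetail::array_allocation_impl
//         <my_raw_allocator<T, SegmentManager>, T, SegmentManager>
//   {
//      public:
//      my_raw_allocator(SegmentManager *mngr) : mp_mngr(mngr) {}
//      SegmentManager *get_segment_manager() const { return mp_mngr; }
//      private:
//      SegmentManager *mp_mngr;   //a real allocator would store a void_pointer-rebound pointer
//   };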


template<class Derived, unsigned int Version, class T, class SegmentManager>
class node_pool_allocation_impl
   : public array_allocation_impl
      < Derived
      , T
      , SegmentManager>
{
   const Derived *derived() const
   {  return static_cast<const Derived*>(this);  }
   Derived *derived()
   {  return static_cast<Derived*>(this);  }

   typedef typename SegmentManager::void_pointer           void_pointer;
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<const void>::type                  cvoid_pointer;

   public:
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<T>::type                           pointer;
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<const T>::type                     const_pointer;
   typedef T                                               value_type;
   typedef typename ipcdetail::add_reference
      <value_type>::type                                   reference;
   typedef typename ipcdetail::add_reference
      <const value_type>::type                             const_reference;
   typedef typename SegmentManager::size_type              size_type;
   typedef typename SegmentManager::difference_type        difference_type;
   typedef boost::container::container_detail::transform_multiallocation_chain
      <typename SegmentManager::multiallocation_chain, T>  multiallocation_chain;

   template <int Dummy>
   struct node_pool
   {
      typedef typename Derived::template node_pool<0>::type type;
      static type *get(void *p)
      {  return static_cast<type*>(p);  }
   };

   public:
   //!Allocates memory for an array of count elements.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate(size_type count, cvoid_pointer hint = 0)
   {
      (void)hint;
      typedef typename node_pool<0>::type node_pool_t;
      node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
      if(size_overflows<sizeof(T)>(count)){
         throw bad_alloc();
      }
      else if(Version == 1 && count == 1){
         return pointer(static_cast<value_type*>
            (pool->allocate_node()));
      }
      else{
         return pointer(static_cast<value_type*>
            (pool->get_segment_manager()->allocate(count*sizeof(T))));
      }
   }

   //!Deallocates allocated memory. Never throws
   void deallocate(const pointer &ptr, size_type count)
   {
      (void)count;
      typedef typename node_pool<0>::type node_pool_t;
      node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
      if(Version == 1 && count == 1)
         pool->deallocate_node(ipcdetail::to_raw_pointer(ptr));
      else
         pool->get_segment_manager()->deallocate((void*)ipcdetail::to_raw_pointer(ptr));
   }

   //!Allocates just one object. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate_one()
   {
      typedef typename node_pool<0>::type node_pool_t;
      node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
      return pointer(static_cast<value_type*>(pool->allocate_node()));
   }

   //!Allocates num_elements nodes, each one able to hold a single object,
   //!and links them into chain. Memory allocated with this function must be
   //!deallocated only with deallocate_one() or deallocate_individual().
   void allocate_individual(size_type num_elements, multiallocation_chain &chain)
   {
      typedef typename node_pool<0>::type node_pool_t;
      node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
      pool->allocate_nodes(num_elements, chain);
   }

   //!Deallocates memory previously allocated with allocate_one().
   //!You should never use deallocate_one to deallocate memory allocated
   //!with functions other than allocate_one(). Never throws
   void deallocate_one(const pointer &p)
   {
      typedef typename node_pool<0>::type node_pool_t;
      node_pool_t *pool = node_pool<0>::get(this->derived()->get_node_pool());
      pool->deallocate_node(ipcdetail::to_raw_pointer(p));
   }

   //!Deallocates the nodes linked into chain, previously allocated with
   //!allocate_one() or allocate_individual(). Never throws
   void deallocate_individual(multiallocation_chain &chain)
   {
      node_pool<0>::get(this->derived()->get_node_pool())->deallocate_nodes
         (chain);
   }

   //!Deallocates all free blocks of the pool
   void deallocate_free_blocks()
   {  node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_blocks();  }

   //!Deprecated, use deallocate_free_blocks.
   //!Deallocates all free chunks of the pool.
   void deallocate_free_chunks()
   {  node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_blocks();  }
};
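
//node_pool_allocation_impl additionally requires Derived to provide
//get_node_pool() and a nested node_pool<0>::type naming the pool type; the
//Version parameter decides whether allocate()/deallocate() with count == 1 go
//to that pool (Version == 1) or to the raw segment manager. Hypothetical
//outline of the pieces a derived allocator supplies:
//
//   template<class T, class SegmentManager>
//   class my_node_allocator
//      : public ipcdetail::node_pool_allocation_impl
//         <my_node_allocator<T, SegmentManager>, 2, T, SegmentManager>
//   {
//      public:
//      template<int Dummy>
//      struct node_pool
//      {  typedef some_shared_pool_type type;  };   //hypothetical pool typedef
//
//      void *get_node_pool() const;                 //returns the attached pool
//      SegmentManager *get_segment_manager() const;
//   };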

template<class T, class NodePool, unsigned int Version>
class cached_allocator_impl
   : public array_allocation_impl
      <cached_allocator_impl<T, NodePool, Version>, T, typename NodePool::segment_manager>
{
   cached_allocator_impl & operator=(const cached_allocator_impl& other);
   typedef array_allocation_impl
      < cached_allocator_impl<T, NodePool, Version>
      , T
      , typename NodePool::segment_manager> base_t;

   public:
   typedef NodePool                                   node_pool_t;
   typedef typename NodePool::segment_manager         segment_manager;
   typedef typename segment_manager::void_pointer     void_pointer;
   typedef typename boost::intrusive::
      pointer_traits<void_pointer>::template
         rebind_pointer<const void>::type             cvoid_pointer;
   typedef typename base_t::pointer                   pointer;
   typedef typename base_t::size_type                 size_type;
   typedef typename base_t::multiallocation_chain     multiallocation_chain;
   typedef typename base_t::value_type                value_type;

   public:
   static const std::size_t DEFAULT_MAX_CACHED_NODES = 64;

   cached_allocator_impl(segment_manager *segment_mngr, size_type max_cached_nodes)
      : m_cache(segment_mngr, max_cached_nodes)
   {}

   cached_allocator_impl(const cached_allocator_impl &other)
      : m_cache(other.m_cache)
   {}

   //!Copy constructor from a related cached_allocator_impl. If the node pool is
   //!not present, constructs it. Increments the reference count of the
   //!associated node pool. Can throw boost::interprocess::bad_alloc
   template<class T2, class NodePool2>
   cached_allocator_impl
      (const cached_allocator_impl<T2, NodePool2, Version> &other)
      : m_cache(other.get_segment_manager(), other.get_max_cached_nodes())
   {}

   //!Returns a pointer to the node pool.
   //!Never throws
   node_pool_t* get_node_pool() const
   {  return m_cache.get_node_pool();  }

   //!Returns the segment manager.
   //!Never throws
   segment_manager* get_segment_manager()const
   {  return m_cache.get_segment_manager();  }

   //!Sets the new max cached nodes value. This can provoke deallocations
   //!if "newmax" is less than the current number of cached nodes. Never throws
   void set_max_cached_nodes(size_type newmax)
   {  m_cache.set_max_cached_nodes(newmax);  }

   //!Returns the max cached nodes parameter.
   //!Never throws
   size_type get_max_cached_nodes() const
   {  return m_cache.get_max_cached_nodes();  }

   //!Allocates memory for an array of count elements.
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate(size_type count, cvoid_pointer hint = 0)
   {
      (void)hint;
      void * ret;
      if(size_overflows<sizeof(T)>(count)){
         throw bad_alloc();
      }
      else if(Version == 1 && count == 1){
         ret = m_cache.cached_allocation();
      }
      else{
         ret = this->get_segment_manager()->allocate(count*sizeof(T));
      }
      return pointer(static_cast<T*>(ret));
   }

   //!Deallocates allocated memory. Never throws
   void deallocate(const pointer &ptr, size_type count)
   {
      (void)count;
      if(Version == 1 && count == 1){
         m_cache.cached_deallocation(ipcdetail::to_raw_pointer(ptr));
      }
      else{
         this->get_segment_manager()->deallocate((void*)ipcdetail::to_raw_pointer(ptr));
      }
   }

   //!Allocates just one object. Memory allocated with this function
   //!must be deallocated only with deallocate_one().
   //!Throws boost::interprocess::bad_alloc if there is not enough memory
   pointer allocate_one()
   {  return pointer(static_cast<value_type*>(this->m_cache.cached_allocation()));  }

   //!Allocates num_elements nodes, each one able to hold a single object,
   //!and links them into chain. Memory allocated with this function must be
   //!deallocated only with deallocate_one() or deallocate_individual().
   void allocate_individual(size_type num_elements, multiallocation_chain &chain)
   {  this->m_cache.cached_allocation(num_elements, chain);  }

   //!Deallocates memory previously allocated with allocate_one().
   //!You should never use deallocate_one to deallocate memory allocated
   //!with functions other than allocate_one(). Never throws
   void deallocate_one(const pointer &p)
   {  this->m_cache.cached_deallocation(ipcdetail::to_raw_pointer(p));  }

   //!Deallocates the nodes linked into chain, previously allocated with
   //!allocate_one() or allocate_individual(). Never throws
   void deallocate_individual(multiallocation_chain &chain)
   {  m_cache.cached_deallocation(chain);  }

   //!Deallocates all free blocks of the pool
   void deallocate_free_blocks()
   {  m_cache.get_node_pool()->deallocate_free_blocks();  }

   //!Swaps allocators. Does not throw. If the allocators are placed in
   //!different shared memory segments, the result is undefined.
   friend void swap(cached_allocator_impl &alloc1, cached_allocator_impl &alloc2)
   {  ::boost::adl_move_swap(alloc1.m_cache, alloc2.m_cache);  }

   void deallocate_cache()
   {  m_cache.deallocate_all_cached_nodes();  }

   //!Deprecated, use deallocate_free_blocks.
   void deallocate_free_chunks()
   {  m_cache.get_node_pool()->deallocate_free_blocks();  }

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   cache_impl<node_pool_t> m_cache;
   #endif   //!defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
};

//!Equality test for the same type of
//!cached_allocator_impl
template<class T, class N, unsigned int V> inline
bool operator==(const cached_allocator_impl<T, N, V> &alloc1,
                const cached_allocator_impl<T, N, V> &alloc2)
   {  return alloc1.get_node_pool() == alloc2.get_node_pool();  }

//!Inequality test for the same type of
//!cached_allocator_impl
template<class T, class N, unsigned int V> inline
bool operator!=(const cached_allocator_impl<T, N, V> &alloc1,
                const cached_allocator_impl<T, N, V> &alloc2)
   {  return alloc1.get_node_pool() != alloc2.get_node_pool();  }
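
//Usage sketch for cached_allocator_impl (illustrative only; "MyType",
//"MyNodePool" and "segment_mngr" are placeholders for whatever a concrete
//cached allocator wrapper would define):
//
//   typedef ipcdetail::cached_allocator_impl<MyType, MyNodePool, 2> cached_alloc_t;
//   cached_alloc_t a(segment_mngr, cached_alloc_t::DEFAULT_MAX_CACHED_NODES);
//
//   cached_alloc_t::pointer p = a.allocate_one();   //served from the per-allocator cache
//   a.deallocate_one(p);                            //returned to the cache, not to the pool
//   a.deallocate_cache();                           //flush cached nodes back to the shared pool
//
//Two cached_allocator_impl objects compare equal when they share the same
//node pool, as implemented by the operators above.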


//!Pooled shared memory allocator helper: wraps a private node pool
//!(private_node_allocator_t) with a mutex and a reference count. The class
//!does not delete itself; that is the responsibility of the user classes.
//!Node size (NodeSize) and the number of nodes allocated per block
//!(NodesPerBlock) are known at compile time.
template<class private_node_allocator_t>
class shared_pool_impl
   : public private_node_allocator_t
{
   public:
   //!Segment manager typedef
   typedef typename private_node_allocator_t::segment_manager         segment_manager;
   typedef typename private_node_allocator_t::multiallocation_chain   multiallocation_chain;
   typedef typename private_node_allocator_t::size_type               size_type;

   private:
   typedef typename segment_manager::mutex_family::mutex_type mutex_type;

   public:
   //!Constructor from a segment manager. Never throws
   shared_pool_impl(segment_manager *segment_mngr)
      : private_node_allocator_t(segment_mngr)
   {}

   //!Destructor. Deallocates all allocated blocks. Never throws
   ~shared_pool_impl()
   {}

   //!Allocates a single node. Can throw boost::interprocess::bad_alloc
   void *allocate_node()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      return private_node_allocator_t::allocate_node();
   }

   //!Deallocates the node pointed to by ptr. Never throws
   void deallocate_node(void *ptr)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_node(ptr);
   }

   //!Allocates n nodes.
   //!Can throw boost::interprocess::bad_alloc
   void allocate_nodes(const size_type n, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::allocate_nodes(n, chain);
   }

   //!Deallocates num nodes from the chain of nodes. Never throws
   void deallocate_nodes(multiallocation_chain &nodes, size_type num)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_nodes(nodes, num);
   }

   //!Deallocates all the nodes of the chain. Never throws
   void deallocate_nodes(multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_nodes(chain);
   }

   //!Deallocates all the free blocks of memory. Never throws
   void deallocate_free_blocks()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_free_blocks();
   }

   //!Deallocates all used memory from the common pool.
   //!Precondition: all nodes allocated from this pool should
   //!already be deallocated. Otherwise, undefined behavior. Never throws
   void purge_blocks()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::purge_blocks();
   }

   //!Increments the internal reference count and returns the new count. Never throws
   size_type inc_ref_count()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      return ++m_header.m_usecount;
   }

   //!Decrements the internal reference count and returns the new count. Never throws
   size_type dec_ref_count()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      BOOST_ASSERT(m_header.m_usecount > 0);
      return --m_header.m_usecount;
   }

   //!Deprecated, use deallocate_free_blocks.
   void deallocate_free_chunks()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::deallocate_free_blocks();
   }

   //!Deprecated, use purge_blocks.
   void purge_chunks()
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      private_node_allocator_t::purge_blocks();
   }

   private:
   //!This struct includes needed data and derives from
   //!the mutex type to allow EBO when using null_mutex
   struct header_t : mutex_type
   {
      size_type m_usecount;    //Number of attached allocators

      header_t()
         : m_usecount(0) {}
   } m_header;
};
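
//shared_pool_impl serializes every operation of the wrapped
//private_node_allocator_t with the segment's mutex_family mutex and adds the
//use count consulted by get_or_create_node_pool/destroy_node_pool_if_last_link.
//Illustrative sketch (the private pool type below is a placeholder):
//
//   typedef ipcdetail::shared_pool_impl<my_private_node_pool> shared_pool_t;
//   shared_pool_t *pool =
//      ipcdetail::get_or_create_node_pool<shared_pool_t>(segment_mngr);
//   void *node = pool->allocate_node();    //locks m_header, allocates, unlocks
//   pool->deallocate_node(node);           //locks m_header, deallocates, unlocks
//   ipcdetail::destroy_node_pool_if_last_link(pool);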

} //namespace ipcdetail {
} //namespace interprocess {
} //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_ALLOCATOR_DETAIL_ALLOCATOR_COMMON_HPP