//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2013. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_CONTAINER_DETAIL_ADAPTIVE_NODE_POOL_IMPL_HPP
#define BOOST_CONTAINER_DETAIL_ADAPTIVE_NODE_POOL_IMPL_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif

#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/container/detail/config_begin.hpp>
#include <boost/container/detail/workaround.hpp>

// container
#include <boost/container/container_fwd.hpp>
#include <boost/container/throw_exception.hpp>
// container/detail
#include <boost/container/detail/pool_common.hpp>
#include <boost/container/detail/iterator.hpp>
#include <boost/move/detail/iterator_to_raw_pointer.hpp>
#include <boost/container/detail/math_functions.hpp>
#include <boost/container/detail/mpl.hpp>
#include <boost/move/detail/to_raw_pointer.hpp>
#include <boost/container/detail/type_traits.hpp>
// intrusive
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/slist.hpp>
// other
#include <boost/assert.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <cstddef>

namespace boost {
namespace container {

namespace adaptive_pool_flag {

static const unsigned int none            = 0u;
static const unsigned int align_only      = 1u << 0u;
static const unsigned int size_ordered    = 1u << 1u;
static const unsigned int address_ordered = 1u << 2u;

}  //namespace adaptive_pool_flag{
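
// These flags are bit masks and can be combined with bitwise OR. An
// illustrative sketch (the name "my_flags" is hypothetical):
//
//   static const unsigned int my_flags = adaptive_pool_flag::size_ordered
//                                      | adaptive_pool_flag::address_ordered;
//   //(my_flags & adaptive_pool_flag::size_ordered)    != 0   //true
//   //(my_flags & adaptive_pool_flag::address_ordered) != 0   //true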

namespace container_detail {

template<class size_type>
struct hdr_offset_holder_t
{
   hdr_offset_holder_t(size_type offset = 0)
      : hdr_offset(offset)
   {}
   size_type hdr_offset;
};

template<class SizeType, unsigned int Flags>
struct less_func;

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::none>
{
   static bool less(SizeType, SizeType, const void *, const void *)
   {  return true;  }
};

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::size_ordered>
{
   static bool less(SizeType ls, SizeType rs, const void *, const void *)
   {  return ls < rs;  }
};

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::address_ordered>
{
   static bool less(SizeType, SizeType, const void *la, const void *ra)
   {  return la < ra;  }  //compare the node addresses themselves, not the parameters' addresses
};

template<class SizeType>
struct less_func<SizeType, adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered>
{
   static bool less(SizeType ls, SizeType rs, const void *la, const void *ra)
   {  return (ls < rs) || ((ls == rs) && (la < ra));  }
};
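
// With both orderings requested, less() compares lexicographically: first by
// free-node count, then by block address on ties. Illustrative calls, with
// hypothetical pointers pa and pb:
//
//   less(3, 5, pa, pb)   //true: 3 < 5, addresses ignored
//   less(4, 4, pa, pb)   //decided by pa < pb, since the sizes tie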

template<class VoidPointer, class SizeType, bool ordered>
struct block_container_traits
{
   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type hook_t;

   template<class T>
   struct container
   {
      typedef typename bi::make_multiset
         <T, bi::base_hook<hook_t>, bi::size_type<SizeType> >::type  type;
   };

   template<class Container>
   static void reinsert_was_used(Container &container, typename Container::reference v, bool)
   {
      typedef typename Container::const_iterator const_block_iterator;
      const const_block_iterator this_block
         (Container::s_iterator_to(const_cast<typename Container::const_reference>(v)));
      const_block_iterator next_block(this_block);
      if(++next_block != container.cend()){
         if(this_block->free_nodes.size() > next_block->free_nodes.size()){
            container.erase(this_block);
            container.insert(v);
         }
      }
   }

   template<class Container>
   static void insert_was_empty(Container &container, typename Container::value_type &v, bool)
   {
      container.insert(v);
   }

   template<class Container>
   static void erase_first(Container &container)
   {
      container.erase(container.cbegin());
   }

   template<class Container>
   static void erase_last(Container &container)
   {
      container.erase(--container.cend());
   }
};

template<class VoidPointer, class SizeType>
struct block_container_traits<VoidPointer, SizeType, false>
{
   typedef typename bi::make_list_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::link_mode<bi::normal_link> >::type hook_t;

   template<class T>
   struct container
   {
      typedef typename bi::make_list
         <T, bi::base_hook<hook_t>, bi::size_type<SizeType>, bi::constant_time_size<false> >::type  type;
   };

   template<class Container>
   static void reinsert_was_used(Container &container, typename Container::value_type &v, bool is_full)
   {
      if(is_full){
         container.erase(Container::s_iterator_to(v));
         container.push_back(v);
      }
   }

   template<class Container>
   static void insert_was_empty(Container &container, typename Container::value_type &v, bool is_full)
   {
      if(is_full){
         container.push_back(v);
      }
      else{
         container.push_front(v);
      }
   }

   template<class Container>
   static void erase_first(Container &container)
   {
      container.pop_front();
   }

   template<class Container>
   static void erase_last(Container &container)
   {
      container.pop_back();
   }
};

template<class MultiallocationChain, class VoidPointer, class SizeType, unsigned int Flags>
struct adaptive_pool_types
{
   typedef VoidPointer void_pointer;
   static const bool ordered = (Flags & (adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered)) != 0;
   typedef block_container_traits<VoidPointer, SizeType, ordered> block_container_traits_t;
   typedef typename block_container_traits_t::hook_t hook_t;
   typedef hdr_offset_holder_t<SizeType> hdr_offset_holder;
   static const unsigned int order_flags = Flags & (adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered);
   typedef MultiallocationChain free_nodes_t;

   struct block_info_t
      : public hdr_offset_holder,
        public hook_t
   {
      //An intrusive list of free nodes from this block
      free_nodes_t free_nodes;
      friend bool operator <(const block_info_t &l, const block_info_t &r)
      {
         return less_func<SizeType, order_flags>::
            less(l.free_nodes.size(), r.free_nodes.size(), &l, &r);
      }

      friend bool operator ==(const block_info_t &l, const block_info_t &r)
      {  return &l == &r;  }
   };
   typedef typename block_container_traits_t:: template container<block_info_t>::type  block_container_t;
};

template<class size_type>
inline size_type calculate_alignment
   ( size_type overhead_percent, size_type real_node_size
   , size_type hdr_size, size_type hdr_offset_size, size_type payload_per_allocation)
{
   //to-do: handle real_node_size != node_size
   const size_type divisor  = overhead_percent*real_node_size;
   const size_type dividend = hdr_offset_size*100;
   size_type elements_per_subblock = (dividend - 1)/divisor + 1;
   size_type candidate_power_of_2 =
      upper_power_of_2(elements_per_subblock*real_node_size + hdr_offset_size);
   bool overhead_satisfied = false;
   //Now calculate the worst-case overhead for a subblock
   const size_type max_subblock_overhead = hdr_size + payload_per_allocation;
   while(!overhead_satisfied){
      elements_per_subblock = (candidate_power_of_2 - max_subblock_overhead)/real_node_size;
      const size_type overhead_size = candidate_power_of_2 - elements_per_subblock*real_node_size;
      if(overhead_size*100/candidate_power_of_2 < overhead_percent){
         overhead_satisfied = true;
      }
      else{
         candidate_power_of_2 <<= 1;
      }
   }
   return candidate_power_of_2;
}
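
// A worked example of the search above, with hypothetical inputs
// (overhead_percent = 5, real_node_size = 16, hdr_size = 16,
//  hdr_offset_size = 8, payload_per_allocation = 16):
//
//   divisor  = 5*16  = 80;  dividend = 8*100 = 800
//   elements_per_subblock = (800-1)/80 + 1 = 10
//   candidate_power_of_2  = upper_power_of_2(10*16 + 8) = 256
//    256: overhead 32 -> 32*100/256  = 12% >= 5%, double to 512
//    512: overhead 32 -> 32*100/512  =  6% >= 5%, double to 1024
//   1024: overhead 32 -> 32*100/1024 =  3% <  5%, done -> return 1024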

template<class size_type>
inline void calculate_num_subblocks
   (size_type alignment, size_type real_node_size, size_type elements_per_block
   , size_type &num_subblocks, size_type &real_num_node, size_type overhead_percent
   , size_type hdr_size, size_type hdr_offset_size, size_type payload_per_allocation)
{
   const size_type hdr_subblock_elements = (alignment - hdr_size - payload_per_allocation)/real_node_size;
   size_type elements_per_subblock = (alignment - hdr_offset_size)/real_node_size;
   size_type possible_num_subblock = (elements_per_block - 1)/elements_per_subblock + 1;
   while(((possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements) < elements_per_block){
      ++possible_num_subblock;
   }
   elements_per_subblock = (alignment - hdr_offset_size)/real_node_size;
   bool overhead_satisfied = false;
   while(!overhead_satisfied){
      const size_type total_data = (elements_per_subblock*(possible_num_subblock-1) + hdr_subblock_elements)*real_node_size;
      const size_type total_size = alignment*possible_num_subblock;
      if((total_size - total_data)*100/total_size < overhead_percent){
         overhead_satisfied = true;
      }
      else{
         ++possible_num_subblock;
      }
   }
   num_subblocks = possible_num_subblock;
   real_num_node = (possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements;
}
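
// Continuing the hypothetical numbers from the example above
// (alignment = 1024, real_node_size = 16, elements_per_block = 64,
//  overhead_percent = 5, hdr_size = 16, hdr_offset_size = 8,
//  payload_per_allocation = 16):
//
//   hdr_subblock_elements = (1024-16-16)/16 = 62
//   elements_per_subblock = (1024-8)/16     = 63
//   possible_num_subblock = (64-1)/63 + 1   = 2
//   total_data = (63*1 + 62)*16 = 2000;  total_size = 1024*2 = 2048
//   (2048-2000)*100/2048 = 2% < 5% -> num_subblocks = 2, real_num_node = 125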

template<class SegmentManagerBase, unsigned int Flags>
class private_adaptive_node_pool_impl
{
   //Non-copyable
   private_adaptive_node_pool_impl();
   private_adaptive_node_pool_impl(const private_adaptive_node_pool_impl &);
   private_adaptive_node_pool_impl &operator=(const private_adaptive_node_pool_impl &);
   typedef private_adaptive_node_pool_impl this_type;

   typedef typename SegmentManagerBase::void_pointer void_pointer;
   static const typename SegmentManagerBase::
      size_type PayloadPerAllocation = SegmentManagerBase::PayloadPerAllocation;
   //Flags
   //align_only
   static const bool AlignOnly      = (Flags & adaptive_pool_flag::align_only) != 0;
   typedef bool_<AlignOnly>         IsAlignOnly;
   typedef true_                    AlignOnlyTrue;
   typedef false_                   AlignOnlyFalse;
   //size_ordered
   static const bool SizeOrdered    = (Flags & adaptive_pool_flag::size_ordered) != 0;
   typedef bool_<SizeOrdered>       IsSizeOrdered;
   typedef true_                    SizeOrderedTrue;
   typedef false_                   SizeOrderedFalse;
   //address_ordered
   static const bool AddressOrdered = (Flags & adaptive_pool_flag::address_ordered) != 0;
   typedef bool_<AddressOrdered>    IsAddressOrdered;
   typedef true_                    AddressOrderedTrue;
   typedef false_                   AddressOrderedFalse;

   public:
   typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain;
   typedef typename SegmentManagerBase::size_type             size_type;

   private:
   typedef adaptive_pool_types
      <multiallocation_chain, void_pointer, size_type, Flags> adaptive_pool_types_t;
   typedef typename adaptive_pool_types_t::free_nodes_t              free_nodes_t;
   typedef typename adaptive_pool_types_t::block_info_t              block_info_t;
   typedef typename adaptive_pool_types_t::block_container_t         block_container_t;
   typedef typename adaptive_pool_types_t::block_container_traits_t  block_container_traits_t;
   typedef typename block_container_t::iterator                      block_iterator;
   typedef typename block_container_t::const_iterator                const_block_iterator;
   typedef typename adaptive_pool_types_t::hdr_offset_holder         hdr_offset_holder;

   static const size_type MaxAlign = alignment_of<void_pointer>::value;
   static const size_type HdrSize  = ((sizeof(block_info_t)-1)/MaxAlign+1)*MaxAlign;
   static const size_type HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign;
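   // HdrSize and HdrOffsetSize round each header up to a multiple of MaxAlign
   // with the ((s-1)/a + 1)*a idiom. A hypothetical instance: with
   // sizeof(block_info_t) == 28 and MaxAlign == 8,
   // HdrSize = ((28-1)/8 + 1)*8 = (3 + 1)*8 = 32.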

   public:
   //!Segment manager typedef
   typedef SegmentManagerBase segment_manager_base_type;

   //!Constructor from a segment manager. Never throws
   private_adaptive_node_pool_impl
      ( segment_manager_base_type *segment_mngr_base
      , size_type node_size
      , size_type nodes_per_block
      , size_type max_free_blocks
      , unsigned char overhead_percent
      )
   :  m_max_free_blocks(max_free_blocks)
   ,  m_real_node_size(lcm(node_size, size_type(alignment_of<void_pointer>::value)))
      //Round the size to a power of two value.
      //This is the total memory size (including payload) that we want to
      //allocate from the general-purpose allocator
   ,  m_real_block_alignment
         (AlignOnly ?
            upper_power_of_2(HdrSize + m_real_node_size*nodes_per_block) :
            calculate_alignment( (size_type)overhead_percent, m_real_node_size
                               , HdrSize, HdrOffsetSize, PayloadPerAllocation))
      //This is the real number of nodes per block
   ,  m_num_subblocks(0)
   ,  m_real_num_node(AlignOnly ? (m_real_block_alignment - PayloadPerAllocation - HdrSize)/m_real_node_size : 0)
      //General purpose allocator
   ,  mp_segment_mngr_base(segment_mngr_base)
   ,  m_block_container()
   ,  m_totally_free_blocks(0)
   {
      if(!AlignOnly){
         calculate_num_subblocks
            ( m_real_block_alignment
            , m_real_node_size
            , nodes_per_block
            , m_num_subblocks
            , m_real_num_node
            , (size_type)overhead_percent
            , HdrSize
            , HdrOffsetSize
            , PayloadPerAllocation);
      }
   }
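
   // A minimal usage sketch, assuming a hypothetical "my_segment_manager"
   // type that models the SegmentManagerBase requirements used here
   // (size_type, void_pointer, multiallocation_chain, allocate_aligned,
   // deallocate_many):
   //
   //   my_segment_manager mngr;
   //   private_adaptive_node_pool_impl
   //      <my_segment_manager, adaptive_pool_flag::address_ordered>
   //         pool(&mngr, 32u /*node_size*/, 64u /*nodes_per_block*/
   //             , 2u /*max_free_blocks*/, 5u /*overhead_percent*/);
   //   void *n = pool.allocate_node();
   //   pool.deallocate_node(n);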

   //!Destructor. Deallocates all allocated blocks. Never throws
   ~private_adaptive_node_pool_impl()
   {  this->priv_clear();  }

   size_type get_real_num_node() const
   {  return m_real_num_node;  }

   //!Returns the segment manager. Never throws
   segment_manager_base_type* get_segment_manager_base() const
   {  return boost::movelib::to_raw_pointer(mp_segment_mngr_base);  }

   //!Allocates a single node. Can throw
   void *allocate_node()
   {
      this->priv_invariants();
      //If there are no free nodes we allocate a new block
      if(!m_block_container.empty()){
         //We take the first free node; the multiset can't be empty
         free_nodes_t &free_nodes = m_block_container.begin()->free_nodes;
         BOOST_ASSERT(!free_nodes.empty());
         const size_type free_nodes_count = free_nodes.size();
         void *first_node = boost::movelib::to_raw_pointer(free_nodes.pop_front());
         if(free_nodes.empty()){
            block_container_traits_t::erase_first(m_block_container);
         }
         m_totally_free_blocks -= static_cast<size_type>(free_nodes_count == m_real_num_node);
         this->priv_invariants();
         return first_node;
      }
      else{
         multiallocation_chain chain;
         this->priv_append_from_new_blocks(1, chain, IsAlignOnly());
         return boost::movelib::to_raw_pointer(chain.pop_front());
      }
   }

   //!Deallocates the node pointed to by pElem. Never throws
   void deallocate_node(void *pElem)
   {
      this->priv_invariants();
      block_info_t &block_info = *this->priv_block_from_node(pElem);
      BOOST_ASSERT(block_info.free_nodes.size() < m_real_num_node);

      //We put the node at the end of the free node list
      block_info.free_nodes.push_back(void_pointer(pElem));

      //The loop reinserts all blocks except the last one
      this->priv_reinsert_block(block_info, block_info.free_nodes.size() == 1);
      this->priv_deallocate_free_blocks(m_max_free_blocks);
      this->priv_invariants();
   }

   //!Allocates n nodes.
   //!Can throw
   void allocate_nodes(const size_type n, multiallocation_chain &chain)
   {
      size_type i = 0;
      BOOST_TRY{
         this->priv_invariants();
         while(i != n){
            //If there are no free nodes we allocate all needed blocks
            if (m_block_container.empty()){
               this->priv_append_from_new_blocks(n - i, chain, IsAlignOnly());
               BOOST_ASSERT(m_block_container.empty() || (++m_block_container.cbegin() == m_block_container.cend()));
               BOOST_ASSERT(chain.size() == n);
               break;
            }
            free_nodes_t &free_nodes = m_block_container.begin()->free_nodes;
            const size_type free_nodes_count_before = free_nodes.size();
            m_totally_free_blocks -= static_cast<size_type>(free_nodes_count_before == m_real_num_node);
            const size_type num_left  = n-i;
            const size_type num_elems = (num_left < free_nodes_count_before) ? num_left : free_nodes_count_before;
            typedef typename free_nodes_t::iterator free_nodes_iterator;

            if(num_left < free_nodes_count_before){
               const free_nodes_iterator it_bbeg(free_nodes.before_begin());
               free_nodes_iterator it_bend(it_bbeg);
               for(size_type j = 0; j != num_elems; ++j){
                  ++it_bend;
               }
               free_nodes_iterator it_end = it_bend; ++it_end;
               free_nodes_iterator it_beg = it_bbeg; ++it_beg;
               free_nodes.erase_after(it_bbeg, it_end, num_elems);
               chain.incorporate_after(chain.last(), &*it_beg, &*it_bend, num_elems);
               //chain.splice_after(chain.last(), free_nodes, it_bbeg, it_bend, num_elems);
               BOOST_ASSERT(!free_nodes.empty());
            }
            else{
               const free_nodes_iterator it_beg(free_nodes.begin()), it_bend(free_nodes.last());
               free_nodes.clear();
               chain.incorporate_after(chain.last(), &*it_beg, &*it_bend, num_elems);
               block_container_traits_t::erase_first(m_block_container);
            }
            i += num_elems;
         }
      }
      BOOST_CATCH(...){
         this->deallocate_nodes(chain);
         BOOST_RETHROW
      }
      BOOST_CATCH_END
      this->priv_invariants();
   }

   //!Deallocates a linked list of nodes. Never throws
   void deallocate_nodes(multiallocation_chain &nodes)
   {
      this->priv_invariants();
      //To take advantage of node locality, wait until two
      //nodes belong to different blocks. Only then reinsert
      //the block of the first node in the block tree.
      //Cache of the previous block
      block_info_t *prev_block_info = 0;

      //If block was empty before this call, it's not already
      //inserted in the block tree.
      bool prev_block_was_empty = false;
      typedef typename free_nodes_t::iterator free_nodes_iterator;
      {
         const free_nodes_iterator itbb(nodes.before_begin()), ite(nodes.end());
         free_nodes_iterator itf(nodes.begin()), itbf(itbb);
         size_type splice_node_count = size_type(-1);
         while(itf != ite){
            void *pElem = boost::movelib::to_raw_pointer(boost::movelib::iterator_to_raw_pointer(itf));
            block_info_t &block_info = *this->priv_block_from_node(pElem);
            BOOST_ASSERT(block_info.free_nodes.size() < m_real_num_node);
            ++splice_node_count;

            //If block change is detected calculate the cached block position in the tree
            if(&block_info != prev_block_info){
               if(prev_block_info){ //Make sure we skip the initial "dummy" cache
                  free_nodes_iterator it(itbb); ++it;
                  nodes.erase_after(itbb, itf, splice_node_count);
                  prev_block_info->free_nodes.incorporate_after(prev_block_info->free_nodes.last(), &*it, &*itbf, splice_node_count);
                  this->priv_reinsert_block(*prev_block_info, prev_block_was_empty);
                  splice_node_count = 0;
               }
               //Update cache with new data
               prev_block_was_empty = block_info.free_nodes.empty();
               prev_block_info = &block_info;
            }
            itbf = itf;
            ++itf;
         }
      }
      if(prev_block_info){
         //The loop reinserts all blocks except the last one
         const free_nodes_iterator itfirst(nodes.begin()), itlast(nodes.last());
         const size_type splice_node_count = nodes.size();
         nodes.clear();
         prev_block_info->free_nodes.incorporate_after(prev_block_info->free_nodes.last(), &*itfirst, &*itlast, splice_node_count);
         this->priv_reinsert_block(*prev_block_info, prev_block_was_empty);
         this->priv_invariants();
         this->priv_deallocate_free_blocks(m_max_free_blocks);
      }
   }
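
   // A bulk sketch for the two functions above, reusing the hypothetical
   // "pool" object from the constructor example:
   //
   //   multiallocation_chain chain;
   //   pool.allocate_nodes(10u, chain);   //chain now owns 10 nodes
   //   //...use the nodes...
   //   pool.deallocate_nodes(chain);      //returns them all; chain ends empty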

   void deallocate_free_blocks()
   {  this->priv_deallocate_free_blocks(0);  }

   size_type num_free_nodes()
   {
      typedef typename block_container_t::const_iterator citerator;
      size_type count = 0;
      citerator it(m_block_container.begin()), itend(m_block_container.end());
      for(; it != itend; ++it){
         count += it->free_nodes.size();
      }
      return count;
   }

   void swap(private_adaptive_node_pool_impl &other)
   {
      BOOST_ASSERT(m_max_free_blocks == other.m_max_free_blocks);
      BOOST_ASSERT(m_real_node_size == other.m_real_node_size);
      BOOST_ASSERT(m_real_block_alignment == other.m_real_block_alignment);
      BOOST_ASSERT(m_real_num_node == other.m_real_num_node);
      std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
      std::swap(m_totally_free_blocks, other.m_totally_free_blocks);
      m_block_container.swap(other.m_block_container);
   }

   //Deprecated, use deallocate_free_blocks
   void deallocate_free_chunks()
   {  this->priv_deallocate_free_blocks(0);  }

   private:

   void priv_deallocate_free_blocks(size_type max_free_blocks)
   {  //Trampoline function to ease inlining
      if(m_totally_free_blocks > max_free_blocks){
         this->priv_deallocate_free_blocks_impl(max_free_blocks);
      }
   }

   void priv_deallocate_free_blocks_impl(size_type max_free_blocks)
   {
      this->priv_invariants();
      //Now check if we've reached the free nodes limit
      //and check if we have free blocks. If so, deallocate as much
      //as we can to stay below the limit
      multiallocation_chain chain;
      {
         const const_block_iterator itend = m_block_container.cend();
         const_block_iterator it = itend;
         --it;
         size_type totally_free_blocks = m_totally_free_blocks;

         for( ; totally_free_blocks > max_free_blocks; --totally_free_blocks){
            BOOST_ASSERT(it->free_nodes.size() == m_real_num_node);
            void *addr = priv_first_subblock_from_block(const_cast<block_info_t*>(&*it));
            --it;
            block_container_traits_t::erase_last(m_block_container);
            chain.push_front(void_pointer(addr));
         }
         BOOST_ASSERT((m_totally_free_blocks - max_free_blocks) == chain.size());
         m_totally_free_blocks = max_free_blocks;
      }
      this->mp_segment_mngr_base->deallocate_many(chain);
   }

   void priv_reinsert_block(block_info_t &prev_block_info, const bool prev_block_was_empty)
   {
      //Cache the free nodes from the block
      const size_type this_block_free_nodes = prev_block_info.free_nodes.size();
      const bool is_full = this_block_free_nodes == m_real_num_node;

      //Update free block count
      m_totally_free_blocks += static_cast<size_type>(is_full);
      if(prev_block_was_empty){
         block_container_traits_t::insert_was_empty(m_block_container, prev_block_info, is_full);
      }
      else{
         block_container_traits_t::reinsert_was_used(m_block_container, prev_block_info, is_full);
      }
   }

   class block_destroyer;
   friend class block_destroyer;

   class block_destroyer
   {
      public:
      block_destroyer(const this_type *impl, multiallocation_chain &chain)
         : mp_impl(impl), m_chain(chain)
      {}

      void operator()(typename block_container_t::pointer to_deallocate)
      {  return this->do_destroy(to_deallocate, IsAlignOnly());  }

      private:
      void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyTrue)
      {
         BOOST_ASSERT(to_deallocate->free_nodes.size() == mp_impl->m_real_num_node);
         m_chain.push_back(to_deallocate);
      }

      void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyFalse)
      {
         BOOST_ASSERT(to_deallocate->free_nodes.size() == mp_impl->m_real_num_node);
         BOOST_ASSERT(0 == to_deallocate->hdr_offset);
         hdr_offset_holder *hdr_off_holder =
            mp_impl->priv_first_subblock_from_block(boost::movelib::to_raw_pointer(to_deallocate));
         m_chain.push_back(hdr_off_holder);
      }

      const this_type *mp_impl;
      multiallocation_chain &m_chain;
   };

   //This macro will activate invariant checking. Slow, but helpful for debugging the code.
   //#define BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
   void priv_invariants()
   #ifdef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
   #undef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
   {
      const const_block_iterator itend(m_block_container.end());

      {  //Check that the block container is correctly ordered
         const_block_iterator it(m_block_container.begin());

         if(it != itend){
            for(++it; it != itend; ++it){
               const_block_iterator prev(it);
               --prev;
               BOOST_ASSERT(*prev < *it);
               (void)prev;   (void)it;
            }
         }
      }
      {  //Check that the total free nodes are correct
         const_block_iterator it(m_block_container.cbegin());
         size_type total_free_nodes = 0;
         for(; it != itend; ++it){
            total_free_nodes += it->free_nodes.size();
         }
         BOOST_ASSERT(total_free_nodes >= m_totally_free_blocks*m_real_num_node);
      }
      {  //Check that the count of totally free blocks is correct
         BOOST_ASSERT(m_block_container.size() >= m_totally_free_blocks);
         const_block_iterator it = m_block_container.cend();
         size_type total_free_blocks = m_totally_free_blocks;
         while(total_free_blocks--){
            BOOST_ASSERT((--it)->free_nodes.size() == m_real_num_node);
         }
      }

      if(!AlignOnly){
         //Check that header offsets are correct
         const_block_iterator it = m_block_container.begin();
         for(; it != itend; ++it){
            hdr_offset_holder *hdr_off_holder = this->priv_first_subblock_from_block(const_cast<block_info_t *>(&*it));
            for(size_type i = 0, max = m_num_subblocks; i < max; ++i){
               const size_type offset = reinterpret_cast<char*>(const_cast<block_info_t *>(&*it)) - reinterpret_cast<char*>(hdr_off_holder);
               BOOST_ASSERT(hdr_off_holder->hdr_offset == offset);
               BOOST_ASSERT(0 == ((size_type)hdr_off_holder & (m_real_block_alignment - 1)));
               BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
               hdr_off_holder = reinterpret_cast<hdr_offset_holder *>(reinterpret_cast<char*>(hdr_off_holder) + m_real_block_alignment);
            }
         }
      }
   }
   #else
   {} //empty
   #endif

   //!Deallocates all used memory. Never throws
   void priv_clear()
   {
      #ifndef NDEBUG
      block_iterator it    = m_block_container.begin();
      block_iterator itend = m_block_container.end();
      size_type n_free_nodes = 0;
      for(; it != itend; ++it){
         //Check for memory leak
         BOOST_ASSERT(it->free_nodes.size() == m_real_num_node);
         ++n_free_nodes;
      }
      BOOST_ASSERT(n_free_nodes == m_totally_free_blocks);
      #endif
      //Check for memory leaks
      this->priv_invariants();
      multiallocation_chain chain;
      m_block_container.clear_and_dispose(block_destroyer(this, chain));
      this->mp_segment_mngr_base->deallocate_many(chain);
      m_totally_free_blocks = 0;
   }

   block_info_t *priv_block_from_node(void *node, AlignOnlyFalse) const
   {
      hdr_offset_holder *hdr_off_holder =
         reinterpret_cast<hdr_offset_holder*>((std::size_t)node & size_type(~(m_real_block_alignment - 1)));
      BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
      BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
      block_info_t *block = reinterpret_cast<block_info_t *>
         (reinterpret_cast<char*>(hdr_off_holder) + hdr_off_holder->hdr_offset);
      BOOST_ASSERT(block->hdr_offset == 0);
      return block;
   }

   block_info_t *priv_block_from_node(void *node, AlignOnlyTrue) const
   {
      return (block_info_t *)((std::size_t)node & std::size_t(~(m_real_block_alignment - 1)));
   }

   block_info_t *priv_block_from_node(void *node) const
   {  return this->priv_block_from_node(node, IsAlignOnly());  }

   hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block) const
   {  return this->priv_first_subblock_from_block(block, IsAlignOnly());  }

   hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, AlignOnlyFalse) const
   {
      hdr_offset_holder *const hdr_off_holder = reinterpret_cast<hdr_offset_holder*>
         (reinterpret_cast<char*>(block) - (m_num_subblocks-1)*m_real_block_alignment);
      BOOST_ASSERT(hdr_off_holder->hdr_offset == size_type(reinterpret_cast<char*>(block) - reinterpret_cast<char*>(hdr_off_holder)));
      BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
      BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
      return hdr_off_holder;
   }

   hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, AlignOnlyTrue) const
   {
      return reinterpret_cast<hdr_offset_holder*>(block);
   }

   void priv_dispatch_block_chain_or_free
      ( multiallocation_chain &chain, block_info_t &c_info, size_type num_node
      , char *mem_address, size_type total_elements, bool insert_block_if_free)
   {
      BOOST_ASSERT(chain.size() <= total_elements);
      //First add all possible nodes to the chain
      const size_type left = total_elements - chain.size();
      const size_type max_chain = (num_node < left) ? num_node : left;
      mem_address = static_cast<char *>(boost::movelib::to_raw_pointer
         (chain.incorporate_after(chain.last(), void_pointer(mem_address), m_real_node_size, max_chain)));
      //Now store remaining nodes in the free list
      if(const size_type max_free = num_node - max_chain){
         free_nodes_t &free_nodes = c_info.free_nodes;
         free_nodes.incorporate_after(free_nodes.last(), void_pointer(mem_address), m_real_node_size, max_free);
         if(insert_block_if_free){
            m_block_container.push_front(c_info);
         }
      }
   }

   //!Allocates several blocks of nodes. Can throw
   void priv_append_from_new_blocks(size_type min_elements, multiallocation_chain &chain, AlignOnlyTrue)
   {
      BOOST_ASSERT(m_block_container.empty());
      BOOST_ASSERT(min_elements > 0);
      const size_type n = (min_elements - 1)/m_real_num_node + 1;
      const size_type real_block_size = m_real_block_alignment - PayloadPerAllocation;
      const size_type total_elements = chain.size() + min_elements;
      for(size_type i = 0; i != n; ++i){
         //We allocate a new NodeBlock and put it as the last
         //element of the tree
         char *mem_address = static_cast<char*>
            (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment));
         if(!mem_address){
            //In case of error, free memory deallocating all nodes (the new ones allocated
            //in this function plus previously stored nodes in chain).
            this->deallocate_nodes(chain);
            throw_bad_alloc();
         }
         block_info_t &c_info = *new(mem_address)block_info_t();
         mem_address += HdrSize;
         if(i != (n-1)){
            chain.incorporate_after(chain.last(), void_pointer(mem_address), m_real_node_size, m_real_num_node);
         }
         else{
            this->priv_dispatch_block_chain_or_free(chain, c_info, m_real_num_node, mem_address, total_elements, true);
         }
      }
   }

   void priv_append_from_new_blocks(size_type min_elements, multiallocation_chain &chain, AlignOnlyFalse)
   {
      BOOST_ASSERT(m_block_container.empty());
      BOOST_ASSERT(min_elements > 0);
      const size_type n = (min_elements - 1)/m_real_num_node + 1;
      const size_type real_block_size = m_real_block_alignment*m_num_subblocks - PayloadPerAllocation;
      const size_type elements_per_subblock = (m_real_block_alignment - HdrOffsetSize)/m_real_node_size;
      const size_type hdr_subblock_elements = (m_real_block_alignment - HdrSize - PayloadPerAllocation)/m_real_node_size;
      const size_type total_elements = chain.size() + min_elements;

      for(size_type i = 0; i != n; ++i){
         //We allocate a new NodeBlock and put it as the last
         //element of the tree
         char *mem_address = static_cast<char*>
            (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment));
         if(!mem_address){
            //In case of error, free memory deallocating all nodes (the new ones allocated
            //in this function plus previously stored nodes in chain).
            this->deallocate_nodes(chain);
            throw_bad_alloc();
         }
         //First initialize header information on the last subblock
         char *hdr_addr = mem_address + m_real_block_alignment*(m_num_subblocks-1);
         block_info_t &c_info = *new(hdr_addr)block_info_t();
         //Some structural checks
         BOOST_ASSERT(static_cast<void*>(&static_cast<hdr_offset_holder&>(c_info).hdr_offset) ==
                      static_cast<void*>(&c_info));   (void)c_info;
         if(i != (n-1)){
            for( size_type subblock = 0, maxsubblock = m_num_subblocks - 1
               ; subblock < maxsubblock
               ; ++subblock, mem_address += m_real_block_alignment){
               //Initialize header offset mark
               new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address));
               chain.incorporate_after
                  (chain.last(), void_pointer(mem_address + HdrOffsetSize), m_real_node_size, elements_per_subblock);
            }
            chain.incorporate_after(chain.last(), void_pointer(hdr_addr + HdrSize), m_real_node_size, hdr_subblock_elements);
         }
         else{
            for( size_type subblock = 0, maxsubblock = m_num_subblocks - 1
               ; subblock < maxsubblock
               ; ++subblock, mem_address += m_real_block_alignment){
               //Initialize header offset mark
               new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address));
               this->priv_dispatch_block_chain_or_free
                  (chain, c_info, elements_per_subblock, mem_address + HdrOffsetSize, total_elements, false);
            }
            this->priv_dispatch_block_chain_or_free
               (chain, c_info, hdr_subblock_elements, hdr_addr + HdrSize, total_elements, true);
         }
      }
   }

   private:
   typedef typename boost::intrusive::pointer_traits
      <void_pointer>::template rebind_pointer<segment_manager_base_type>::type   segment_mngr_base_ptr_t;
   const size_type m_max_free_blocks;
   const size_type m_real_node_size;
   //Round the size to a power of two value.
   //This is the total memory size (including payload) that we want to
   //allocate from the general-purpose allocator
   const size_type m_real_block_alignment;
   size_type m_num_subblocks;
   //This is the real number of nodes per block
   //const
   size_type m_real_num_node;
   segment_mngr_base_ptr_t mp_segment_mngr_base;   //Segment manager
   block_container_t       m_block_container;      //Intrusive block list
   size_type               m_totally_free_blocks;  //Free blocks
};

}  //namespace container_detail {
}  //namespace container {
}  //namespace boost {

#include <boost/container/detail/config_end.hpp>

#endif   //#ifndef BOOST_CONTAINER_DETAIL_ADAPTIVE_NODE_POOL_IMPL_HPP