//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP
#define BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/containers/allocation_type.hpp>
// interprocess/detail
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container/detail
#include <boost/container/detail/multiallocation_chain.hpp>
#include <boost/container/detail/placement_new.hpp>
// move
#include <boost/move/utility_core.hpp>
// other boost
#include <boost/static_assert.hpp>
#include <boost/assert.hpp>

//!\file
//!Implements common operations for memory algorithms.

namespace boost {
namespace interprocess {
namespace ipcdetail {

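//!Move-only wrapper over the container's multiallocation chain that
//!converts the stored void_pointer back to a raw pointer in pop_front(),
//!which is the form the memory algorithms below work with.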
template<class VoidPointer>
class basic_multiallocation_chain
   : public boost::container::container_detail::
      basic_multiallocation_chain<VoidPointer>
{
   BOOST_MOVABLE_BUT_NOT_COPYABLE(basic_multiallocation_chain)
   typedef boost::container::container_detail::
      basic_multiallocation_chain<VoidPointer> base_t;
   public:

   basic_multiallocation_chain()
      :  base_t()
   {}

   basic_multiallocation_chain(BOOST_RV_REF(basic_multiallocation_chain) other)
      :  base_t(::boost::move(static_cast<base_t&>(other)))
   {}

   basic_multiallocation_chain& operator=(BOOST_RV_REF(basic_multiallocation_chain) other)
   {
      this->base_t::operator=(::boost::move(static_cast<base_t&>(other)));
      return *this;
   }

   void *pop_front()
   {
      return boost::interprocess::ipcdetail::to_raw_pointer(this->base_t::pop_front());
   }
};


//!This class implements several allocation functions shared by different algorithms
//!(aligned allocation, multiple allocation...).
template<class MemoryAlgorithm>
class memory_algorithm_common
{
   public:
   typedef typename MemoryAlgorithm::void_pointer           void_pointer;
   typedef typename MemoryAlgorithm::block_ctrl             block_ctrl;
   typedef typename MemoryAlgorithm::multiallocation_chain  multiallocation_chain;
   typedef memory_algorithm_common<MemoryAlgorithm>         this_type;
   typedef typename MemoryAlgorithm::size_type              size_type;

   static const size_type Alignment             = MemoryAlgorithm::Alignment;
   static const size_type MinBlockUnits         = MemoryAlgorithm::MinBlockUnits;
   static const size_type AllocatedCtrlBytes    = MemoryAlgorithm::AllocatedCtrlBytes;
   static const size_type AllocatedCtrlUnits    = MemoryAlgorithm::AllocatedCtrlUnits;
   static const size_type BlockCtrlBytes        = MemoryAlgorithm::BlockCtrlBytes;
   static const size_type BlockCtrlUnits        = MemoryAlgorithm::BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = MemoryAlgorithm::UsableByPreviousChunk;

   static void assert_alignment(const void *ptr)
   {  assert_alignment((std::size_t)ptr); }

   static void assert_alignment(size_type uint_ptr)
   {
      (void)uint_ptr;
      BOOST_ASSERT(uint_ptr % Alignment == 0);
   }

   static bool check_alignment(const void *ptr)
   {  return (((std::size_t)ptr) % Alignment == 0);  }

   static size_type ceil_units(size_type size)
   {  return get_rounded_size(size, Alignment)/Alignment; }

   static size_type floor_units(size_type size)
   {  return size/Alignment;  }

   static size_type multiple_of_units(size_type size)
   {  return get_rounded_size(size, Alignment);  }
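   //E.g., with Alignment == 16: ceil_units(33) == 3 (33 bytes rounded up to 48),
   //floor_units(33) == 2 (rounded down to 32) and multiple_of_units(33) == 48.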

   static void allocate_many
      (MemoryAlgorithm *memory_algo, size_type elem_bytes, size_type n_elements, multiallocation_chain &chain)
   {
      return this_type::priv_allocate_many(memory_algo, &elem_bytes, n_elements, 0, chain);
   }

   static void deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
   {
      return this_type::priv_deallocate_many(memory_algo, chain);
   }

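   //Computes lcm_out, the least common multiple of backwards_multiple and
   //Alignment (with fast paths when backwards_multiple is a power of two or a
   //multiple of a fraction of Alignment), and needs_backwards_lcmed_out, the
   //backwards expansion (rounded up to that lcm) needed to grow a buffer of
   //received_size bytes to at least size_to_achieve bytes. Returns false when
   //no expansion preserving the backwards_multiple is possible.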
   static bool calculate_lcm_and_needs_backwards_lcmed
      (size_type backwards_multiple, size_type received_size, size_type size_to_achieve,
       size_type &lcm_out, size_type &needs_backwards_lcmed_out)
   {
      // Now calculate lcm_val
      size_type max = backwards_multiple;
      size_type min = Alignment;
      size_type needs_backwards;
      size_type needs_backwards_lcmed;
      size_type lcm_val;
      size_type current_forward;
      //Swap if necessary
      if(max < min){
         size_type tmp = min;
         min = max;
         max = tmp;
      }
      //Check if it's a power of two
      if((backwards_multiple & (backwards_multiple-1)) == 0){
         if(0 != (size_to_achieve & ((backwards_multiple-1)))){
            return false;
         }

         lcm_val = max;
         //If maxbytes can't be achieved and we want to use minbytes data
         //to get a buffer between maxbytes and minbytes, calculate the
         //biggest of all possibilities
         current_forward = get_truncated_size_po2(received_size, backwards_multiple);
         needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
         needs_backwards_lcmed = get_rounded_size_po2(needs_backwards, lcm_val);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of the alignment
      else if((backwards_multiple & (Alignment - 1u)) == 0){
         lcm_val = backwards_multiple;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         //No need to round needs_backwards because backwards_multiple == lcm_val
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         BOOST_ASSERT((needs_backwards_lcmed & (Alignment - 1u)) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of half the alignment
      else if((backwards_multiple & ((Alignment/2u) - 1u)) == 0){
         lcm_val = backwards_multiple*2u;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         if(0 != (needs_backwards_lcmed & (Alignment-1)))
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
            needs_backwards_lcmed += backwards_multiple;
         BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      //Check if it's a multiple of a quarter of the alignment
      else if((backwards_multiple & ((Alignment/4u) - 1u)) == 0){
         size_type remainder;
         lcm_val = backwards_multiple*4u;
         current_forward = get_truncated_size(received_size, backwards_multiple);
         needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
         //while(0 != (needs_backwards_lcmed & (Alignment-1)))
         //   needs_backwards_lcmed += backwards_multiple;
         if(0 != (remainder = ((needs_backwards_lcmed & (Alignment-1))>>(Alignment/8u)))){
            if(backwards_multiple & Alignment/2u){
               needs_backwards_lcmed += (remainder)*backwards_multiple;
            }
            else{
               needs_backwards_lcmed += (4-remainder)*backwards_multiple;
            }
         }
         BOOST_ASSERT((needs_backwards_lcmed % lcm_val) == 0);
         lcm_out = lcm_val;
         needs_backwards_lcmed_out = needs_backwards_lcmed;
         return true;
      }
      else{
         lcm_val = lcm(max, min);
      }
      //If maxbytes can't be achieved and we want to use minbytes data
      //to get a buffer between maxbytes and minbytes, calculate the
      //biggest of all possibilities
      current_forward = get_truncated_size(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      BOOST_ASSERT((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = get_rounded_size(needs_backwards, lcm_val);
      lcm_out = lcm_val;
      needs_backwards_lcmed_out = needs_backwards_lcmed;
      return true;
   }

   static void allocate_many
      ( MemoryAlgorithm *memory_algo
      , const size_type *elem_sizes
      , size_type n_elements
      , size_type sizeof_element
      , multiallocation_chain &chain)
   {
      this_type::priv_allocate_many(memory_algo, elem_sizes, n_elements, sizeof_element, chain);
   }

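   //Allocates nbytes bytes aligned to "alignment" (a power of two bigger than
   //Alignment). The strategy: over-allocate so that an aligned address is
   //guaranteed to fall inside the buffer with room for a block header before
   //it, then split the buffer, return the aligned block and give the unused
   //fragments back to the algorithm.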
   static void* allocate_aligned
      (MemoryAlgorithm *memory_algo, size_type nbytes, size_type alignment)
   {

      //Ensure power of 2
      if ((alignment & (alignment - size_type(1u))) != 0){
         //Alignment is not power of two
         BOOST_ASSERT((alignment & (alignment - size_type(1u))) == 0);
         return 0;
      }

      size_type real_size = nbytes;
      if(alignment <= Alignment){
         void *ignore_reuse = 0;
         return memory_algo->priv_allocate
            (boost::interprocess::allocate_new, nbytes, real_size, ignore_reuse);
      }

      if(nbytes > UsableByPreviousChunk)
         nbytes -= UsableByPreviousChunk;

      //We can find an aligned portion if we allocate a block that has alignment
      //nbytes + alignment bytes or more.
      size_type minimum_allocation = max_value
         (nbytes + alignment, size_type(MinBlockUnits*Alignment));
      //Since we will split that block, we must request a bit more memory
      //if the alignment is near the beginning of the buffer, because otherwise,
      //there is no space for a new block before the alignment.
      //
      //            ____ Aligned here
      //           |
      //  -----------------------------------------------------
      // | MBU |
      //  -----------------------------------------------------
      size_type request =
         minimum_allocation + (2*MinBlockUnits*Alignment - AllocatedCtrlBytes
         //prevsize - UsableByPreviousChunk
         );

      //Now allocate the buffer
      real_size = request;
      void *ignore_reuse = 0;
      void *buffer = memory_algo->priv_allocate(boost::interprocess::allocate_new, request, real_size, ignore_reuse);
      if(!buffer){
         return 0;
      }
      else if ((((std::size_t)(buffer)) % alignment) == 0){
         //If we are lucky and the buffer is aligned, just split it,
         //return the beginning and deallocate the tail
         block_ctrl *first = memory_algo->priv_get_block(buffer);
         size_type old_size = first->m_size;
         const size_type first_min_units =
            max_value(ceil_units(nbytes) + AllocatedCtrlUnits, size_type(MinBlockUnits));
         //We can create a new block in the end of the segment
         if(old_size >= (first_min_units + MinBlockUnits)){
            block_ctrl *second = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(first) + Alignment*first_min_units);
            first->m_size  = first_min_units;
            second->m_size = old_size - first->m_size;
            BOOST_ASSERT(second->m_size >= MinBlockUnits);
            memory_algo->priv_mark_new_allocated_block(first);
            memory_algo->priv_mark_new_allocated_block(second);
            memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(second));
         }
         return buffer;
      }

      //Buffer not aligned, find the aligned part.
      //
      //                    ____ Aligned here
      //                   |
      //  -----------------------------------------------------
      // | MBU +more | ACB |
      //  -----------------------------------------------------
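      //Adding (alignment - 1) and masking with -alignment (all bits set above
      //log2(alignment), since alignment is a power of two) rounds the address
      //up to the next multiple of "alignment".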
      char *pos = reinterpret_cast<char*>
         (reinterpret_cast<std::size_t>(static_cast<char*>(buffer) +
            //This is the minimum size of (2)
            (MinBlockUnits*Alignment - AllocatedCtrlBytes) +
            //This is the next MBU for the aligned memory
            AllocatedCtrlBytes +
            //This is the alignment trick
            alignment - 1) & -alignment);

      //Now obtain the address of the blocks
      block_ctrl *first  = memory_algo->priv_get_block(buffer);
      block_ctrl *second = memory_algo->priv_get_block(pos);
      BOOST_ASSERT(pos <= (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      BOOST_ASSERT(first->m_size >= 2*MinBlockUnits);
      BOOST_ASSERT((pos + MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment) <=
         (reinterpret_cast<char*>(first) + first->m_size*Alignment));
      //Set the new size of the first block
      size_type old_size = first->m_size;
      first->m_size = (size_type)(reinterpret_cast<char*>(second) - reinterpret_cast<char*>(first))/Alignment;
      memory_algo->priv_mark_new_allocated_block(first);

      //Now check if we can create a new buffer in the end
      //
      //              __"second" block
      //             |      __Aligned here
      //             |     |      __"third" block
      //  -----------|-----|-----|------------------------------
      // | MBU +more | ACB | (3) | BCU |
      //  -----------------------------------------------------
      //This size will be the minimum size to be able to create a
      //new block in the end.
      const size_type second_min_units = max_value(size_type(MinBlockUnits),
         ceil_units(nbytes) + AllocatedCtrlUnits );

      //Check if we can create a new block (of size MinBlockUnits) in the end of the segment
      if((old_size - first->m_size) >= (second_min_units + MinBlockUnits)){
         //Now obtain the address of the end block
         block_ctrl *third = new (reinterpret_cast<char*>(second) + Alignment*second_min_units)block_ctrl;
         second->m_size = second_min_units;
         third->m_size  = old_size - first->m_size - second->m_size;
         BOOST_ASSERT(third->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
         memory_algo->priv_mark_new_allocated_block(third);
         memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(third));
      }
      else{
         second->m_size = old_size - first->m_size;
         BOOST_ASSERT(second->m_size >= MinBlockUnits);
         memory_algo->priv_mark_new_allocated_block(second);
      }

      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(first));
      return memory_algo->priv_get_user_buffer(second);
   }

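   //Checks whether the block pointed by "ptr" could be shrunk in place so
   //that its user size does not exceed max_size while honoring the preferred
   //size passed in through "received_size". On success "received_size" is
   //updated with the achievable size; the block itself is never modified.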
   static bool try_shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const size_type max_size, size_type &received_size)
   {
      size_type const preferred_size = received_size;
      (void)memory_algo;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      size_type old_block_units = (size_type)block->m_size;

      //The block must be marked as allocated
      BOOST_ASSERT(memory_algo->priv_is_allocated_block(block));

      //Check if alignment and block size are right
      assert_alignment(ptr);

      //Put this to a safe value
      received_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

      //Now translate it to Alignment units
      const size_type max_user_units       = floor_units(max_size - UsableByPreviousChunk);
      const size_type preferred_user_units = ceil_units(preferred_size - UsableByPreviousChunk);

      //Check that the rounded max and preferred sizes are compatible
      if(max_user_units < preferred_user_units)
         return false;

      //Check if the block is smaller than the requested minimum
      size_type old_user_units = old_block_units - AllocatedCtrlUnits;

      if(old_user_units < preferred_user_units)
         return false;

      //If the block is already the preferred size there is nothing to shrink
      if(old_user_units == preferred_user_units)
         return true;

      size_type shrunk_user_units =
         ((BlockCtrlUnits - AllocatedCtrlUnits) >= preferred_user_units)
         ? (BlockCtrlUnits - AllocatedCtrlUnits)
         : preferred_user_units;

      //Some parameter checks
      if(max_user_units < shrunk_user_units)
         return false;

      //We must be able to create at least a new empty block
      if((old_user_units - shrunk_user_units) < BlockCtrlUnits ){
         return false;
      }

      //Update new size
      received_size = shrunk_user_units*Alignment + UsableByPreviousChunk;
      return true;
   }

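   //Shrinks the block pointed by "ptr" in place using try_shrink and, when
   //splitting is possible, carves the saved space off as a new block and
   //deallocates it so the memory is returned to the algorithm.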
   static bool shrink
      (MemoryAlgorithm *memory_algo, void *ptr
      ,const size_type max_size, size_type &received_size)
   {
      size_type const preferred_size = received_size;
      //Obtain the real block
      block_ctrl *block = memory_algo->priv_get_block(ptr);
      size_type old_block_units = (size_type)block->m_size;

      if(!try_shrink(memory_algo, ptr, max_size, received_size)){
         return false;
      }

      //Check if the old size was just the shrunk size (no splitting)
      if((old_block_units - AllocatedCtrlUnits) == ceil_units(preferred_size - UsableByPreviousChunk))
         return true;

      //Now we can just rewrite the size of the old buffer
      block->m_size = (received_size-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //We create the new block
      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      //Write control data to simulate that this new block was previously
      //allocated and deallocate it
      new_block->m_size = old_block_units - block->m_size;
      BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
      memory_algo->priv_mark_new_allocated_block(block);
      memory_algo->priv_mark_new_allocated_block(new_block);
      memory_algo->priv_deallocate(memory_algo->priv_get_user_buffer(new_block));
      return true;
   }

   private:
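   //Serves the n_elements requests with a single underlying allocation and
   //then carves that buffer into individually allocated blocks, shrinking the
   //last one when the leftover space is worth a free block. Each resulting
   //user buffer is linked into "chain" through an intrusive void_pointer
   //constructed in place at its beginning.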
   static void priv_allocate_many
      ( MemoryAlgorithm *memory_algo
      , const size_type *elem_sizes
      , size_type n_elements
      , size_type sizeof_element
      , multiallocation_chain &chain)
   {
      //Note: sizeof_element == 0 indicates that we want to
      //allocate n_elements of the same size "*elem_sizes"

      //Calculate the total size of all requests
      size_type total_request_units = 0;
      size_type elem_units = 0;
      const size_type ptr_size_units = memory_algo->priv_get_total_units(sizeof(void_pointer));
      if(!sizeof_element){
         elem_units = memory_algo->priv_get_total_units(*elem_sizes);
         elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
         total_request_units = n_elements*elem_units;
      }
      else{
         for(size_type i = 0; i < n_elements; ++i){
            if(multiplication_overflows(elem_sizes[i], sizeof_element)){
               total_request_units = 0;
               break;
            }
            elem_units = memory_algo->priv_get_total_units(elem_sizes[i]*sizeof_element);
            elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
            if(sum_overflows(total_request_units, elem_units)){
               total_request_units = 0;
               break;
            }
            total_request_units += elem_units;
         }
      }

      if(total_request_units && !multiplication_overflows(total_request_units, Alignment)){
         size_type low_idx = 0;
         while(low_idx < n_elements){
            size_type total_bytes = total_request_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
            size_type min_allocation = (!sizeof_element)
               ?  elem_units
               :  memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
            min_allocation = min_allocation*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;

            size_type received_size = total_bytes;
            void *ignore_reuse = 0;
            void *ret = memory_algo->priv_allocate
               (boost::interprocess::allocate_new, min_allocation, received_size, ignore_reuse);
            if(!ret){
               break;
            }

            block_ctrl *block = memory_algo->priv_get_block(ret);
            size_type received_units = (size_type)block->m_size;
            char *block_address = reinterpret_cast<char*>(block);

            size_type total_used_units = 0;
            while(total_used_units < received_units){
               if(sizeof_element){
                  elem_units = memory_algo->priv_get_total_units(elem_sizes[low_idx]*sizeof_element);
                  elem_units = ptr_size_units > elem_units ? ptr_size_units : elem_units;
               }
               if(total_used_units + elem_units > received_units)
                  break;
               total_request_units -= elem_units;
               //This is the position where the new block must be created
               block_ctrl *new_block = reinterpret_cast<block_ctrl *>(block_address);
               assert_alignment(new_block);

               //The last block should take all the remaining space
               if((low_idx + 1) == n_elements ||
                  (total_used_units + elem_units +
                  ((!sizeof_element)
                     ? elem_units
                     : max_value(memory_algo->priv_get_total_units(elem_sizes[low_idx+1]*sizeof_element), ptr_size_units))
                   > received_units)){
                  //By default, the new block will use the rest of the buffer
                  new_block->m_size = received_units - total_used_units;
                  memory_algo->priv_mark_new_allocated_block(new_block);

                  //If the remaining units are bigger than needed and we can
                  //split them, obtaining a new free memory block, do it.
                  if((received_units - total_used_units) >= (elem_units + MemoryAlgorithm::BlockCtrlUnits)){
                     size_type shrunk_request = elem_units*Alignment - AllocatedCtrlBytes + UsableByPreviousChunk;
                     size_type shrunk_received = shrunk_request;
                     bool shrink_ok = shrink
                        (memory_algo
                        ,memory_algo->priv_get_user_buffer(new_block)
                        ,shrunk_request
                        ,shrunk_received);
                     (void)shrink_ok;
                     //Shrink must always succeed with passed parameters
                     BOOST_ASSERT(shrink_ok);
                     //Some sanity checks
                     BOOST_ASSERT(shrunk_request == shrunk_received);
                     BOOST_ASSERT(elem_units == ((shrunk_request-UsableByPreviousChunk)/Alignment + AllocatedCtrlUnits));
                     //"new_block->m_size" must have been reduced to elem_units by "shrink"
                     BOOST_ASSERT(new_block->m_size == elem_units);
                     //Now update the total received units with the reduction
                     received_units = elem_units + total_used_units;
                  }
               }
               else{
                  new_block->m_size = elem_units;
                  memory_algo->priv_mark_new_allocated_block(new_block);
               }

               block_address += new_block->m_size*Alignment;
               total_used_units += (size_type)new_block->m_size;
               //Check we have enough room to overwrite the intrusive pointer
               BOOST_ASSERT((new_block->m_size*Alignment - AllocatedCtrlUnits) >= sizeof(void_pointer));
               void_pointer p = ::new(memory_algo->priv_get_user_buffer(new_block), boost_container_new_t())void_pointer(0);
               chain.push_back(p);
               ++low_idx;
            }
            //Sanity check
            BOOST_ASSERT(total_used_units == received_units);
         }

         if(low_idx != n_elements){
            priv_deallocate_many(memory_algo, chain);
         }
      }
   }

   static void priv_deallocate_many(MemoryAlgorithm *memory_algo, multiallocation_chain &chain)
   {
      while(!chain.empty()){
         memory_algo->priv_deallocate(to_raw_pointer(chain.pop_front()));
      }
   }
};

}  //namespace ipcdetail {
}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_DETAIL_MEM_ALGO_COMMON_HPP