//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
#define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP

#ifndef BOOST_CONFIG_HPP
#  include <boost/config.hpp>
#endif
#
#if defined(BOOST_HAS_PRAGMA_ONCE)
#  pragma once
#endif

#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>

// interprocess
#include <boost/interprocess/containers/allocation_type.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/interprocess_fwd.hpp>
#include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
#include <boost/interprocess/offset_ptr.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
// interprocess/detail
#include <boost/interprocess/detail/min_max.hpp>
#include <boost/interprocess/detail/math_functions.hpp>
#include <boost/interprocess/detail/type_traits.hpp>
#include <boost/interprocess/detail/utilities.hpp>
// container
#include <boost/container/detail/multiallocation_chain.hpp>
// container/detail
#include <boost/container/detail/placement_new.hpp>
// move/detail
#include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
// intrusive
#include <boost/intrusive/pointer_traits.hpp>
#include <boost/intrusive/set.hpp>
// other boost
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
// std
#include <climits>
#include <cstring>

//#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
//to maintain an ABI compatible with the original version.
//The ABI had to be updated to fix compatibility issues when
//sharing shared memory between 32 and 64 bit processes.

//!\file
//!Describes a best-fit algorithm based on an intrusive red-black tree used to allocate
//!objects in shared memory. This class is intended as a base class for single segment
//!and multi-segment implementations.
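//
//Illustrative sketch (not part of the original header): most code exercises this
//algorithm indirectly, since rbtree_best_fit<mutex_family> is the default memory
//algorithm of managed_shared_memory. Assuming a platform where shared memory
//creation succeeds and a hypothetical segment name "Demo", a minimal use is:
//
//   #include <boost/interprocess/managed_shared_memory.hpp>
//
//   int main()
//   {
//      using namespace boost::interprocess;
//      shared_memory_object::remove("Demo");
//      managed_shared_memory segment(create_only, "Demo", 65536);
//      void *p = segment.allocate(128);   //served by rbtree_best_fit::allocate
//      segment.deallocate(p);             //served by rbtree_best_fit::deallocate
//      shared_memory_object::remove("Demo");
//   }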

namespace boost {
namespace interprocess {

//!This class implements an algorithm that stores the free nodes in a red-black tree
//!to have logarithmic search/insert times.
template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
class rbtree_best_fit
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   rbtree_best_fit();
   rbtree_best_fit(const rbtree_best_fit &);
   rbtree_best_fit &operator=(const rbtree_best_fit &);

   private:
   struct block_ctrl;
   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type       block_ctrl_ptr;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type             char_ptr;

   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:
   //!Shared mutex family used for the rest of the Interprocess framework
   typedef MutexFamily  mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer  void_pointer;
   typedef ipcdetail::basic_multiallocation_chain<VoidPointer>  multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::container_detail::make_unsigned<difference_type>::type size_type;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   private:

   typedef typename bi::make_set_base_hook
      < bi::void_pointer<VoidPointer>
      , bi::optimize_size<true>
      , bi::link_mode<bi::normal_link> >::type  TreeHook;

   struct SizeHolder
   {
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_prev_size;
      size_type m_size           : sizeof(size_type)*CHAR_BIT - 2;
      size_type m_prev_allocated : 1;
      size_type m_allocated      : 1;
   };
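
   //Worked example (illustrative, assuming a 64-bit size_type): m_size keeps
   //sizeof(size_type)*CHAR_BIT - 2 == 62 bits, so a block can span up to
   //2^62 - 1 Alignment units; the two bits shaved off store the m_allocated and
   //m_prev_allocated flags without enlarging the header.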

   //!Block control structure
   struct block_ctrl
      : public SizeHolder, public TreeHook
   {
      block_ctrl()
      {  this->m_size = 0; this->m_allocated = 0; this->m_prev_allocated = 0;  }

      friend bool operator<(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size < b.m_size;  }
      friend bool operator==(const block_ctrl &a, const block_ctrl &b)
      {  return a.m_size == b.m_size;  }
   };

   struct size_block_ctrl_compare
   {
      bool operator()(size_type size, const block_ctrl &block) const
      {  return size < block.m_size;  }

      bool operator()(const block_ctrl &block, size_type size) const
      {  return block.m_size < size;  }
   };

   //!Shared mutex to protect memory allocation/deallocation
   typedef typename MutexFamily::mutex_type  mutex_type;
   typedef typename bi::make_multiset
      <block_ctrl, bi::base_hook<TreeHook> >::type  Imultiset;

   typedef typename Imultiset::iterator        imultiset_iterator;
   typedef typename Imultiset::const_iterator  imultiset_const_iterator;

   //!This struct includes needed data and derives from
   //!mutex_type to allow EBO when using null mutex_type
   struct header_t : public mutex_type
   {
      Imultiset  m_imultiset;

      //!The extra size required by the segment
      size_type  m_extra_hdr_bytes;
      //!Allocated bytes for internal checking
      size_type  m_allocated;
      //!The size of the memory segment
      size_type  m_size;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;

   typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;

   public:
   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Constructor. "size" is the total size of the managed memory segment and
   //!"extra_hdr_bytes" indicates the number of extra bytes, starting right after
   //!the sizeof(rbtree_best_fit) offset, that the allocator must not use at all.
   rbtree_best_fit(size_type size, size_type extra_hdr_bytes);

   //!Destructor.
   ~rbtree_best_fit();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is no more memory
   void* allocate (size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //Experimental. Don't use

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void deallocate (void *addr);

   //!Returns the size of the memory segment
   size_type get_size() const;

   //!Returns the number of free bytes of the segment
   size_type get_free_memory() const;

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   //!Increases managed memory by
   //!extra_size more bytes
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check
   //!and returns true on success
   bool check_sanity();

   template<class T>
   T * allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                           size_type &prefer_in_recvd_out_size, T *&reuse);

   void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
                                  size_type &prefer_in_recvd_out_size,
                                  void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the previously allocated buffer pointed to by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is no more memory.
   //!Alignment must be a power of 2
   void* allocate_aligned (size_type nbytes, size_type alignment);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);

   block_ctrl *priv_first_block();

   block_ctrl *priv_end_block();

   void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
                                 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);


   //!Real allocation algorithm with min allocation option
   void * priv_allocate( boost::interprocess::allocation_type command
                       , size_type limit_size, size_type &prefer_in_recvd_out_size
                       , void *&reuse_ptr, size_type backwards_multiple = 1);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Real expand function implementation
   bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size
                               ,size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards
                               ,size_type backwards_multiple);

   //!Returns true if the previous block is allocated
   bool priv_is_prev_allocated(block_ctrl *ptr);

   //!Get a pointer to the "end" block from the first block of the segment
   static block_ctrl * priv_end_block(block_ctrl *first_segment_block);

   //!Get a pointer to the "first" block from the end block of the segment
   static block_ctrl * priv_first_block(block_ctrl *end_segment_block);

   //!Get a pointer to the previous block (previous block must be free)
   static block_ctrl * priv_prev_block(block_ctrl *ptr);

   //!Get a pointer to the next block
   static block_ctrl * priv_next_block(block_ctrl *ptr);

   //!Checks if this block is allocated (not free)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Marks the block as allocated
   void priv_mark_as_allocated_block(block_ctrl *ptr);

   //!Marks a newly created block as allocated
   void priv_mark_new_allocated_block(block_ctrl *ptr)
   {  return priv_mark_as_allocated_block(ptr); }

   //!Marks the block as free
   void priv_mark_as_free_block(block_ctrl *ptr);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   public:

   static const size_type Alignment = !MemAlignment
      ? size_type(::boost::container::container_detail::alignment_of
                  < ::boost::container::container_detail::max_align_t>::value)
      : size_type(MemAlignment)
      ;

   private:
   //Due to embedded bits in size, Alignment must be at least 4
   BOOST_STATIC_ASSERT((Alignment >= 4));
   //Due to rbtree size optimizations, Alignment must have at least pointer alignment
   BOOST_STATIC_ASSERT((Alignment >= ::boost::container::container_detail::alignment_of<void_pointer>::value));
   static const size_type AlignmentMask = (Alignment - 1);
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
   static const size_type EndCtrlBlockBytes  = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
   static const size_type EndCtrlBlockUnits  = EndCtrlBlockBytes/Alignment;
   static const size_type MinBlockUnits      = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = sizeof(size_type);

   //Make sure the maximum alignment is a power of two
   BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
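
   //Worked example (illustrative, not normative): on a typical 64-bit platform
   //with MemAlignment == 0, Alignment is alignment_of<max_align_t> == 16, so
   //AlignmentMask == 15, AllocatedCtrlBytes rounds sizeof(SizeHolder) == 16 up
   //to 16 (one unit), UsableByPreviousChunk == 8 and PayloadPerAllocation == 8:
   //every allocation borrows its last 8 bytes from the m_prev_size field of the
   //next block, which is unused while this block stays allocated.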
   #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
   public:
   static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
};

#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
{
   size_type uint_this            = (std::size_t)this_ptr;
   size_type main_hdr_end         = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
   size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
   size_type block1_off           = aligned_main_hdr_end - uint_this;
   algo_impl_t::assert_alignment(aligned_main_hdr_end);
   algo_impl_t::assert_alignment(uint_this + block1_off);
   return block1_off;
}
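
//Worked example (illustrative, with hypothetical numbers): if this == 0x1000,
//sizeof(rbtree_best_fit) + extra_hdr_bytes == 0x48 and Alignment == 16, then
//main_hdr_end == 0x1048, which is rounded up to 0x1050, so the first block
//starts block1_off == 0x50 bytes past the start of the segment.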

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_add_segment(void *addr, size_type segment_size)
{
   //Check alignment
   algo_impl_t::check_alignment(addr);
   //Check size
   BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));

   //Initialize the first big block and the "end" node
   block_ctrl *first_big_block = ::new(addr, boost_container_new_t())block_ctrl;
   first_big_block->m_size = segment_size/Alignment - EndCtrlBlockUnits;
   BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);

   //The "end" node is just a node of size 0 with the "end" bit set
   block_ctrl *end_block = static_cast<block_ctrl*>
      (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);

   //This will overwrite the prev part of the "end" node
   priv_mark_as_free_block (first_big_block);
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
   #else
   first_big_block->m_prev_size = end_block->m_size =
      (reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment;
   #endif
   end_block->m_allocated = 1;
   first_big_block->m_prev_allocated = 1;

   BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
   BOOST_ASSERT(priv_prev_block(end_block) == first_big_block);
   BOOST_ASSERT(priv_first_block() == first_big_block);
   BOOST_ASSERT(priv_end_block() == end_block);

   //Some checks to validate the algorithm, since it makes some assumptions
   //to optimize the space wasted in bookkeeping:

   //Check that the sizes of the header are placed before the rbtree
   BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
                < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
   //Insert it in the intrusive containers
   m_header.m_imultiset.insert(*first_big_block);
}
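
//Resulting segment layout (illustrative sketch, sizes in Alignment units):
//
//   addr                                               addr + segment_size
//   |<------- first_big_block->m_size ------->|<-EndCtrlBlockUnits->|
//   [ first big free block ................. ][ end block          ]
//
//The end block stores in m_size the distance back to the first block and is
//permanently marked as allocated, so the last real block never tries to
//merge forward past the end of the segment.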

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_first_block()
{
   size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   return reinterpret_cast<block_ctrl *>(reinterpret_cast<char*>(this) + block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
      ::priv_end_block()
{
   size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
   const size_type original_first_block_size = m_header.m_size/Alignment*Alignment - block1_off/Alignment*Alignment - EndCtrlBlockBytes;
   block_ctrl *end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + block1_off + original_first_block_size);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize the header
   m_header.m_allocated       = 0;
   m_header.m_size            = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Now calculate the offset of the first big block that will
   //cover the whole segment
   BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
   size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
   priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   block_ctrl *old_end_block = priv_end_block();
   size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
                                             reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockUnits blocks to create a new block
   if((m_header.m_size - old_border_offset) < MinBlockUnits){
      return;
   }

   //Now create a new block between the old end and the new end
   size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
   block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);

   //The last and first blocks are special:
   //new_end_block->m_size & first_block->m_prev_size store the absolute
   //distance between them
   new_end_block->m_allocated = 1;
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = (reinterpret_cast<char*>(first_block) -
                            reinterpret_cast<char*>(new_end_block))/Alignment;
   #else
   new_end_block->m_size = (reinterpret_cast<char*>(new_end_block) -
                            reinterpret_cast<char*>(first_block))/Alignment;
   #endif
   first_block->m_prev_size = new_end_block->m_size;
   first_block->m_prev_allocated = 1;
   BOOST_ASSERT(new_end_block == priv_end_block());

   //The old end block is the new block
   block_ctrl *new_block = old_end_block;
   new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
                        reinterpret_cast<char*>(new_block))/Alignment;
   BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
   priv_mark_as_allocated_block(new_block);
   BOOST_ASSERT(priv_next_block(new_block) == new_end_block);

   m_header.m_allocated += (size_type)new_block->m_size*Alignment;

   //Now deallocate the newly created block
   this->priv_deallocate(priv_get_user_buffer(new_block));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
{
   //Get the address of the first block
   block_ctrl *first_block = priv_first_block();
   algo_impl_t::assert_alignment(first_block);

   //block_ctrl *old_end_block = priv_end_block(first_block);
   block_ctrl *old_end_block = priv_end_block();
   algo_impl_t::assert_alignment(old_end_block);
   size_type old_end_block_size = old_end_block->m_size;

   void *unique_buffer = 0;
   block_ctrl *last_block;
   //Check if no memory is allocated between the first and last block
   if(priv_next_block(first_block) == old_end_block){
      //If so, check if we can allocate memory
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      //If not, return: we can't shrink
      if(!unique_buffer)
         return;
      //If we can, mark the position just after the new allocation as the new end
      algo_impl_t::assert_alignment(unique_buffer);
      block_ctrl *unique_block = priv_get_block(unique_buffer);
      BOOST_ASSERT(priv_is_allocated_block(unique_block));
      algo_impl_t::assert_alignment(unique_block);
      last_block = priv_next_block(unique_block);
      BOOST_ASSERT(!priv_is_allocated_block(last_block));
      algo_impl_t::assert_alignment(last_block);
   }
   else{
      //If memory is allocated, check if the last block is allocated
      if(priv_is_prev_allocated(old_end_block))
         return;
      //If not, mark the last block after the free block
      last_block = priv_prev_block(old_end_block);
   }

   size_type last_block_size = last_block->m_size;

   //Erase the block from the free tree, since we are going to
   //repurpose it as the new end block
   m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));

   size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
                                                reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;

   block_ctrl *new_end_block = last_block;
   algo_impl_t::assert_alignment(new_end_block);

   //Write new end block attributes
   #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
   new_end_block->m_size = first_block->m_prev_size =
      (reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment;
   #else
   new_end_block->m_size = first_block->m_prev_size =
      (reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment;
   #endif

   new_end_block->m_allocated = 1;
   (void)last_block_size;
   (void)old_end_block_size;
   BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));

   //Update managed buffer's size
   m_header.m_size = shrunk_border_offset;
   BOOST_ASSERT(priv_end_block() == new_end_block);
   if(unique_buffer)
      priv_deallocate(unique_buffer);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
{  return m_header.m_size;  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   get_min_size (size_type extra_hdr_bytes)
{
   return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
           algo_impl_t::ceil_units(extra_hdr_bytes) +
           MinBlockUnits + EndCtrlBlockUnits)*Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   all_memory_deallocated()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   return m_header.m_allocated == 0 &&
      m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
      (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end() &&
      m_header.m_imultiset.begin()->m_size ==
         (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   check_sanity()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   size_type free_memory = 0;

   //Iterate through all blocks obtaining their size
   for(; ib != ie; ++ib){
      free_memory += (size_type)ib->m_size*Alignment;
      algo_impl_t::assert_alignment(&*ib);
      if(!algo_impl_t::check_alignment(&*ib))
         return false;
   }

   //Check allocated bytes are less than size
   if(m_header.m_allocated > m_header.m_size){
      return false;
   }

   size_type block1_off =
      priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);

   //Check free bytes are less than size
   if(free_memory > (m_header.m_size - block1_off)){
      return false;
   }
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate(size_type nbytes)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   size_type ignore_recvd = nbytes;
   void *ignore_reuse = 0;
   return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocate_aligned(size_type nbytes, size_type alignment)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return algo_impl_t::allocate_aligned(this, nbytes, alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
template<class T>
inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                       size_type &prefer_in_recvd_out_size, T *&reuse)
{
   void* raw_reuse = reuse;
   void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
   reuse = static_cast<T*>(raw_reuse);
   BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::container_detail::alignment_of<T>::value));
   return static_cast<T*>(ret);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
                           size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
{
   size_type const preferred_objects = prefer_in_recvd_out_objects;
   if(!sizeof_object)
      return reuse_ptr = 0, static_cast<void*>(0);
   if(command & boost::interprocess::try_shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      const bool success = algo_impl_t::try_shrink
         ( this, reuse_ptr, limit_objects*sizeof_object
         , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
      prefer_in_recvd_out_objects /= sizeof_object;
      return success ? reuse_ptr : 0;
   }
   else{
      return priv_allocation_command
         (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
   }
}


template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                            size_type &prefer_in_recvd_out_size,
                            void *&reuse_ptr, size_type sizeof_object)
{
   void* ret;
   size_type const preferred_size = prefer_in_recvd_out_size;
   size_type const max_count = m_header.m_size/sizeof_object;
   if(limit_size > max_count || preferred_size > max_count){
      return reuse_ptr = 0, static_cast<void*>(0);
   }
   size_type l_size = limit_size*sizeof_object;
   size_type p_size = preferred_size*sizeof_object;
   size_type r_size;
   {
      //-----------------------
      boost::interprocess::scoped_lock<mutex_type> guard(m_header);
      //-----------------------
      ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
   }
   prefer_in_recvd_out_size = r_size/sizeof_object;
   return ret;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   size(const void *ptr) const
{
   //We need no synchronization since this block's size is not going
   //to be modified by anyone else.
   //Obtain the real size of the block
   return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
}
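
//Worked example (illustrative, assuming Alignment == 16, AllocatedCtrlUnits == 1
//and UsableByPreviousChunk == 8): a block with m_size == 4 units reports
//(4 - 1)*16 + 8 == 56 usable bytes, which is exactly the inverse of the
//computation performed by priv_get_total_units() below.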

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());

   //Iterate through all blocks obtaining their size
   while(ib != ie){
      //Just clear the memory part reserved for the user
      volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
      size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
      while(s--){
         *ptr++ = 0;
      }

      //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
      //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
      //           , 0
      //           , ib->m_size*Alignment - BlockCtrlBytes);
      ++ib;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand_both_sides(boost::interprocess::allocation_type command
                         ,size_type min_size
                         ,size_type &prefer_in_recvd_out_size
                         ,void *reuse_ptr
                         ,bool only_preferred_backwards
                         ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   algo_impl_t::assert_alignment(reuse_ptr);
   if(command & boost::interprocess::expand_fwd){
      if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
         return reuse_ptr;
   }
   else{
      prefer_in_recvd_out_size = this->size(reuse_ptr);
      if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
         return reuse_ptr;
   }

   if(backwards_multiple){
      BOOST_ASSERT(0 == (min_size       % backwards_multiple));
      BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
   }

   if(command & boost::interprocess::expand_bwd){
      //Obtain the real size of the block
      block_ctrl *reuse = priv_get_block(reuse_ptr);

      //Sanity check
      algo_impl_t::assert_alignment(reuse);

      block_ctrl *prev_block;

      //If the previous block is not free, there is nothing to do
      if(priv_is_prev_allocated(reuse)){
         return 0;
      }

      prev_block = priv_prev_block(reuse);
      BOOST_ASSERT(!priv_is_allocated_block(prev_block));

      //Some sanity checks
      BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
      algo_impl_t::assert_alignment(prev_block);

      size_type needs_backwards_aligned;
      size_type lcm;
      if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
         ( backwards_multiple
         , prefer_in_recvd_out_size
         , only_preferred_backwards ? preferred_size : min_size
         , lcm, needs_backwards_aligned)){
         return 0;
      }

      //Check if previous block has enough size
      if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
         //Now take all next space. This will succeed
         if(command & boost::interprocess::expand_fwd){
            size_type received_size2;
            if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
               BOOST_ASSERT(0);
            }
            BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
         }
         //We need a minimum size to split the previous one
         if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
            block_ctrl *new_block = reinterpret_cast<block_ctrl *>
               (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);

            //Free old previous buffer
            new_block->m_size =
               AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment;
            BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(new_block);

            prev_block->m_size = (reinterpret_cast<char*>(new_block) -
                                  reinterpret_cast<char*>(prev_block))/Alignment;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_free_block(prev_block);

            //Update the old previous block in the free blocks tree
            //If the new size fulfills tree invariants do nothing,
            //otherwise erase() + insert()
            {
               imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
               imultiset_iterator was_smaller_it(prev_block_it);
               if(prev_block_it != m_header.m_imultiset.begin() &&
                  (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
                  m_header.m_imultiset.erase(prev_block_it);
                  m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
               }
            }

            prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
            m_header.m_allocated += needs_backwards_aligned;

            //Check alignment
            algo_impl_t::assert_alignment(new_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *p = priv_get_user_buffer(new_block);
            void *user_ptr = reinterpret_cast<char*>(p);
            BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         //Check if there is no room to create a new block and
         //the whole new block is a multiple of the backwards expansion multiple
         else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
                 0 == ((prev_block->m_size*Alignment) % lcm)) {
            //Erase old previous block, since we will change it
            m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));

            //Just merge the whole previous block
            //prev_block->m_size*Alignment is a multiple of lcm (and backwards_multiple)
            prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;

            m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
            //Now update sizes
            prev_block->m_size = prev_block->m_size + reuse->m_size;
            BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
            priv_mark_as_allocated_block(prev_block);

            //If the backwards expansion has remaining bytes in the
            //first bytes, fill them with a pattern
            void *user_ptr = priv_get_user_buffer(prev_block);
            BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
            algo_impl_t::assert_alignment(user_ptr);
            return user_ptr;
         }
         else{
            //Alignment issues
         }
      }
   }
   return 0;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
{
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   algo_impl_t::deallocate_many(this, chain);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size
                ,size_type &prefer_in_recvd_out_size
                ,void *&reuse_ptr
                ,size_type backwards_multiple)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success =
         algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
      return success ? reuse_ptr : 0;
   }

   prefer_in_recvd_out_size = 0;

   if(limit_size > preferred_size)
      return reuse_ptr = 0, static_cast<void*>(0);

   //Number of units to request (including block_ctrl header)
   size_type preferred_units = priv_get_total_units(preferred_size);

   //Number of units to request (including block_ctrl header)
   size_type limit_units = priv_get_total_units(limit_size);

   //Expand in place
   prefer_in_recvd_out_size = preferred_size;
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
      if(ret)
         return ret;
   }

   if(command & boost::interprocess::allocate_new){
      size_block_ctrl_compare comp;
      imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));

      if(it != m_header.m_imultiset.end()){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }

      if(it != m_header.m_imultiset.begin() &&
         (--it)->m_size >= limit_units){
         return reuse_ptr = 0, this->priv_check_and_allocate
            (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
      }
   }

   //Now try to expand both sides with min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      return priv_expand_both_sides
         (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
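
//Note on the best-fit search above (illustrative): since the multiset orders
//free blocks by m_size, lower_bound() returns the leftmost block whose size
//is >= preferred_units. For example, with free sizes {4, 8, 8, 32} a request
//of 7 units picks the first 8-unit block; if nothing is big enough, the code
//falls back to the largest block (--it), provided it still covers limit_units.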

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
{
   return const_cast<block_ctrl*>
      (reinterpret_cast<const block_ctrl*>
         (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline
void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{  return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);  }

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_get_total_units(size_type userbytes)
{
   if(userbytes < UsableByPreviousChunk)
      userbytes = UsableByPreviousChunk;
   size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
   if(units < BlockCtrlUnits) units = BlockCtrlUnits;
   return units;
}
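
//Worked example (illustrative, assuming Alignment == 16, AllocatedCtrlUnits == 1
//and UsableByPreviousChunk == 8): userbytes == 100 -> 100 - 8 == 92 bytes,
//rounded up to 96 bytes == 6 units, plus 1 header unit == 7 total units for
//the allocation (subject to the BlockCtrlUnits minimum).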

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
   priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   //Obtain the real size of the block
   block_ctrl *block = priv_get_block(ptr);
   size_type old_block_units = block->m_size;

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Put this to a safe value
   prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
      return true;

   //Now translate it to Alignment units
   const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
   const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);

   //Some parameter checks
   BOOST_ASSERT(min_user_units <= preferred_user_units);

   block_ctrl *next_block;

   if(priv_is_allocated_block(next_block = priv_next_block(block))){
      return prefer_in_recvd_out_size >= min_size;
   }
   algo_impl_t::assert_alignment(next_block);

   //Is "block" + "next_block" big enough?
   const size_type merged_units = old_block_units + (size_type)next_block->m_size;

   //Now get the expansion size
   const size_type merged_user_units = merged_units - AllocatedCtrlUnits;

   if(merged_user_units < min_user_units){
      prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
      return false;
   }

   //Now get the maximum size the user can allocate
   size_type intended_user_units = (merged_user_units < preferred_user_units) ?
      merged_user_units : preferred_user_units;

   //These are total units of the merged block (supposing the next block can be split)
   const size_type intended_units = AllocatedCtrlUnits + intended_user_units;

   //Check if we can split the next one in two parts
   if((merged_units - intended_units) >= BlockCtrlUnits){
      //This block is bigger than needed, split it in
      //two blocks: the first one will be merged and
      //the second's size will be the remaining space
      BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
      const size_type rem_units = merged_units - intended_units;

      //Check if we need to update the old next block in the free blocks tree.
      //If the new size fulfills tree invariants, we just need to replace the node
      //(the block start has been displaced), otherwise erase() + insert().
      //
      //This fixup must be done in two parts, because the new next block might
      //overwrite the tree hook of the old next block. So we first erase the
      //old one if needed, and we'll insert the new one after creating the new next
      imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
      const bool size_invariants_broken =
         (next_block->m_size - rem_units) < BlockCtrlUnits ||
         (old_next_block_it != m_header.m_imultiset.begin() &&
          (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
      if(size_invariants_broken){
         m_header.m_imultiset.erase(old_next_block_it);
      }
      //This is the remaining block
      block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + intended_units*Alignment), boost_container_new_t())block_ctrl;
      rem_block->m_size = rem_units;
      algo_impl_t::assert_alignment(rem_block);
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      //Now the second part of the fixup
      if(size_invariants_broken)
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      else
         m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);

      //Write the new length
      block->m_size = intended_user_units + AllocatedCtrlUnits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (intended_units - old_block_units)*Alignment;
   }
   //There is no free space to create a new node: just merge both blocks
   else{
      //Now we have to update the data in the tree
      m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));

      //Write the new length
      block->m_size = merged_units;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
      m_header.m_allocated += (merged_units - old_block_units)*Alignment;
   }
   priv_mark_as_allocated_block(block);
   prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
   return true;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   BOOST_ASSERT(!ptr->m_prev_allocated);
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
}



template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
{
   //The first block's logic differs from that of the other blocks: it stores
   //in m_prev_size the absolute distance to the end block
   BOOST_ASSERT(first_segment_block->m_prev_allocated);
   block_ctrl *end_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
   (void)end_block;
   BOOST_ASSERT(end_block->m_allocated == 1);
   BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
   BOOST_ASSERT(end_block > first_segment_block);
   return end_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
{
   //The end block's logic differs from that of the other blocks: it stores
   //in m_size the absolute distance to the first block
   BOOST_ASSERT(end_segment_block->m_allocated);
   block_ctrl *first_block = reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
   (void)first_block;
   BOOST_ASSERT(first_block->m_prev_allocated == 1);
   BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
   BOOST_ASSERT(end_segment_block > first_block);
   return first_block;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
   rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
{
   return reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   bool allocated = block->m_allocated != 0;
   #ifndef NDEBUG
   if(block != priv_end_block()){
      block_ctrl *next_block = reinterpret_cast<block_ctrl *>
         (reinterpret_cast<char*>(block) + block->m_size*Alignment);
      bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
      (void)next_block_prev_allocated;
      BOOST_ASSERT(allocated == next_block_prev_allocated);
   }
   #endif
   return allocated;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   if(block->m_prev_allocated){
      return true;
   }
   else{
      #ifndef NDEBUG
      if(block != priv_first_block()){
         block_ctrl *prev = priv_prev_block(block);
         (void)prev;
         BOOST_ASSERT(!prev->m_allocated);
         BOOST_ASSERT(prev->m_size == block->m_prev_size);
      }
      #endif
      return false;
   }
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 1;
   reinterpret_cast<block_ctrl *>
      (reinterpret_cast<char*>(block) + block->m_size*Alignment)->m_prev_allocated = 1;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
      (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
{
   block->m_allocated = 0;
   block_ctrl *next_block = priv_next_block(block);
   next_block->m_prev_allocated = 0;
   next_block->m_prev_size = block->m_size;
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
   (size_type nunits
   ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
   ,size_type &received_size)
{
   size_type upper_nunits = nunits + BlockCtrlUnits;
   imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
   algo_impl_t::assert_alignment(block);

   if (block->m_size >= upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks: the first's size will be "units" and
      //the second's size "block->m_size - units"
      size_type block_old_size = block->m_size;
      block->m_size = nunits;
      BOOST_ASSERT(block->m_size >= BlockCtrlUnits);

      //This is the remaining block
      block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
         (reinterpret_cast<char*>(block) + Alignment*nunits), boost_container_new_t())block_ctrl;
      algo_impl_t::assert_alignment(rem_block);
      rem_block->m_size = block_old_size - nunits;
      BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
      priv_mark_as_free_block(rem_block);

      imultiset_iterator it_hint;
      if(it_old == m_header.m_imultiset.begin()
         || (--imultiset_iterator(it_old))->m_size <= rem_block->m_size){
         //option a: slow but secure
         //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
         //option b: construct an empty node and swap
         //Imultiset::init_node(*rem_block);
         //block->swap_nodes(*rem_block);
         //option c: replace the node directly
         m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
      }
      else{
         //Now we have to update the data in the tree
         m_header.m_imultiset.erase(it_old);
         m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
      }

   }
   else if (block->m_size >= nunits){
      m_header.m_imultiset.erase(it_old);
   }
   else{
      BOOST_ASSERT(0);
      return 0;
   }
   //We need the block_ctrl header for deallocation, so return
   //only the memory the user can overwrite
   m_header.m_allocated += (size_type)block->m_size*Alignment;
   received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;

   //Mark the block as allocated
   priv_mark_as_allocated_block(block);

   //Clear the memory occupied by the tree hook, since this won't be
   //cleared with zero_free_memory
   TreeHook *t = static_cast<TreeHook*>(block);
   //Just clear the memory part reserved for the user
   std::size_t tree_hook_offset_in_block = (char*)t - (char*)block;
   //volatile char *ptr =
   char *ptr = reinterpret_cast<char*>(block) + tree_hook_offset_in_block;
   const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
   std::memset(ptr, 0, s);
   this->priv_next_block(block)->m_prev_size = 0;
   return priv_get_user_buffer(block);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
{
   if(!addr)   return;
   //-----------------------
   boost::interprocess::scoped_lock<mutex_type> guard(m_header);
   //-----------------------
   return this->priv_deallocate(addr);
}

template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   block_ctrl *block = priv_get_block(addr);

   //The block must be marked as allocated and the sizes must be equal
   BOOST_ASSERT(priv_is_allocated_block(block));

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type block_old_size = Alignment*(size_type)block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= block_old_size);

   //Update used memory count
   m_header.m_allocated -= block_old_size;

   //The block to insert in the tree
   block_ctrl *block_to_insert = block;

   //Get the next block
   block_ctrl *const next_block = priv_next_block(block);
   const bool merge_with_prev   = !priv_is_prev_allocated(block);
   const bool merge_with_next   = !priv_is_allocated_block(next_block);

   //Merge logic. First just update block sizes, then fix the free blocks tree
   if(merge_with_prev || merge_with_next){
      //Merge if the previous is free
      if(merge_with_prev){
         //Get the previous block
         block_to_insert = priv_prev_block(block);
         block_to_insert->m_size += block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
      }
      //Merge if the next is free
      if(merge_with_next){
         block_to_insert->m_size += next_block->m_size;
         BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
         const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
         if(merge_with_prev){
            m_header.m_imultiset.erase(next_it);
         }
         else{
            m_header.m_imultiset.replace_node(next_it, *block_to_insert);
         }
      }

      //Now try to shortcut erasure + insertion (O(log(N))) with
      //an O(1) operation if merging does not alter tree positions
      const imultiset_iterator block_to_check_it = Imultiset::s_iterator_to(*block_to_insert);
      imultiset_const_iterator next_to_check_it(block_to_check_it), end_it(m_header.m_imultiset.end());

      if(++next_to_check_it != end_it && block_to_insert->m_size > next_to_check_it->m_size){
         //Block is bigger than the next one, so move it
         m_header.m_imultiset.erase(block_to_check_it);
         m_header.m_imultiset.insert(end_it, *block_to_insert);
      }
      else{
         //The block size increment didn't violate tree invariants, so there is nothing to fix
      }
   }
   else{
      m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
   }
   priv_mark_as_free_block(block_to_insert);
}

#endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

}  //namespace interprocess {
}  //namespace boost {

#include <boost/interprocess/detail/config_end.hpp>

#endif   //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP