1 //////////////////////////////////////////////////////////////////////////////
3 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
7 // See http://www.boost.org/libs/interprocess for documentation.
9 //////////////////////////////////////////////////////////////////////////////
11 #ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
12 #define BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP
14 #ifndef BOOST_CONFIG_HPP
15 # include <boost/config.hpp>
18 #if defined(BOOST_HAS_PRAGMA_ONCE)
22 #include <boost/interprocess/detail/config_begin.hpp>
23 #include <boost/interprocess/detail/workaround.hpp>
26 #include <boost/interprocess/containers/allocation_type.hpp>
27 #include <boost/interprocess/exceptions.hpp>
28 #include <boost/interprocess/interprocess_fwd.hpp>
29 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
30 #include <boost/interprocess/offset_ptr.hpp>
31 #include <boost/interprocess/sync/scoped_lock.hpp>
32 // interprocess/detail
33 #include <boost/interprocess/detail/min_max.hpp>
34 #include <boost/interprocess/detail/math_functions.hpp>
35 #include <boost/interprocess/detail/type_traits.hpp>
36 #include <boost/interprocess/detail/utilities.hpp>
38 #include <boost/container/detail/multiallocation_chain.hpp>
40 #include <boost/container/detail/placement_new.hpp>
42 #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
44 #include <boost/intrusive/pointer_traits.hpp>
45 #include <boost/intrusive/set.hpp>
47 #include <boost/assert.hpp>
48 #include <boost/static_assert.hpp>
53 //#define BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
54 //to maintain an ABI compatible with the original version.
55 //The ABI had to be updated to fix compatibility issues when
56 //sharing shared memory between 32 and 64 bit processes.
59 //!Describes a best-fit algorithm based in an intrusive red-black tree used to allocate
60 //!objects in shared memory. This class is intended as a base class for single segment
61 //!and multi-segment implementations.
64 namespace interprocess {
66 //!This class implements an algorithm that stores the free nodes in a red-black tree
67 //!to have logarithmic search/insert times.
// NOTE(review): this extract omits several original lines of the class
// (the class head, access specifiers and closing braces); all visible
// declarations are kept byte-identical below.
68 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
71 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
// Copy constructor and copy assignment are declared but never defined:
// the memory algorithm is non-copyable.
74 rbtree_best_fit(const rbtree_best_fit &);
75 rbtree_best_fit &operator=(const rbtree_best_fit &);
// Rebind the user-supplied void pointer to pointers to the internal
// control block and to char, preserving the (possibly offset) pointer
// representation so the structure works when mapped at different addresses.
79 typedef typename boost::intrusive::
80 pointer_traits<VoidPointer>::template
81 rebind_pointer<block_ctrl>::type block_ctrl_ptr;
83 typedef typename boost::intrusive::
84 pointer_traits<VoidPointer>::template
85 rebind_pointer<char>::type char_ptr;
87 #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
90 //!Shared mutex family used for the rest of the Interprocess framework
91 typedef MutexFamily mutex_family;
92 //!Pointer type to be used with the rest of the Interprocess framework
93 typedef VoidPointer void_pointer;
94 typedef ipcdetail::basic_multiallocation_chain<VoidPointer> multiallocation_chain;
// difference_type/size_type are derived from the pointer type so that the
// layout is identical in every process attached to the same segment.
96 typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
97 typedef typename boost::container::container_detail::make_unsigned<difference_type>::type size_type;
99 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
// Intrusive multiset base hook for free blocks: normal_link plus
// optimize_size keep per-block bookkeeping overhead minimal.
103 typedef typename bi::make_set_base_hook
104 < bi::void_pointer<VoidPointer>
105 , bi::optimize_size<true>
106 , bi::link_mode<bi::normal_link> >::type TreeHook;
110 //!This block's memory size (including block_ctrl
111 //!header) in Alignment units
112 size_type m_prev_size;
// m_size gives up 2 bits for the two allocation flags below; this is why
// Alignment must be at least 4 (see BOOST_STATIC_ASSERT further down).
113 size_type m_size : sizeof(size_type)*CHAR_BIT - 2;
114 size_type m_prev_allocated : 1;
115 size_type m_allocated : 1;
118 //!Block control structure
120 : public SizeHolder, public TreeHook
// Note: deliberate comma operator; all three fields are reset here.
123 { this->m_size = 0; this->m_allocated = 0, this->m_prev_allocated = 0; }
// Free blocks are ordered in the multiset by size (best-fit search).
125 friend bool operator<(const block_ctrl &a, const block_ctrl &b)
126 { return a.m_size < b.m_size; }
127 friend bool operator==(const block_ctrl &a, const block_ctrl &b)
128 { return a.m_size == b.m_size; }
// Transparent comparator: allows tree lookups by raw size without
// constructing a temporary block_ctrl.
131 struct size_block_ctrl_compare
133 bool operator()(size_type size, const block_ctrl &block) const
134 { return size < block.m_size; }
136 bool operator()(const block_ctrl &block, size_type size) const
137 { return block.m_size < size; }
140 //!Shared mutex to protect memory allocate/deallocate
141 typedef typename MutexFamily::mutex_type mutex_type;
142 typedef typename bi::make_multiset
143 <block_ctrl, bi::base_hook<TreeHook> >::type Imultiset;
145 typedef typename Imultiset::iterator imultiset_iterator;
146 typedef typename Imultiset::const_iterator imultiset_const_iterator;
148 //!This struct includes needed data and derives from
149 //!mutex_type to allow EBO when using null mutex_type
150 struct header_t : public mutex_type
152 Imultiset m_imultiset;
154 //!The extra size required by the segment
155 size_type m_extra_hdr_bytes;
156 //!Allocated bytes for internal checking
157 size_type m_allocated;
158 //!The size of the memory segment
// The common helper is granted access to the private primitives below.
162 friend class ipcdetail::memory_algorithm_common<rbtree_best_fit>;
164 typedef ipcdetail::memory_algorithm_common<rbtree_best_fit> algo_impl_t;
167 #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
169 //!Constructor. "size" is the total size of the managed memory segment,
170 //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(rbtree_best_fit)
171 //!offset that the allocator should not use at all.
172 rbtree_best_fit (size_type size, size_type extra_hdr_bytes);
177 //!Obtains the minimum size needed by the algorithm
178 static size_type get_min_size (size_type extra_hdr_bytes);
180 //Functions for single segment management
182 //!Allocates bytes, returns 0 if there is not more memory
183 void* allocate (size_type nbytes);
185 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
187 //Experimental. Don't use
189 //!Multiple element allocation, same size
190 void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
// Lock the segment mutex, then delegate to the shared implementation.
192 //-----------------------
193 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
194 //-----------------------
195 algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
198 //!Multiple element allocation, different size
199 void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
201 //-----------------------
202 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
203 //-----------------------
204 algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
207 //!Multiple element deallocation
208 void deallocate_many(multiallocation_chain &chain);
210 #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
212 //!Deallocates previously allocated bytes
213 void deallocate (void *addr);
215 //!Returns the size of the memory segment
216 size_type get_size() const;
218 //!Returns the number of free bytes of the segment
219 size_type get_free_memory() const;
221 //!Initializes to zero all the memory that's not in use.
222 //!This function is normally used for security reasons.
223 void zero_free_memory();
225 //!Increases managed memory in
226 //!extra_size bytes more
227 void grow(size_type extra_size);
229 //!Decreases managed memory as much as possible
230 void shrink_to_fit();
232 //!Returns true if all allocated memory has been deallocated
233 bool all_memory_deallocated();
235 //!Makes an internal sanity check
236 //!and returns true if success
240 T * allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
241 size_type &prefer_in_recvd_out_size, T *&reuse);
243 void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_object,
244 size_type &prefer_in_recvd_out_size,
245 void *&reuse_ptr, size_type sizeof_object = 1);
247 //!Returns the size of the buffer previously allocated pointed by ptr
248 size_type size(const void *ptr) const;
250 //!Allocates aligned bytes, returns 0 if there is not more memory.
251 //!Alignment must be power of 2
252 void* allocate_aligned (size_type nbytes, size_type alignment);
254 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
// Byte offset, measured from "this", where the first usable block starts.
256 static size_type priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes);
258 block_ctrl *priv_first_block();
260 block_ctrl *priv_end_block();
262 void* priv_allocation_command(boost::interprocess::allocation_type command, size_type limit_size,
263 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object);
266 //!Real allocation algorithm with min allocation option
267 void * priv_allocate( boost::interprocess::allocation_type command
268 , size_type limit_size, size_type &prefer_in_recvd_out_size
269 , void *&reuse_ptr, size_type backwards_multiple = 1);
271 //!Obtains the block control structure of the user buffer
272 static block_ctrl *priv_get_block(const void *ptr);
274 //!Obtains the pointer returned to the user from the block control
275 static void *priv_get_user_buffer(const block_ctrl *block);
277 //!Returns the number of total units that a user buffer
278 //!of "userbytes" bytes really occupies (including header)
279 static size_type priv_get_total_units(size_type userbytes);
281 //!Real expand function implementation
282 bool priv_expand(void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size);
284 //!Real expand to both sides implementation
285 void* priv_expand_both_sides(boost::interprocess::allocation_type command
287 ,size_type &prefer_in_recvd_out_size
289 ,bool only_preferred_backwards
290 ,size_type backwards_multiple);
292 //!Returns true if the previous block is allocated
293 bool priv_is_prev_allocated(block_ctrl *ptr);
295 //!Get a pointer of the "end" block from the first block of the segment
296 static block_ctrl * priv_end_block(block_ctrl *first_segment_block);
298 //!Get a pointer of the "first" block from the end block of the segment
299 static block_ctrl * priv_first_block(block_ctrl *end_segment_block);
301 //!Get pointer of the previous block (previous block must be free)
302 static block_ctrl * priv_prev_block(block_ctrl *ptr);
304 //!Get a pointer of the next block
305 static block_ctrl * priv_next_block(block_ctrl *ptr);
307 //!Check if this block is free (not allocated)
308 bool priv_is_allocated_block(block_ctrl *ptr);
310 //!Marks the block as allocated
311 void priv_mark_as_allocated_block(block_ctrl *ptr);
313 //!Marks a newly created block as allocated
314 void priv_mark_new_allocated_block(block_ctrl *ptr)
315 { return priv_mark_as_allocated_block(ptr); }
317 //!Marks the block as free
318 void priv_mark_as_free_block(block_ctrl *ptr);
320 //!Checks if block has enough memory and splits/unlinks the block
321 //!returning the address to the users
322 void* priv_check_and_allocate(size_type units
324 ,size_type &received_size);
325 //!Real deallocation algorithm
326 void priv_deallocate(void *addr);
328 //!Makes a new memory portion available for allocation
329 void priv_add_segment(void *addr, size_type size);
// MemAlignment == 0 selects the platform's maximum natural alignment.
333 static const size_type Alignment = !MemAlignment
334 ? size_type(::boost::container::container_detail::alignment_of
335 < ::boost::container::container_detail::max_align_t>::value)
336 : size_type(MemAlignment)
340 //Due to embedded bits in size, Alignment must be at least 4
341 BOOST_STATIC_ASSERT((Alignment >= 4));
342 //Due to rbtree size optimizations, Alignment must have at least pointer alignment
343 BOOST_STATIC_ASSERT((Alignment >= ::boost::container::container_detail::alignment_of<void_pointer>::value));
344 static const size_type AlignmentMask = (Alignment - 1);
// Sizes of the different control structures, rounded up to Alignment,
// both in bytes and in Alignment units.
345 static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
346 static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
347 static const size_type AllocatedCtrlBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
348 static const size_type AllocatedCtrlUnits = AllocatedCtrlBytes/Alignment;
349 static const size_type EndCtrlBlockBytes = ipcdetail::ct_rounded_size<sizeof(SizeHolder), Alignment>::value;
350 static const size_type EndCtrlBlockUnits = EndCtrlBlockBytes/Alignment;
351 static const size_type MinBlockUnits = BlockCtrlUnits;
// Tail bytes of the previous block that an allocated block may reuse.
352 static const size_type UsableByPreviousChunk = sizeof(size_type);
354 //Make sure the maximum alignment is power of two
355 BOOST_STATIC_ASSERT((0 == (Alignment & (Alignment - size_type(1u)))));
356 #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
358 static const size_type PayloadPerAllocation = AllocatedCtrlBytes - UsableByPreviousChunk;
361 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
// Computes the byte offset, measured from this_ptr, of the first block of
// the segment: the end of the rbtree_best_fit object plus extra_hdr_bytes,
// rounded up to Alignment.
363 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
364 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
365 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
366 ::priv_first_block_offset_from_this(const void *this_ptr, size_type extra_hdr_bytes)
368 size_type uint_this = (std::size_t)this_ptr;
369 size_type main_hdr_end = uint_this + sizeof(rbtree_best_fit) + extra_hdr_bytes;
370 size_type aligned_main_hdr_end = ipcdetail::get_rounded_size(main_hdr_end, Alignment);
371 size_type block1_off = aligned_main_hdr_end - uint_this;
372 algo_impl_t::assert_alignment(aligned_main_hdr_end);
373 algo_impl_t::assert_alignment(uint_this + block1_off);
// NOTE(review): the "return block1_off;" line is not visible in this extract.
// Formats a raw, aligned memory region as one big free block followed by a
// sentinel "end" block, and registers the free block in the multiset.
377 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
378 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
379 priv_add_segment(void *addr, size_type segment_size)
382 algo_impl_t::check_alignment(addr);
// The region must hold at least one block header plus the end sentinel.
384 BOOST_ASSERT(segment_size >= (BlockCtrlBytes + EndCtrlBlockBytes));
386 //Initialize the first big block and the "end" node
387 block_ctrl *first_big_block = ::new(addr, boost_container_new_t())block_ctrl;
388 first_big_block->m_size = segment_size/Alignment - EndCtrlBlockUnits;
389 BOOST_ASSERT(first_big_block->m_size >= BlockCtrlUnits);
391 //The "end" node is just a node of size 0 with the "end" bit set
// The end sentinel is only a SizeHolder (no tree hook) placed right after
// the first big block.
392 block_ctrl *end_block = static_cast<block_ctrl*>
393 (new (reinterpret_cast<char*>(addr) + first_big_block->m_size*Alignment)SizeHolder);
395 //This will overwrite the prev part of the "end" node
396 priv_mark_as_free_block (first_big_block);
// Legacy ABI stores a negative distance; the fixed ABI stores the positive
// distance so 32 and 64 bit processes agree.
// NOTE(review): the matching #else/#endif lines of this #ifdef are not
// visible in this extract.
397 #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
398 first_big_block->m_prev_size = end_block->m_size =
399 (reinterpret_cast<char*>(first_big_block) - reinterpret_cast<char*>(end_block))/Alignment;
401 first_big_block->m_prev_size = end_block->m_size =
402 (reinterpret_cast<char*>(end_block) - reinterpret_cast<char*>(first_big_block))/Alignment;
404 end_block->m_allocated = 1;
// The first block has no real predecessor; marking the previous block as
// allocated prevents backward merging past the segment start.
405 first_big_block->m_prev_allocated = 1;
407 BOOST_ASSERT(priv_next_block(first_big_block) == end_block);
408 BOOST_ASSERT(priv_prev_block(end_block) == first_big_block);
409 BOOST_ASSERT(priv_first_block() == first_big_block);
410 BOOST_ASSERT(priv_end_block() == end_block);
412 //Some check to validate the algorithm, since it makes some assumptions
413 //to optimize the space wasted in bookkeeping:
415 //Check that the sizes of the header are placed before the rbtree
416 BOOST_ASSERT(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
417 < static_cast<void*>(static_cast<TreeHook*>(first_big_block)));
418 //Insert it in the intrusive containers
419 m_header.m_imultiset.insert(*first_big_block);
// Returns the first block of the segment: "this" plus the offset computed
// by priv_first_block_offset_from_this.
422 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
423 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
424 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
427 size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
428 return reinterpret_cast<block_ctrl *>(reinterpret_cast<char*>(this) + block1_off);
// Returns the sentinel "end" block, located right after all usable blocks.
431 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
432 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
433 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>
436 size_type block1_off = priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
// Both terms are rounded down to whole Alignment multiples before
// subtracting the end sentinel size.
437 const size_type original_first_block_size = m_header.m_size/Alignment*Alignment - block1_off/Alignment*Alignment - EndCtrlBlockBytes;
438 block_ctrl *end_block = reinterpret_cast<block_ctrl*>
439 (reinterpret_cast<char*>(this) + block1_off + original_first_block_size);
// Constructor: records segment metadata in the header, then hands the rest
// of the segment (after this object + extra_hdr_bytes) to priv_add_segment.
443 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
444 inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
445 rbtree_best_fit(size_type segment_size, size_type extra_hdr_bytes)
447 //Initialize the header
448 m_header.m_allocated = 0;
449 m_header.m_size = segment_size;
450 m_header.m_extra_hdr_bytes = extra_hdr_bytes;
452 //Now calculate the offset of the first big block that will
453 //cover the whole segment
454 BOOST_ASSERT(get_min_size(extra_hdr_bytes) <= segment_size);
455 size_type block1_off = priv_first_block_offset_from_this(this, extra_hdr_bytes);
456 priv_add_segment(reinterpret_cast<char*>(this) + block1_off, segment_size - block1_off);
// Destructor: intentionally empty. The leak checks below are disabled
// because outstanding allocations at destruction are tolerated.
459 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
460 inline rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::~rbtree_best_fit()
462 //There is a memory leak!
463 // BOOST_ASSERT(m_header.m_allocated == 0);
464 // BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
// Extends the managed segment by extra_size bytes: moves the end sentinel
// to the new border, turns the old end sentinel into a regular allocated
// block covering the new space, and then deallocates it so it joins the
// free tree (merging with a trailing free block if any).
467 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
468 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::grow(size_type extra_size)
470 //Get the address of the first block
471 block_ctrl *first_block = priv_first_block();
472 block_ctrl *old_end_block = priv_end_block();
473 size_type old_border_offset = (size_type)(reinterpret_cast<char*>(old_end_block) -
474 reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
476 //Update managed buffer's size
477 m_header.m_size += extra_size;
479 //We need at least MinBlockUnits blocks to create a new block
480 if((m_header.m_size - old_border_offset) < MinBlockUnits){
484 //Now create a new block between the old end and the new end
485 size_type align_offset = (m_header.m_size - old_border_offset)/Alignment;
486 block_ctrl *new_end_block = reinterpret_cast<block_ctrl*>
487 (reinterpret_cast<char*>(old_end_block) + align_offset*Alignment);
489 //the last and first block are special:
490 //new_end_block->m_size & first_block->m_prev_size store the absolute value
492 new_end_block->m_allocated = 1;
// ABI v1 stores the negative distance; the fixed ABI stores the positive
// distance between first block and end block.
493 #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
494 new_end_block->m_size = (reinterpret_cast<char*>(first_block) -
495 reinterpret_cast<char*>(new_end_block))/Alignment;
497 new_end_block->m_size = (reinterpret_cast<char*>(new_end_block) -
498 reinterpret_cast<char*>(first_block))/Alignment;
500 first_block->m_prev_size = new_end_block->m_size;
501 first_block->m_prev_allocated = 1;
502 BOOST_ASSERT(new_end_block == priv_end_block());
504 //The old end block is the new block
505 block_ctrl *new_block = old_end_block;
506 new_block->m_size = (reinterpret_cast<char*>(new_end_block) -
507 reinterpret_cast<char*>(new_block))/Alignment;
508 BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
509 priv_mark_as_allocated_block(new_block);
510 BOOST_ASSERT(priv_next_block(new_block) == new_end_block);
// Temporarily account the new block as allocated so priv_deallocate's
// bookkeeping stays balanced when it frees the block below.
512 m_header.m_allocated += (size_type)new_block->m_size*Alignment;
514 //Now deallocate the newly created block
515 this->priv_deallocate(priv_get_user_buffer(new_block));
// Shrinks the managed segment by releasing the trailing free block (if
// any): the last free block becomes the new end sentinel and the header
// size is reduced accordingly.
518 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
519 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::shrink_to_fit()
521 //Get the address of the first block
522 block_ctrl *first_block = priv_first_block();
523 algo_impl_t::assert_alignment(first_block);
525 //block_ctrl *old_end_block = priv_end_block(first_block);
526 block_ctrl *old_end_block = priv_end_block();
527 algo_impl_t::assert_alignment(old_end_block);
528 size_type old_end_block_size = old_end_block->m_size;
530 void *unique_buffer = 0;
531 block_ctrl *last_block;
532 //Check if no memory is allocated between the first and last block
533 if(priv_next_block(first_block) == old_end_block){
// Whole segment is one free block: allocate a minimal placeholder so the
// shrink logic below has an allocated block to anchor on; it is released
// again at the end of the function.
534 //If so check if we can allocate memory
535 size_type ignore_recvd = 0;
536 void *ignore_reuse = 0;
537 unique_buffer = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
538 //If not, return, we can't shrink
541 //If we can, mark the position just after the new allocation as the new end
542 algo_impl_t::assert_alignment(unique_buffer);
543 block_ctrl *unique_block = priv_get_block(unique_buffer);
544 BOOST_ASSERT(priv_is_allocated_block(unique_block));
545 algo_impl_t::assert_alignment(unique_block);
546 last_block = priv_next_block(unique_block);
547 BOOST_ASSERT(!priv_is_allocated_block(last_block));
548 algo_impl_t::assert_alignment(last_block);
551 //If memory is allocated, check if the last block is allocated
// If the block preceding the end sentinel is allocated there is nothing
// free at the tail, so nothing can be shrunk.
552 if(priv_is_prev_allocated(old_end_block))
554 //If not, mark last block after the free block
555 last_block = priv_prev_block(old_end_block);
558 size_type last_block_size = last_block->m_size;
560 //Erase block from the free tree, since we will erase it
561 m_header.m_imultiset.erase(Imultiset::s_iterator_to(*last_block));
563 size_type shrunk_border_offset = (size_type)(reinterpret_cast<char*>(last_block) -
564 reinterpret_cast<char*>(this)) + EndCtrlBlockBytes;
566 block_ctrl *new_end_block = last_block;
567 algo_impl_t::assert_alignment(new_end_block);
569 //Write new end block attributes
// Same ABI split as elsewhere: v1 stores a negative distance, the fixed
// ABI a positive one.
570 #ifdef BOOST_INTERPROCESS_RBTREE_BEST_FIT_ABI_V1_HPP
571 new_end_block->m_size = first_block->m_prev_size =
572 (reinterpret_cast<char*>(first_block) - reinterpret_cast<char*>(new_end_block))/Alignment;
574 new_end_block->m_size = first_block->m_prev_size =
575 (reinterpret_cast<char*>(new_end_block) - reinterpret_cast<char*>(first_block))/Alignment;
578 new_end_block->m_allocated = 1;
// Silence unused-variable warnings when BOOST_ASSERT compiles to nothing.
579 (void)last_block_size;
580 (void)old_end_block_size;
581 BOOST_ASSERT(new_end_block->m_size == (old_end_block_size - last_block_size));
583 //Update managed buffer's size
584 m_header.m_size = shrunk_border_offset;
585 BOOST_ASSERT(priv_end_block() == new_end_block);
// Release the placeholder allocated in the empty-segment branch above
// (no-op when unique_buffer stayed 0).
587 priv_deallocate(unique_buffer);
590 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
591 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
592 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_size() const
593 { return m_header.m_size; }
// Returns free bytes: total segment size minus allocated bytes minus the
// header area before the first block.
595 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
596 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
597 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::get_free_memory() const
599 return m_header.m_size - m_header.m_allocated -
600 priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
// Minimum segment size the algorithm can manage: header object +
// extra header bytes + one minimal block + the end sentinel, all in
// Alignment units converted back to bytes.
603 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
604 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
605 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
606 get_min_size (size_type extra_hdr_bytes)
608 return (algo_impl_t::ceil_units(sizeof(rbtree_best_fit)) +
609 algo_impl_t::ceil_units(extra_hdr_bytes) +
610 MinBlockUnits + EndCtrlBlockUnits)*Alignment;
// True when nothing is allocated: the allocated counter is zero and the
// free tree holds exactly one block spanning the whole usable segment.
613 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
614 inline bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
615 all_memory_deallocated()
617 //-----------------------
618 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
619 //-----------------------
620 size_type block1_off =
621 priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
// The two middle conditions check the multiset has exactly one element;
// the last checks that element covers all usable space.
623 return m_header.m_allocated == 0 &&
624 m_header.m_imultiset.begin() != m_header.m_imultiset.end() &&
625 (++m_header.m_imultiset.begin()) == m_header.m_imultiset.end()
626 && m_header.m_imultiset.begin()->m_size ==
627 (m_header.m_size - block1_off - EndCtrlBlockBytes)/Alignment;
// Internal sanity check: walks the free tree verifying alignment of every
// free block and consistency of the global byte counters.
// NOTE(review): the function-name line (presumably "check_sanity()") is
// not visible in this extract.
630 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
631 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
634 //-----------------------
635 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
636 //-----------------------
637 imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
639 size_type free_memory = 0;
641 //Iterate through all blocks obtaining their size
642 for(; ib != ie; ++ib){
643 free_memory += (size_type)ib->m_size*Alignment;
644 algo_impl_t::assert_alignment(&*ib);
645 if(!algo_impl_t::check_alignment(&*ib))
649 //Check allocated bytes are less than size
650 if(m_header.m_allocated > m_header.m_size){
654 size_type block1_off =
655 priv_first_block_offset_from_this(this, m_header.m_extra_hdr_bytes);
657 //Check free bytes are less than size
658 if(free_memory > (m_header.m_size - block1_off)){
// Public allocation entry point: takes the segment mutex and forwards to
// priv_allocate with the allocate_new command. Returns 0 on exhaustion.
664 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
665 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
666 allocate(size_type nbytes)
668 //-----------------------
669 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
670 //-----------------------
// The received-size and reuse outputs are not needed by plain allocate.
671 size_type ignore_recvd = nbytes;
672 void *ignore_reuse = 0;
673 return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
// Aligned allocation: locks the segment and delegates to the shared
// implementation (alignment must be a power of two per the declaration).
676 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
677 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
678 allocate_aligned(size_type nbytes, size_type alignment)
680 //-----------------------
681 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
682 //-----------------------
683 return algo_impl_t::allocate_aligned(this, nbytes, alignment);
// Typed front-end of the allocation command machinery: converts T*
// reuse pointers to void* and back around priv_allocation_command.
// NOTE(review): the "template<class T>" line is not visible in this extract.
686 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
688 inline T* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
689 allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
690 size_type &prefer_in_recvd_out_size, T *&reuse)
692 void* raw_reuse = reuse;
693 void* const ret = priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
694 reuse = static_cast<T*>(raw_reuse);
// The returned buffer must be suitably aligned for T.
695 BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::container_detail::alignment_of<T>::value));
696 return static_cast<T*>(ret);
// Untyped allocation command working in "objects" of sizeof_object bytes.
// Handles try_shrink_in_place directly; everything else goes through
// priv_allocation_command.
699 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
700 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
701 raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
702 size_type &prefer_in_recvd_out_objects, void *&reuse_ptr, size_type sizeof_object)
704 size_type const preferred_objects = prefer_in_recvd_out_objects;
// NOTE(review): the condition guarding this early return (presumably a
// sizeof_object check) is not visible in this extract.
706 return reuse_ptr = 0, static_cast<void*>(0);
707 if(command & boost::interprocess::try_shrink_in_place){
708 if(!reuse_ptr) return static_cast<void*>(0);
// try_shrink works in bytes: convert object counts on the way in and the
// received size back to objects on the way out.
709 const bool success = algo_impl_t::try_shrink
710 ( this, reuse_ptr, limit_objects*sizeof_object
711 , prefer_in_recvd_out_objects = preferred_objects*sizeof_object);
712 prefer_in_recvd_out_objects /= sizeof_object;
713 return success ? reuse_ptr : 0;
716 return priv_allocation_command
717 (command, limit_objects, prefer_in_recvd_out_objects, reuse_ptr, sizeof_object);
// Converts object counts to byte sizes (with overflow protection), takes
// the segment mutex, runs priv_allocate, and converts the received byte
// size back to object units.
722 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
723 inline void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
724 priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
725 size_type &prefer_in_recvd_out_size,
726 void *&reuse_ptr, size_type sizeof_object)
729 size_type const preferred_size = prefer_in_recvd_out_size;
// Reject requests whose byte size would overflow size_type.
730 size_type const max_count = m_header.m_size/sizeof_object;
731 if(limit_size > max_count || preferred_size > max_count){
732 return reuse_ptr = 0, static_cast<void*>(0);
734 size_type l_size = limit_size*sizeof_object;
735 size_type p_size = preferred_size*sizeof_object;
738 //-----------------------
739 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
740 //-----------------------
741 ret = priv_allocate(command, l_size, r_size = p_size, reuse_ptr, sizeof_object);
743 prefer_in_recvd_out_size = r_size/sizeof_object;
// Returns the usable size of a previously allocated buffer: block size in
// units minus the allocated-block header, plus the tail bytes borrowed
// from the previous chunk.
747 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
748 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
749 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
750 size(const void *ptr) const
752 //We need no synchronization since this block's size is not going
753 //to be modified by anyone else
754 //Obtain the real size of the block
755 return ((size_type)priv_get_block(ptr)->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
// Overwrites the user-area of every free block with zeros; used so freed
// data cannot be read back by other processes mapping the segment.
758 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
759 inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::zero_free_memory()
761 //-----------------------
762 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
763 //-----------------------
764 imultiset_iterator ib(m_header.m_imultiset.begin()), ie(m_header.m_imultiset.end());
766 //Iterate through all blocks obtaining their size
768 //Just clear the memory part reserved for the user
// volatile so the byte-by-byte clearing is not optimized away (see the
// note about Visual C++ below).
769 volatile char *ptr = reinterpret_cast<char*>(&*ib) + BlockCtrlBytes;
770 size_type s = (size_type)ib->m_size*Alignment - BlockCtrlBytes;
775 //This surprisingly is optimized out by Visual C++ 7.1 in release mode!
776 //std::memset( reinterpret_cast<char*>(&*ib) + BlockCtrlBytes
778 // , ib->m_size*Alignment - BlockCtrlBytes);
783 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
784 void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
785 priv_expand_both_sides(boost::interprocess::allocation_type command
787 ,size_type &prefer_in_recvd_out_size
789 ,bool only_preferred_backwards
790 ,size_type backwards_multiple)
792 size_type const preferred_size = prefer_in_recvd_out_size;
793 algo_impl_t::assert_alignment(reuse_ptr);
794 if(command & boost::interprocess::expand_fwd){
795 if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
799 prefer_in_recvd_out_size = this->size(reuse_ptr);
800 if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
804 if(backwards_multiple){
805 BOOST_ASSERT(0 == (min_size % backwards_multiple));
806 BOOST_ASSERT(0 == (preferred_size % backwards_multiple));
809 if(command & boost::interprocess::expand_bwd){
810 //Obtain the real size of the block
811 block_ctrl *reuse = priv_get_block(reuse_ptr);
814 algo_impl_t::assert_alignment(reuse);
816 block_ctrl *prev_block;
818 //If the previous block is not free, there is nothing to do
819 if(priv_is_prev_allocated(reuse)){
823 prev_block = priv_prev_block(reuse);
824 BOOST_ASSERT(!priv_is_allocated_block(prev_block));
827 BOOST_ASSERT(prev_block->m_size == reuse->m_prev_size);
828 algo_impl_t::assert_alignment(prev_block);
830 size_type needs_backwards_aligned;
832 if(!algo_impl_t::calculate_lcm_and_needs_backwards_lcmed
834 , prefer_in_recvd_out_size
835 , only_preferred_backwards ? preferred_size : min_size
836 , lcm, needs_backwards_aligned)){
840 //Check if previous block has enough size
841 if(size_type(prev_block->m_size*Alignment) >= needs_backwards_aligned){
842 //Now take all next space. This will succeed
843 if(command & boost::interprocess::expand_fwd){
844 size_type received_size2;
845 if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, received_size2 = prefer_in_recvd_out_size)){
848 BOOST_ASSERT(prefer_in_recvd_out_size == received_size2);
850 //We need a minimum size to split the previous one
851 if(prev_block->m_size >= (needs_backwards_aligned/Alignment + BlockCtrlUnits)){
852 block_ctrl *new_block = reinterpret_cast<block_ctrl *>
853 (reinterpret_cast<char*>(reuse) - needs_backwards_aligned);
855 //Free old previous buffer
857 AllocatedCtrlUnits + (needs_backwards_aligned + (prefer_in_recvd_out_size - UsableByPreviousChunk))/Alignment;
858 BOOST_ASSERT(new_block->m_size >= BlockCtrlUnits);
859 priv_mark_as_allocated_block(new_block);
861 prev_block->m_size = (reinterpret_cast<char*>(new_block) -
862 reinterpret_cast<char*>(prev_block))/Alignment;
863 BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
864 priv_mark_as_free_block(prev_block);
866 //Update the old previous block in the free blocks tree
867 //If the new size fulfills tree invariants do nothing,
868 //otherwise erase() + insert()
870 imultiset_iterator prev_block_it(Imultiset::s_iterator_to(*prev_block));
871 imultiset_iterator was_smaller_it(prev_block_it);
872 if(prev_block_it != m_header.m_imultiset.begin() &&
873 (--(was_smaller_it = prev_block_it))->m_size > prev_block->m_size){
874 m_header.m_imultiset.erase(prev_block_it);
875 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *prev_block);
879 prefer_in_recvd_out_size = needs_backwards_aligned + prefer_in_recvd_out_size;
880 m_header.m_allocated += needs_backwards_aligned;
883 algo_impl_t::assert_alignment(new_block);
885 //If the backwards expansion has remaining bytes in the
886 //first bytes, fill them with a pattern
887 void *p = priv_get_user_buffer(new_block);
888 void *user_ptr = reinterpret_cast<char*>(p);
889 BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
890 algo_impl_t::assert_alignment(user_ptr);
893 //Check if there is no place to create a new block and
894 //the whole new block is multiple of the backwards expansion multiple
895 else if(prev_block->m_size >= needs_backwards_aligned/Alignment &&
896 0 == ((prev_block->m_size*Alignment) % lcm)) {
897 //Erase old previous block, since we will change it
898 m_header.m_imultiset.erase(Imultiset::s_iterator_to(*prev_block));
900 //Just merge the whole previous block
901 //prev_block->m_size*Alignment is multiple of lcm (and backwards_multiple)
902 prefer_in_recvd_out_size = prefer_in_recvd_out_size + (size_type)prev_block->m_size*Alignment;
904 m_header.m_allocated += (size_type)prev_block->m_size*Alignment;
906 prev_block->m_size = prev_block->m_size + reuse->m_size;
907 BOOST_ASSERT(prev_block->m_size >= BlockCtrlUnits);
908 priv_mark_as_allocated_block(prev_block);
910 //If the backwards expansion has remaining bytes in the
911 //first bytes, fill them with a pattern
912 void *user_ptr = priv_get_user_buffer(prev_block);
913 BOOST_ASSERT((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) % backwards_multiple == 0);
914 algo_impl_t::assert_alignment(user_ptr);
925 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
926 inline void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
927 deallocate_many(typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::multiallocation_chain &chain)
929 //-----------------------
930 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
931 //-----------------------
932 algo_impl_t::deallocate_many(this, chain);
//Central allocation routine: dispatches on the allocation_type command bits
//(shrink_in_place, expand_fwd/expand_bwd around reuse_ptr, allocate_new) and
//reports the actually obtained size through prefer_in_recvd_out_size (which
//carries the preferred size on entry). Returns the buffer address or 0.
//NOTE(review): this extract is missing some original lines (the reuse_ptr
//parameter line, the declaration of 'success', several closing braces), so
//the code below is annotated as-is.
935 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
936 void * rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
937 priv_allocate(boost::interprocess::allocation_type command
938 ,size_type limit_size
939 ,size_type &prefer_in_recvd_out_size
941 ,size_type backwards_multiple)
//Remember the caller's preferred size before the out-parameter is reused
943 size_type const preferred_size = prefer_in_recvd_out_size;
//Shrink-in-place only makes sense with an existing buffer to shrink
944 if(command & boost::interprocess::shrink_in_place){
945 if(!reuse_ptr) return static_cast<void*>(0);
947 algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size = preferred_size);
948 return success ? reuse_ptr : 0;
951 prefer_in_recvd_out_size = 0;
//A minimum above the preference is a caller error: fail immediately
953 if(limit_size > preferred_size)
954 return reuse_ptr = 0, static_cast<void*>(0);
956 //Number of units to request (including block_ctrl header)
957 size_type preferred_units = priv_get_total_units(preferred_size);
959 //Number of units of the limit (including block_ctrl header)
960 size_type limit_units = priv_get_total_units(limit_size);
963 prefer_in_recvd_out_size = preferred_size;
//First chance: try to expand in place around reuse_ptr (preferred size only)
964 if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
965 void *ret = priv_expand_both_sides
966 (command, limit_size, prefer_in_recvd_out_size, reuse_ptr, true, backwards_multiple);
//Fresh allocation: best-fit search in the size-ordered free-block multiset
971 if(command & boost::interprocess::allocate_new){
972 size_block_ctrl_compare comp;
973 imultiset_iterator it(m_header.m_imultiset.lower_bound(preferred_units, comp));
//A free block of at least preferred_units exists: allocate from it
975 if(it != m_header.m_imultiset.end()){
976 return reuse_ptr = 0, this->priv_check_and_allocate
977 (preferred_units, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
//Otherwise settle for the biggest smaller block if it still covers limit_units
980 if(it != m_header.m_imultiset.begin()&&
981 (--it)->m_size >= limit_units){
982 return reuse_ptr = 0, this->priv_check_and_allocate
983 (it->m_size, ipcdetail::to_raw_pointer(&*it), prefer_in_recvd_out_size);
988 //Now try to expand both sides with min size
989 if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
990 return priv_expand_both_sides
991 (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false, backwards_multiple);
//Everything failed: clear reuse_ptr and return null
993 return reuse_ptr = 0, static_cast<void*>(0);
996 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
998 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
999 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_get_block(const void *ptr)
1001 return const_cast<block_ctrl*>
1002 (reinterpret_cast<const block_ctrl*>
1003 (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
1006 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1008 void *rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1009 priv_get_user_buffer(const typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1010 { return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes); }
1012 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1013 inline typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::size_type
1014 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1015 priv_get_total_units(size_type userbytes)
1017 if(userbytes < UsableByPreviousChunk)
1018 userbytes = UsableByPreviousChunk;
1019 size_type units = ipcdetail::get_rounded_size(userbytes - UsableByPreviousChunk, Alignment)/Alignment + AllocatedCtrlUnits;
1020 if(units < BlockCtrlUnits) units = BlockCtrlUnits;
//Tries to grow the allocated buffer 'ptr' forward, in place, so it holds at
//least min_size usable bytes, preferring preferred_size (passed in through
//prefer_in_recvd_out_size, which also receives the final usable size).
//Returns true on success. Forward expansion merges the next block when it is
//free, splitting off a remainder block if enough space is left over.
//NOTE(review): some original lines (braces/returns) are missing from this
//extract; the code is annotated as-is.
1024 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1025 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::
1026 priv_expand (void *ptr, const size_type min_size, size_type &prefer_in_recvd_out_size)
1028 size_type const preferred_size = prefer_in_recvd_out_size;
1029 //Obtain the real size of the block
1030 block_ctrl *block = priv_get_block(ptr);
1031 size_type old_block_units = block->m_size;
1033 //The block must be marked as allocated and the sizes must be equal
1034 BOOST_ASSERT(priv_is_allocated_block(block));
1036 //Put this to a safe value
1037 prefer_in_recvd_out_size = (old_block_units - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
//The block may already be big enough: nothing to do in that case
1038 if(prefer_in_recvd_out_size >= preferred_size || prefer_in_recvd_out_size >= min_size)
1041 //Now translate it to Alignment units
1042 const size_type min_user_units = algo_impl_t::ceil_units(min_size - UsableByPreviousChunk);
1043 const size_type preferred_user_units = algo_impl_t::ceil_units(preferred_size - UsableByPreviousChunk);
1045 //Some parameter checks
1046 BOOST_ASSERT(min_user_units <= preferred_user_units);
1048 block_ctrl *next_block;
//Forward expansion is only possible when the next contiguous block is free
1050 if(priv_is_allocated_block(next_block = priv_next_block(block))){
1051 return prefer_in_recvd_out_size >= min_size;
1053 algo_impl_t::assert_alignment(next_block);
1055 //Is "block" + "next_block" big enough?
1056 const size_type merged_units = old_block_units + (size_type)next_block->m_size;
1058 //Now get the expansion size
1059 const size_type merged_user_units = merged_units - AllocatedCtrlUnits;
//Even the full merge cannot satisfy min_size: report the merged size and fail
1061 if(merged_user_units < min_user_units){
1062 prefer_in_recvd_out_size = merged_units*Alignment - UsableByPreviousChunk;
1066 //Now get the maximum size the user can allocate
1067 size_type intended_user_units = (merged_user_units < preferred_user_units) ?
1068 merged_user_units : preferred_user_units;
1070 //These are total units of the merged block (supposing the next block can be split)
1071 const size_type intended_units = AllocatedCtrlUnits + intended_user_units;
1073 //Check if we can split the next one in two parts
1074 if((merged_units - intended_units) >= BlockCtrlUnits){
1075 //This block is bigger than needed, split it in
1076 //two blocks, the first one will be merged and
1077 //the second's size will be the remaining space
1078 BOOST_ASSERT(next_block->m_size == priv_next_block(next_block)->m_prev_size);
1079 const size_type rem_units = merged_units - intended_units;
1081 //Check if we need to update the old next block in the free blocks tree
1082 //If the new size fulfills tree invariants, we just need to replace the node
1083 //(the block start has been displaced), otherwise erase() + insert().
1085 //This fixup must be done in two parts, because the new next block might
1086 //overwrite the tree hook of the old next block. So we first erase the
1087 //old if needed and we'll insert the new one after creating the new next
1088 imultiset_iterator old_next_block_it(Imultiset::s_iterator_to(*next_block));
1089 const bool size_invariants_broken =
1090 (next_block->m_size - rem_units ) < BlockCtrlUnits ||
1091 (old_next_block_it != m_header.m_imultiset.begin() &&
1092 (--imultiset_iterator(old_next_block_it))->m_size > rem_units);
1093 if(size_invariants_broken){
1094 m_header.m_imultiset.erase(old_next_block_it);
1096 //This is the remaining block
//Placement-new constructs the remainder's control data after the grown block
1097 block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
1098 (reinterpret_cast<char*>(block) + intended_units*Alignment), boost_container_new_t())block_ctrl;
1099 rem_block->m_size = rem_units;
1100 algo_impl_t::assert_alignment(rem_block);
1101 BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
1102 priv_mark_as_free_block(rem_block);
1104 //Now the second part of the fixup
1105 if(size_invariants_broken)
1106 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
1108 m_header.m_imultiset.replace_node(old_next_block_it, *rem_block);
1110 //Write the new length
1111 block->m_size = intended_user_units + AllocatedCtrlUnits;
1112 BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
1113 m_header.m_allocated += (intended_units - old_block_units)*Alignment;
1115 //There is no free space to create a new node: just merge both blocks
1117 //Now we have to update the data in the tree
1118 m_header.m_imultiset.erase(Imultiset::s_iterator_to(*next_block));
1120 //Write the new length
1121 block->m_size = merged_units;
1122 BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
1123 m_header.m_allocated += (merged_units - old_block_units)*Alignment;
//Mark the grown block as allocated and report its final usable byte count
1125 priv_mark_as_allocated_block(block);
1126 prefer_in_recvd_out_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
1130 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1131 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1132 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_prev_block
1133 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1135 BOOST_ASSERT(!ptr->m_prev_allocated);
1136 return reinterpret_cast<block_ctrl *>
1137 (reinterpret_cast<char*>(ptr) - ptr->m_prev_size*Alignment);
1142 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1143 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1144 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_end_block
1145 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *first_segment_block)
1147 //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
1148 //distance with the end block
1149 BOOST_ASSERT(first_segment_block->m_prev_allocated);
1150 block_ctrl *end_block = reinterpret_cast<block_ctrl *>
1151 (reinterpret_cast<char*>(first_segment_block) + first_segment_block->m_prev_size*Alignment);
1153 BOOST_ASSERT(end_block->m_allocated == 1);
1154 BOOST_ASSERT(end_block->m_size == first_segment_block->m_prev_size);
1155 BOOST_ASSERT(end_block > first_segment_block);
1159 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1160 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1161 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_first_block
1162 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *end_segment_block)
1164 //The first block's logic is different from the rest of blocks: stores in m_prev_size the absolute
1165 //distance with the end block
1166 BOOST_ASSERT(end_segment_block->m_allocated);
1167 block_ctrl *first_block = reinterpret_cast<block_ctrl *>
1168 (reinterpret_cast<char*>(end_segment_block) - end_segment_block->m_size*Alignment);
1170 BOOST_ASSERT(first_block->m_prev_allocated == 1);
1171 BOOST_ASSERT(first_block->m_prev_size == end_segment_block->m_size);
1172 BOOST_ASSERT(end_segment_block > first_block);
1177 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1178 typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *
1179 rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_next_block
1180 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *ptr)
1182 return reinterpret_cast<block_ctrl *>
1183 (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
1186 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1187 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_allocated_block
1188 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1190 bool allocated = block->m_allocated != 0;
1192 if(block != priv_end_block()){
1193 block_ctrl *next_block = reinterpret_cast<block_ctrl *>
1194 (reinterpret_cast<char*>(block) + block->m_size*Alignment);
1195 bool next_block_prev_allocated = next_block->m_prev_allocated != 0;
1196 (void)next_block_prev_allocated;
1197 BOOST_ASSERT(allocated == next_block_prev_allocated);
1203 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1204 bool rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_is_prev_allocated
1205 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1207 if(block->m_prev_allocated){
1212 if(block != priv_first_block()){
1213 block_ctrl *prev = priv_prev_block(block);
1215 BOOST_ASSERT(!prev->m_allocated);
1216 BOOST_ASSERT(prev->m_size == block->m_prev_size);
1223 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1224 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_allocated_block
1225 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1227 block->m_allocated = 1;
1228 reinterpret_cast<block_ctrl *>
1229 (reinterpret_cast<char*>(block)+ block->m_size*Alignment)->m_prev_allocated = 1;
1232 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment> inline
1233 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_mark_as_free_block
1234 (typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl *block)
1236 block->m_allocated = 0;
1237 block_ctrl *next_block = priv_next_block(block);
1238 next_block->m_prev_allocated = 0;
1239 next_block->m_prev_size = block->m_size;
//Carves 'nunits' units out of the free block 'block' (already chosen by the
//caller): splits off a remainder block when the leftover can hold a full
//block_ctrl, fixes the free-block multiset, updates accounting and boundary
//tags, clears the tree-hook bytes and returns the user buffer.
//received_size gets the usable byte count actually granted.
//NOTE(review): some original lines (braces/else/returns) are missing from
//this extract; the code is annotated as-is.
1242 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1243 void* rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_check_and_allocate
1245 ,typename rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::block_ctrl* block
1246 ,size_type &received_size)
//Splitting is only worthwhile when the remainder can hold a block header
1248 size_type upper_nunits = nunits + BlockCtrlUnits;
1249 imultiset_iterator it_old = Imultiset::s_iterator_to(*block);
1250 algo_impl_t::assert_alignment(block);
1252 if (block->m_size >= upper_nunits){
1253 //This block is bigger than needed, split it in
1254 //two blocks, the first's size will be "units" and
1255 //the second's size "block->m_size-units"
1256 size_type block_old_size = block->m_size;
1257 block->m_size = nunits;
1258 BOOST_ASSERT(block->m_size >= BlockCtrlUnits);
1260 //This is the remaining block
//Placement-new constructs the remainder's control data past the carved units
1261 block_ctrl *rem_block = ::new(reinterpret_cast<block_ctrl*>
1262 (reinterpret_cast<char*>(block) + Alignment*nunits), boost_container_new_t())block_ctrl;
1263 algo_impl_t::assert_alignment(rem_block);
1264 rem_block->m_size = block_old_size - nunits;
1265 BOOST_ASSERT(rem_block->m_size >= BlockCtrlUnits);
1266 priv_mark_as_free_block(rem_block);
//If the remainder keeps the multiset ordering, replace the node in place
//(O(1)); otherwise fall back to erase() + hinted insert()
1268 imultiset_iterator it_hint;
1269 if(it_old == m_header.m_imultiset.begin()
1270 || (--imultiset_iterator(it_old))->m_size <= rem_block->m_size){
1271 //option a: slow but secure
1272 //m_header.m_imultiset.insert(m_header.m_imultiset.erase(it_old), *rem_block);
1273 //option b: Construct an empty node and swap
1274 //Imultiset::init_node(*rem_block);
1275 //block->swap_nodes(*rem_block);
1276 //option c: replace the node directly
1277 m_header.m_imultiset.replace_node(Imultiset::s_iterator_to(*it_old), *rem_block);
1280 //Now we have to update the data in the tree
1281 m_header.m_imultiset.erase(it_old);
1282 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *rem_block);
//No split possible: consume the whole block, just unlink it from the tree
1286 else if (block->m_size >= nunits){
1287 m_header.m_imultiset.erase(it_old);
1293 //We need block_ctrl for deallocation stuff, so
1294 //return memory user can overwrite
1295 m_header.m_allocated += (size_type)block->m_size*Alignment;
1296 received_size = ((size_type)block->m_size - AllocatedCtrlUnits)*Alignment + UsableByPreviousChunk;
1298 //Mark the block as allocated
1299 priv_mark_as_allocated_block(block);
1301 //Clear the memory occupied by the tree hook, since this won't be
1302 //cleared with zero_free_memory
1303 TreeHook *t = static_cast<TreeHook*>(block);
1304 //Just clear the memory part reserved for the user
1305 std::size_t tree_hook_offset_in_block = (char*)t - (char*)block;
1306 //volatile char *ptr =
1307 char *ptr = reinterpret_cast<char*>(block)+tree_hook_offset_in_block;
1308 const std::size_t s = BlockCtrlBytes - tree_hook_offset_in_block;
1309 std::memset(ptr, 0, s);
//m_prev_size of the next block is meaningless while we are allocated: zero it
1310 this->priv_next_block(block)->m_prev_size = 0;
1311 return priv_get_user_buffer(block);
1314 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1315 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::deallocate(void* addr)
1318 //-----------------------
1319 boost::interprocess::scoped_lock<mutex_type> guard(m_header);
1320 //-----------------------
1321 return this->priv_deallocate(addr);
//Returns the block holding 'addr' to the free structures, coalescing with
//the previous and/or next contiguous blocks when they are free, and repairs
//the intrusive multiset with O(1) shortcuts when tree invariants still hold.
//The caller must already hold the segment lock.
//NOTE(review): some original lines (null check, braces, else branches) are
//missing from this extract; the code is annotated as-is.
1324 template<class MutexFamily, class VoidPointer, std::size_t MemAlignment>
1325 void rbtree_best_fit<MutexFamily, VoidPointer, MemAlignment>::priv_deallocate(void* addr)
1329 block_ctrl *block = priv_get_block(addr);
1331 //The blocks must be marked as allocated and the sizes must be equal
1332 BOOST_ASSERT(priv_is_allocated_block(block));
1334 //Check if alignment and block size are right
1335 algo_impl_t::assert_alignment(addr);
1337 size_type block_old_size = Alignment*(size_type)block->m_size;
1338 BOOST_ASSERT(m_header.m_allocated >= block_old_size);
1340 //Update used memory count
1341 m_header.m_allocated -= block_old_size;
1343 //The block to insert in the tree
1344 block_ctrl *block_to_insert = block;
1346 //Get the next block
1347 block_ctrl *const next_block = priv_next_block(block);
1348 const bool merge_with_prev = !priv_is_prev_allocated(block);
1349 const bool merge_with_next = !priv_is_allocated_block(next_block);
1351 //Merge logic. First just update block sizes, then fix free blocks tree
1352 if(merge_with_prev || merge_with_next){
1353 //Merge if the previous is free
1354 if(merge_with_prev){
1355 //Get the previous block
1356 block_to_insert = priv_prev_block(block);
1357 block_to_insert->m_size += block->m_size;
1358 BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
1360 //Merge if the next is free
1361 if(merge_with_next){
1362 block_to_insert->m_size += next_block->m_size;
1363 BOOST_ASSERT(block_to_insert->m_size >= BlockCtrlUnits);
1364 const imultiset_iterator next_it = Imultiset::s_iterator_to(*next_block);
//When also merged with the previous (already in the tree), the next block's
//node is simply erased; otherwise its node can be reused for the merged block
1365 if(merge_with_prev){
1366 m_header.m_imultiset.erase(next_it);
1369 m_header.m_imultiset.replace_node(next_it, *block_to_insert);
1373 //Now try to shortcut erasure + insertion (O(log(N))) with
1374 //a O(1) operation if merging does not alter tree positions
1375 const imultiset_iterator block_to_check_it = Imultiset::s_iterator_to(*block_to_insert);
1376 imultiset_const_iterator next_to_check_it(block_to_check_it), end_it(m_header.m_imultiset.end());
1378 if(++next_to_check_it != end_it && block_to_insert->m_size > next_to_check_it->m_size){
1379 //Block is bigger than next, so move it
1380 m_header.m_imultiset.erase(block_to_check_it);
1381 m_header.m_imultiset.insert(end_it, *block_to_insert);
1384 //Block size increment didn't violate tree invariants so there is nothing to fix
//No merge happened: hint-insert the freed block at the tree's lower end
1388 m_header.m_imultiset.insert(m_header.m_imultiset.begin(), *block_to_insert);
//Finally publish the free state in the boundary tags
1390 priv_mark_as_free_block(block_to_insert);
1393 #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
1395 } //namespace interprocess {
1396 } //namespace boost {
1398 #include <boost/interprocess/detail/config_end.hpp>
1400 #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_RBTREE_BEST_FIT_HPP