]> git.proxmox.com Git - ceph.git/blob - ceph/src/boost/libs/interprocess/include/boost/interprocess/mem_algo/detail/simple_seq_fit_impl.hpp
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / boost / libs / interprocess / include / boost / interprocess / mem_algo / detail / simple_seq_fit_impl.hpp
1 //////////////////////////////////////////////////////////////////////////////
2 //
3 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // See http://www.boost.org/libs/interprocess for documentation.
8 //
9 //////////////////////////////////////////////////////////////////////////////
10
11 #ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
12 #define BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
13
14 #ifndef BOOST_CONFIG_HPP
15 # include <boost/config.hpp>
16 #endif
17 #
18 #if defined(BOOST_HAS_PRAGMA_ONCE)
19 # pragma once
20 #endif
21
22 #include <boost/interprocess/detail/config_begin.hpp>
23 #include <boost/interprocess/detail/workaround.hpp>
24
25 #include <boost/intrusive/pointer_traits.hpp>
26
27 #include <boost/interprocess/interprocess_fwd.hpp>
28 #include <boost/interprocess/containers/allocation_type.hpp>
29 #include <boost/container/detail/multiallocation_chain.hpp>
30 #include <boost/interprocess/offset_ptr.hpp>
31 #include <boost/interprocess/sync/interprocess_mutex.hpp>
32 #include <boost/interprocess/exceptions.hpp>
33 #include <boost/interprocess/detail/utilities.hpp>
34 #include <boost/interprocess/detail/min_max.hpp>
35 #include <boost/interprocess/detail/type_traits.hpp>
36 #include <boost/interprocess/sync/scoped_lock.hpp>
37 #include <boost/intrusive/pointer_traits.hpp>
38 #include <boost/interprocess/mem_algo/detail/mem_algo_common.hpp>
39 #include <boost/move/detail/type_traits.hpp> //make_unsigned, alignment_of
40 #include <boost/intrusive/detail/minimal_pair_header.hpp>
41 #include <cstring>
42 #include <boost/assert.hpp>
43
44 //!\file
45 //!Describes sequential fit algorithm used to allocate objects in shared memory.
46 //!This class is intended as a base class for single segment and multi-segment
47 //!implementations.
48
49 namespace boost {
50 namespace interprocess {
51 namespace ipcdetail {
52
53 //!This class implements the simple sequential fit algorithm with a simply
54 //!linked list of free buffers.
55 //!This class is intended as a base class for single segment and multi-segment
56 //!implementations.
//!This class implements the simple sequential fit algorithm with a simply
//!linked list of free buffers.
//!This class is intended as a base class for single segment and multi-segment
//!implementations.
//!
//!Free blocks form a circular singly linked list rooted at m_header.m_root;
//!an allocated (in-use) block is marked by a null m_next link
//!(see priv_mark_new_allocated_block/priv_is_allocated_block below).
template<class MutexFamily, class VoidPointer>
class simple_seq_fit_impl
{
   //Non-copyable
   simple_seq_fit_impl();
   simple_seq_fit_impl(const simple_seq_fit_impl &);
   simple_seq_fit_impl &operator=(const simple_seq_fit_impl &);

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<char>::type char_ptr;

   public:

   //!Shared interprocess_mutex family used for the rest of the Interprocess framework
   typedef MutexFamily mutex_family;
   //!Pointer type to be used with the rest of the Interprocess framework
   typedef VoidPointer void_pointer;
   typedef boost::container::container_detail::
      basic_multiallocation_chain<VoidPointer> multiallocation_chain;

   typedef typename boost::intrusive::pointer_traits<char_ptr>::difference_type difference_type;
   typedef typename boost::container::container_detail::make_unsigned<difference_type>::type size_type;


   private:
   class block_ctrl;
   friend class block_ctrl;

   typedef typename boost::intrusive::
      pointer_traits<VoidPointer>::template
         rebind_pointer<block_ctrl>::type block_ctrl_ptr;

   //!Block control structure
   class block_ctrl
   {
      public:
      //!Offset pointer to the next block.
      //!Null when the block is allocated; non-null links the circular free list.
      block_ctrl_ptr m_next;
      //!This block's memory size (including block_ctrl
      //!header) in Alignment units
      size_type m_size;

      //!Bytes usable by the caller: total bytes minus the control header
      size_type get_user_bytes() const
      {  return this->m_size*Alignment - BlockCtrlBytes;  }

      //!Total bytes occupied by the block, control header included
      size_type get_total_bytes() const
      {  return this->m_size*Alignment;  }
   };

   //!Shared interprocess_mutex to protect memory allocate/deallocate
   typedef typename MutexFamily::mutex_type interprocess_mutex;

   //!This struct includes needed data and derives from
   //!interprocess_mutex to allow EBO when using null interprocess_mutex
   struct header_t : public interprocess_mutex
   {
      //!Pointer to the first free block
      block_ctrl m_root;
      //!Allocated bytes for internal checking
      size_type m_allocated;
      //!The size of the memory segment
      size_type m_size;
      //!The extra size required by the segment
      size_type m_extra_hdr_bytes;
   }  m_header;

   friend class ipcdetail::memory_algorithm_common<simple_seq_fit_impl>;

   typedef ipcdetail::memory_algorithm_common<simple_seq_fit_impl> algo_impl_t;

   public:
   //!Constructor. "size" is the total size of the managed memory segment,
   //!"extra_hdr_bytes" indicates the extra bytes beginning in the sizeof(simple_seq_fit_impl)
   //!offset that the allocator should not use at all.
   simple_seq_fit_impl (size_type size, size_type extra_hdr_bytes);

   //!Destructor
   ~simple_seq_fit_impl();

   //!Obtains the minimum size needed by the algorithm
   static size_type get_min_size (size_type extra_hdr_bytes);

   //Functions for single segment management

   //!Allocates bytes, returns 0 if there is not more memory
   void* allocate (size_type nbytes);

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)

   //!Multiple element allocation, same size
   void allocate_many(size_type elem_bytes, size_type num_elements, multiallocation_chain &chain)
   {
      //Lock once and delegate the whole batch to the common helper
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_bytes, num_elements, chain);
   }

   //!Multiple element allocation, different size
   void allocate_many(const size_type *elem_sizes, size_type n_elements, size_type sizeof_element, multiallocation_chain &chain)
   {
      //Lock once and delegate the whole batch to the common helper
      //-----------------------
      boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
      //-----------------------
      algo_impl_t::allocate_many(this, elem_sizes, n_elements, sizeof_element, chain);
   }

   //!Multiple element deallocation
   void deallocate_many(multiallocation_chain &chain);

   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   //!Deallocates previously allocated bytes
   void deallocate (void *addr);

   //!Returns the size of the memory segment
   size_type get_size() const;

   //!Returns the number of free bytes of the memory segment
   size_type get_free_memory() const;

   //!Increases managed memory in extra_size bytes more
   void grow(size_type extra_size);

   //!Decreases managed memory as much as possible
   void shrink_to_fit();

   //!Returns true if all allocated memory has been deallocated
   bool all_memory_deallocated();

   //!Makes an internal sanity check and returns true if success
   bool check_sanity();

   //!Initializes to zero all the memory that's not in use.
   //!This function is normally used for security reasons.
   void zero_free_memory();

   template<class T>
   T *allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                          size_type &prefer_in_recvd_out_size, T *&reuse);

   void * raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
                          size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object = 1);

   //!Returns the size of the buffer previously allocated pointed by ptr
   size_type size(const void *ptr) const;

   //!Allocates aligned bytes, returns 0 if there is not more memory.
   //!Alignment must be power of 2
   void* allocate_aligned (size_type nbytes, size_type alignment);

   private:

   //!Obtains the pointer returned to the user from the block control
   static void *priv_get_user_buffer(const block_ctrl *block);

   //!Obtains the block control structure of the user buffer
   static block_ctrl *priv_get_block(const void *ptr);

   //!Real allocation algorithm with min allocation option
   void * priv_allocate(boost::interprocess::allocation_type command
                       ,size_type min_size
                       ,size_type &prefer_in_recvd_out_size, void *&reuse_ptr);

   void * priv_allocation_command(boost::interprocess::allocation_type command
                                 ,size_type min_size
                                 ,size_type &prefer_in_recvd_out_size
                                 ,void *&reuse_ptr
                                 ,size_type sizeof_object);

   //!Returns the number of total units that a user buffer
   //!of "userbytes" bytes really occupies (including header)
   static size_type priv_get_total_units(size_type userbytes);

   //!Offset (from this_ptr) of the first block, past this header,
   //!the extra header bytes and alignment padding
   static size_type priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes);
   //!Offset (from "this") one-past the last whole Alignment unit of the segment
   size_type priv_block_end_offset() const;

   //!Returns next block if it's free.
   //!Returns 0 if next block is not free.
   block_ctrl *priv_next_block_if_free(block_ctrl *ptr);

   //!Check if this block is free (not allocated)
   bool priv_is_allocated_block(block_ctrl *ptr);

   //!Returns previous block's if it's free.
   //!Returns 0 if previous block is not free.
   std::pair<block_ctrl*, block_ctrl*> priv_prev_block_if_free(block_ctrl *ptr);

   //!Real expand function implementation
   bool priv_expand(void *ptr, size_type min_size, size_type &prefer_in_recvd_out_size);

   //!Real expand to both sides implementation
   void* priv_expand_both_sides(boost::interprocess::allocation_type command
                               ,size_type min_size, size_type &prefer_in_recvd_out_size
                               ,void *reuse_ptr
                               ,bool only_preferred_backwards);

   //!Real private aligned allocation function
   //void* priv_allocate_aligned (size_type nbytes, size_type alignment);

   //!Checks if block has enough memory and splits/unlinks the block
   //!returning the address to the users
   void* priv_check_and_allocate(size_type units
                                ,block_ctrl* prev
                                ,block_ctrl* block
                                ,size_type &received_size);
   //!Real deallocation algorithm
   void priv_deallocate(void *addr);

   //!Makes a new memory portion available for allocation
   void priv_add_segment(void *addr, size_type size);

   //!Marks "block" as allocated by nulling its free-list link
   void priv_mark_new_allocated_block(block_ctrl *block);

   public:
   static const size_type Alignment = ::boost::container::container_detail::alignment_of
      < ::boost::container::container_detail::max_align_t>::value;
   private:
   //Control-header size rounded up to a whole number of Alignment units
   static const size_type BlockCtrlBytes = ipcdetail::ct_rounded_size<sizeof(block_ctrl), Alignment>::value;
   static const size_type BlockCtrlUnits = BlockCtrlBytes/Alignment;
   static const size_type MinBlockUnits = BlockCtrlUnits;
   static const size_type MinBlockSize = MinBlockUnits*Alignment;
   static const size_type AllocatedCtrlBytes = BlockCtrlBytes;
   static const size_type AllocatedCtrlUnits = BlockCtrlUnits;
   static const size_type UsableByPreviousChunk = 0;

   public:
   //!Bookkeeping overhead paid by every single allocation
   static const size_type PayloadPerAllocation = BlockCtrlBytes;
};
287
288 template<class MutexFamily, class VoidPointer>
289 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
290 simple_seq_fit_impl<MutexFamily, VoidPointer>
291 ::priv_first_block_offset(const void *this_ptr, size_type extra_hdr_bytes)
292 {
293 //First align "this" pointer
294 size_type uint_this = (std::size_t)this_ptr;
295 size_type uint_aligned_this = uint_this/Alignment*Alignment;
296 size_type this_disalignment = (uint_this - uint_aligned_this);
297 size_type block1_off =
298 ipcdetail::get_rounded_size(sizeof(simple_seq_fit_impl) + extra_hdr_bytes + this_disalignment, Alignment)
299 - this_disalignment;
300 algo_impl_t::assert_alignment(this_disalignment + block1_off);
301 return block1_off;
302 }
303
304 template<class MutexFamily, class VoidPointer>
305 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
306 simple_seq_fit_impl<MutexFamily, VoidPointer>
307 ::priv_block_end_offset() const
308 {
309 //First align "this" pointer
310 size_type uint_this = (std::size_t)this;
311 size_type uint_aligned_this = uint_this/Alignment*Alignment;
312 size_type this_disalignment = (uint_this - uint_aligned_this);
313 size_type old_end =
314 ipcdetail::get_truncated_size(m_header.m_size + this_disalignment, Alignment)
315 - this_disalignment;
316 algo_impl_t::assert_alignment(old_end + this_disalignment);
317 return old_end;
318 }
319
//!Constructor: sets up the bookkeeping header and turns the whole remaining
//!segment into a single free block linked circularly with the root.
template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::
   simple_seq_fit_impl(size_type segment_size, size_type extra_hdr_bytes)
{
   //Initialize sizes and counters
   m_header.m_allocated = 0;
   m_header.m_size = segment_size;
   m_header.m_extra_hdr_bytes = extra_hdr_bytes;

   //Initialize pointers: the first free block starts right after this
   //header plus the extra header bytes, rounded up to Alignment
   size_type block1_off = priv_first_block_offset(this, extra_hdr_bytes);

   m_header.m_root.m_next = reinterpret_cast<block_ctrl*>
      ((reinterpret_cast<char*>(this) + block1_off));
   algo_impl_t::assert_alignment(ipcdetail::to_raw_pointer(m_header.m_root.m_next));
   //The single free block spans everything after block1_off, measured in
   //Alignment units, and closes the circular free list back to the root
   m_header.m_root.m_next->m_size = (segment_size - block1_off)/Alignment;
   m_header.m_root.m_next->m_next = &m_header.m_root;
}
338
//!Destructor: intentionally empty. The disabled asserts below would flag a
//!leak (memory still accounted as allocated when the allocator is destroyed).
template<class MutexFamily, class VoidPointer>
inline simple_seq_fit_impl<MutexFamily, VoidPointer>::~simple_seq_fit_impl()
{
   //There is a memory leak!
//   BOOST_ASSERT(m_header.m_allocated == 0);
//   BOOST_ASSERT(m_header.m_root.m_next->m_next == block_ctrl_ptr(&m_header.m_root));
}
346
//!Increases the managed memory by extra_size bytes: builds a new block over
//!the added tail and hands it to priv_deallocate so it joins the free list.
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::grow(size_type extra_size)
{
   //Old highest address block's end offset
   size_type old_end = this->priv_block_end_offset();

   //Update managed buffer's size
   m_header.m_size += extra_size;

   //We need at least MinBlockSize blocks to create a new block
   if((m_header.m_size - old_end) < MinBlockSize){
      return;
   }

   //We'll create a new free block with extra_size bytes

   block_ctrl *new_block = reinterpret_cast<block_ctrl*>
      (reinterpret_cast<char*>(this) + old_end);

   algo_impl_t::assert_alignment(new_block);
   //Mark the block allocated (null next) so it can be "freed" below
   new_block->m_next = 0;
   new_block->m_size = (m_header.m_size - old_end)/Alignment;
   //Count it as allocated first; the deallocation below is expected to
   //balance the counter (priv_deallocate is defined outside this chunk —
   //NOTE(review): confirm it decrements m_allocated)
   m_header.m_allocated += new_block->m_size*Alignment;
   this->priv_deallocate(priv_get_user_buffer(new_block));
}
372
//!Shrinks the managed segment by releasing the highest-address free block,
//!but only when that block is physically the last one in the segment.
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::shrink_to_fit()
{
   //Get the root and the first memory block
   block_ctrl *prev  = &m_header.m_root;
   block_ctrl *last  = &m_header.m_root;
   block_ctrl *block = ipcdetail::to_raw_pointer(last->m_next);
   block_ctrl *root  = &m_header.m_root;

   //No free block?
   if(block == root) return;

   //Iterate through the free block list to locate the last free block
   //("last") and its predecessor in the list ("prev")
   while(block != root){
      prev  = last;
      last  = block;
      block = ipcdetail::to_raw_pointer(block->m_next);
   }

   char *last_free_end_address = reinterpret_cast<char*>(last) + last->m_size*Alignment;
   if(last_free_end_address != (reinterpret_cast<char*>(this) + priv_block_end_offset())){
      //there is an allocated block in the end of this block
      //so no shrinking is possible
      return;
   }

   //Check if have only 1 big free block
   void *unique_block = 0;
   if(!m_header.m_allocated){
      //Nothing is allocated: temporarily allocate so the (possibly split)
      //last free block can be consumed below, then re-read it
      BOOST_ASSERT(prev == root);
      size_type ignore_recvd = 0;
      void *ignore_reuse = 0;
      unique_block = priv_allocate(boost::interprocess::allocate_new, 0, ignore_recvd, ignore_reuse);
      if(!unique_block)
         return;
      last = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
      BOOST_ASSERT(last_free_end_address == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
   }
   size_type last_units = last->m_size;

   //Consume the whole last free block so it leaves the free list
   //(NOTE(review): priv_check_and_allocate is defined outside this chunk;
   //it is expected to unlink "last" and set received_size — confirm)
   size_type received_size;
   void *addr = priv_check_and_allocate(last_units, prev, last, received_size);
   (void)addr;
   BOOST_ASSERT(addr);
   BOOST_ASSERT(received_size == last_units*Alignment - AllocatedCtrlBytes);

   //Shrink it: drop the consumed block from the segment size,
   //truncating the size to whole Alignment units
   m_header.m_size /= Alignment;
   m_header.m_size -= last->m_size;
   m_header.m_size *= Alignment;
   m_header.m_allocated -= last->m_size*Alignment;

   if(unique_block)
      priv_deallocate(unique_block);
}
428
//!Marks "new_block" as allocated: in this algorithm an in-use block is
//!identified by a null m_next link (see priv_is_allocated_block).
template<class MutexFamily, class VoidPointer>
inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_mark_new_allocated_block(block_ctrl *new_block)
{
   new_block->m_next = 0;
}
435
436 template<class MutexFamily, class VoidPointer>
437 inline
438 typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
439 simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_get_block(const void *ptr)
440 {
441 return const_cast<block_ctrl*>(reinterpret_cast<const block_ctrl*>
442 (reinterpret_cast<const char*>(ptr) - AllocatedCtrlBytes));
443 }
444
445 template<class MutexFamily, class VoidPointer>
446 inline
447 void *simple_seq_fit_impl<MutexFamily, VoidPointer>::
448 priv_get_user_buffer(const typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
449 {
450 return const_cast<char*>(reinterpret_cast<const char*>(block) + AllocatedCtrlBytes);
451 }
452
453 template<class MutexFamily, class VoidPointer>
454 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_add_segment(void *addr, size_type segment_size)
455 {
456 algo_impl_t::assert_alignment(addr);
457 //Check size
458 BOOST_ASSERT(!(segment_size < MinBlockSize));
459 if(segment_size < MinBlockSize)
460 return;
461 //Construct big block using the new segment
462 block_ctrl *new_block = static_cast<block_ctrl *>(addr);
463 new_block->m_size = segment_size/Alignment;
464 new_block->m_next = 0;
465 //Simulate this block was previously allocated
466 m_header.m_allocated += new_block->m_size*Alignment;
467 //Return block and insert it in the free block list
468 this->priv_deallocate(priv_get_user_buffer(new_block));
469 }
470
//!Returns the total size in bytes of the managed memory segment
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::get_size() const
   {  return m_header.m_size;  }
475
//!Returns the number of free bytes: segment size minus allocated bytes and
//!minus the rounded-up space consumed by this header and the extra header
//!bytes. Reads the counters without taking the segment lock.
template<class MutexFamily, class VoidPointer>
inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
simple_seq_fit_impl<MutexFamily, VoidPointer>::get_free_memory() const
{
   return m_header.m_size - m_header.m_allocated -
      algo_impl_t::multiple_of_units(sizeof(*this) + m_header.m_extra_hdr_bytes);
}
483
484 template<class MutexFamily, class VoidPointer>
485 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
486 simple_seq_fit_impl<MutexFamily, VoidPointer>::
487 get_min_size (size_type extra_hdr_bytes)
488 {
489 return ipcdetail::get_rounded_size((size_type)sizeof(simple_seq_fit_impl),Alignment) +
490 ipcdetail::get_rounded_size(extra_hdr_bytes,Alignment)
491 + MinBlockSize;
492 }
493
494 template<class MutexFamily, class VoidPointer>
495 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
496 all_memory_deallocated()
497 {
498 //-----------------------
499 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
500 //-----------------------
501 return m_header.m_allocated == 0 &&
502 ipcdetail::to_raw_pointer(m_header.m_root.m_next->m_next) == &m_header.m_root;
503 }
504
505 template<class MutexFamily, class VoidPointer>
506 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::zero_free_memory()
507 {
508 //-----------------------
509 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
510 //-----------------------
511 block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
512
513 //Iterate through all free portions
514 do{
515 //Just clear user the memory part reserved for the user
516 std::memset( priv_get_user_buffer(block)
517 , 0
518 , block->get_user_bytes());
519 block = ipcdetail::to_raw_pointer(block->m_next);
520 }
521 while(block != &m_header.m_root);
522 }
523
524 template<class MutexFamily, class VoidPointer>
525 inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
526 check_sanity()
527 {
528 //-----------------------
529 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
530 //-----------------------
531 block_ctrl *block = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
532
533 size_type free_memory = 0;
534
535 //Iterate through all blocks obtaining their size
536 while(block != &m_header.m_root){
537 algo_impl_t::assert_alignment(block);
538 if(!algo_impl_t::check_alignment(block))
539 return false;
540 //Free blocks's next must be always valid
541 block_ctrl *next = ipcdetail::to_raw_pointer(block->m_next);
542 if(!next){
543 return false;
544 }
545 free_memory += block->m_size*Alignment;
546 block = next;
547 }
548
549 //Check allocated bytes are less than size
550 if(m_header.m_allocated > m_header.m_size){
551 return false;
552 }
553
554 //Check free bytes are less than size
555 if(free_memory > m_header.m_size){
556 return false;
557 }
558 return true;
559 }
560
561 template<class MutexFamily, class VoidPointer>
562 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
563 allocate(size_type nbytes)
564 {
565 //-----------------------
566 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
567 //-----------------------
568 size_type ignore_recvd = nbytes;
569 void *ignore_reuse = 0;
570 return priv_allocate(boost::interprocess::allocate_new, nbytes, ignore_recvd, ignore_reuse);
571 }
572
573 template<class MutexFamily, class VoidPointer>
574 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
575 allocate_aligned(size_type nbytes, size_type alignment)
576 {
577 //-----------------------
578 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
579 //-----------------------
580 return algo_impl_t::
581 allocate_aligned(this, nbytes, alignment);
582 }
583
584 template<class MutexFamily, class VoidPointer>
585 template<class T>
586 inline T* simple_seq_fit_impl<MutexFamily, VoidPointer>::
587 allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
588 size_type &prefer_in_recvd_out_size, T *&reuse_ptr)
589 {
590 void *raw_reuse = reuse_ptr;
591 void * const ret = priv_allocation_command
592 (command, limit_size, prefer_in_recvd_out_size, raw_reuse, sizeof(T));
593 BOOST_ASSERT(0 == ((std::size_t)ret % ::boost::container::container_detail::alignment_of<T>::value));
594 reuse_ptr = static_cast<T*>(raw_reuse);
595 return static_cast<T*>(ret);
596 }
597
598 template<class MutexFamily, class VoidPointer>
599 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
600 raw_allocation_command (boost::interprocess::allocation_type command, size_type limit_objects,
601 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
602 {
603 size_type const preferred_objects = prefer_in_recvd_out_size;
604 if(!sizeof_object){
605 return reuse_ptr = 0, static_cast<void*>(0);
606 }
607 if(command & boost::interprocess::try_shrink_in_place){
608 if(!reuse_ptr) return static_cast<void*>(0);
609 prefer_in_recvd_out_size = preferred_objects*sizeof_object;
610 bool success = algo_impl_t::try_shrink
611 ( this, reuse_ptr, limit_objects*sizeof_object, prefer_in_recvd_out_size);
612 prefer_in_recvd_out_size /= sizeof_object;
613 return success ? reuse_ptr : 0;
614 }
615 else{
616 return priv_allocation_command
617 (command, limit_objects, prefer_in_recvd_out_size, reuse_ptr, sizeof_object);
618 }
619 }
620
621 template<class MutexFamily, class VoidPointer>
622 inline void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
623 priv_allocation_command (boost::interprocess::allocation_type command, size_type limit_size,
624 size_type &prefer_in_recvd_out_size, void *&reuse_ptr, size_type sizeof_object)
625 {
626 size_type const preferred_size = prefer_in_recvd_out_size;
627 command &= ~boost::interprocess::expand_bwd;
628 if(!command){
629 return reuse_ptr = 0, static_cast<void*>(0);
630 }
631
632 size_type max_count = m_header.m_size/sizeof_object;
633 if(limit_size > max_count || preferred_size > max_count){
634 return reuse_ptr = 0, static_cast<void*>(0);
635 }
636 size_type l_size = limit_size*sizeof_object;
637 size_type r_size = preferred_size*sizeof_object;
638 void *ret = 0;
639 {
640 //-----------------------
641 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
642 //-----------------------
643 ret = priv_allocate(command, l_size, r_size, reuse_ptr);
644 }
645 prefer_in_recvd_out_size = r_size/sizeof_object;
646 return ret;
647 }
648
649 template<class MutexFamily, class VoidPointer>
650 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
651 simple_seq_fit_impl<MutexFamily, VoidPointer>::size(const void *ptr) const
652 {
653 //We need no synchronization since this block is not going
654 //to be modified
655 //Obtain the real size of the block
656 const block_ctrl *block = static_cast<const block_ctrl*>(priv_get_block(ptr));
657 return block->get_user_bytes();
658 }
659
660 template<class MutexFamily, class VoidPointer>
661 void* simple_seq_fit_impl<MutexFamily, VoidPointer>::
662 priv_expand_both_sides(boost::interprocess::allocation_type command
663 ,size_type min_size
664 ,size_type &prefer_in_recvd_out_size
665 ,void *reuse_ptr
666 ,bool only_preferred_backwards)
667 {
668 size_type const preferred_size = prefer_in_recvd_out_size;
669 typedef std::pair<block_ctrl *, block_ctrl *> prev_block_t;
670 block_ctrl *reuse = priv_get_block(reuse_ptr);
671 prefer_in_recvd_out_size = 0;
672
673 if(this->size(reuse_ptr) > min_size){
674 prefer_in_recvd_out_size = this->size(reuse_ptr);
675 return reuse_ptr;
676 }
677
678 if(command & boost::interprocess::expand_fwd){
679 if(priv_expand(reuse_ptr, min_size, prefer_in_recvd_out_size = preferred_size))
680 return reuse_ptr;
681 }
682 else{
683 prefer_in_recvd_out_size = this->size(reuse_ptr);
684 }
685 if(command & boost::interprocess::expand_bwd){
686 size_type extra_forward = !prefer_in_recvd_out_size ? 0 : prefer_in_recvd_out_size + BlockCtrlBytes;
687 prev_block_t prev_pair = priv_prev_block_if_free(reuse);
688 block_ctrl *prev = prev_pair.second;
689 if(!prev){
690 return 0;
691 }
692
693 size_type needs_backwards =
694 ipcdetail::get_rounded_size(preferred_size - extra_forward, Alignment);
695
696 if(!only_preferred_backwards){
697 max_value(ipcdetail::get_rounded_size(min_size - extra_forward, Alignment)
698 ,min_value(prev->get_user_bytes(), needs_backwards));
699 }
700
701 //Check if previous block has enough size
702 if((prev->get_user_bytes()) >= needs_backwards){
703 //Now take all next space. This will succeed
704 if(!priv_expand(reuse_ptr, prefer_in_recvd_out_size, prefer_in_recvd_out_size)){
705 BOOST_ASSERT(0);
706 }
707
708 //We need a minimum size to split the previous one
709 if((prev->get_user_bytes() - needs_backwards) > 2*BlockCtrlBytes){
710 block_ctrl *new_block = reinterpret_cast<block_ctrl*>
711 (reinterpret_cast<char*>(reuse) - needs_backwards - BlockCtrlBytes);
712
713 new_block->m_next = 0;
714 new_block->m_size =
715 BlockCtrlUnits + (needs_backwards + extra_forward)/Alignment;
716 prev->m_size =
717 (prev->get_total_bytes() - needs_backwards)/Alignment - BlockCtrlUnits;
718 prefer_in_recvd_out_size = needs_backwards + extra_forward;
719 m_header.m_allocated += needs_backwards + BlockCtrlBytes;
720 return priv_get_user_buffer(new_block);
721 }
722 else{
723 //Just merge the whole previous block
724 block_ctrl *prev_2_block = prev_pair.first;
725 //Update received size and allocation
726 prefer_in_recvd_out_size = extra_forward + prev->get_user_bytes();
727 m_header.m_allocated += prev->get_total_bytes();
728 //Now unlink it from previous block
729 prev_2_block->m_next = prev->m_next;
730 prev->m_size = reuse->m_size + prev->m_size;
731 prev->m_next = 0;
732 priv_get_user_buffer(prev);
733 }
734 }
735 }
736 return 0;
737 }
738
739 template<class MutexFamily, class VoidPointer>
740 inline void simple_seq_fit_impl<MutexFamily, VoidPointer>::
741 deallocate_many(typename simple_seq_fit_impl<MutexFamily, VoidPointer>::multiallocation_chain &chain)
742 {
743 //-----------------------
744 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
745 //-----------------------
746 while(!chain.empty()){
747 this->priv_deallocate(to_raw_pointer(chain.pop_front()));
748 }
749 }
750
751 template<class MutexFamily, class VoidPointer>
752 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::size_type
753 simple_seq_fit_impl<MutexFamily, VoidPointer>::
754 priv_get_total_units(size_type userbytes)
755 {
756 size_type s = ipcdetail::get_rounded_size(userbytes, Alignment)/Alignment;
757 if(!s) ++s;
758 return BlockCtrlUnits + s;
759 }
760
//!Real allocation algorithm: first-fit walk of the free list with a
//!biggest-block fallback. "limit_size" is the minimum acceptable byte
//!count, prefer_in_recvd_out_size carries the preferred size in and the
//!received size out, and reuse_ptr is the buffer to expand in place when
//!expand_fwd/expand_bwd is requested. Returns 0 on failure.
template<class MutexFamily, class VoidPointer>
void * simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_allocate(boost::interprocess::allocation_type command
                ,size_type limit_size, size_type &prefer_in_recvd_out_size, void *&reuse_ptr)
{
   size_type const preferred_size = prefer_in_recvd_out_size;
   if(command & boost::interprocess::shrink_in_place){
      if(!reuse_ptr)  return static_cast<void*>(0);
      bool success = algo_impl_t::shrink(this, reuse_ptr, limit_size, prefer_in_recvd_out_size);
      return success ? reuse_ptr : 0;
   }
   prefer_in_recvd_out_size = 0;

   //The minimum acceptable size can never exceed the preferred size
   if(limit_size > preferred_size){
      return reuse_ptr = 0, static_cast<void*>(0);
   }

   //Number of units to request (including block_ctrl header)
   size_type nunits = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment + BlockCtrlUnits;

   //Get the root and the first memory block
   block_ctrl *prev               = &m_header.m_root;
   block_ctrl *block              = ipcdetail::to_raw_pointer(prev->m_next);
   block_ctrl *root               = &m_header.m_root;
   block_ctrl *biggest_block      = 0;
   block_ctrl *prev_biggest_block = 0;
   size_type biggest_size         = 0;

   //Expand in place: first try asking for the full preferred size only
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides(command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, true);
      if(ret){
         algo_impl_t::assert_alignment(ret);
         return ret;
      }
   }

   if(command & boost::interprocess::allocate_new){
      prefer_in_recvd_out_size = 0;
      //First fit: walk the free list, remembering the biggest block seen
      while(block != root){
         //Update biggest block pointers
         if(block->m_size > biggest_size){
            prev_biggest_block = prev;
            biggest_size  = block->m_size;
            biggest_block = block;
         }
         algo_impl_t::assert_alignment(block);
         void *addr = this->priv_check_and_allocate(nunits, prev, block, prefer_in_recvd_out_size);
         if(addr){
            algo_impl_t::assert_alignment(addr);
            return reuse_ptr = 0, addr;
         }
         //Bad luck, let's check next block
         prev  = block;
         block = ipcdetail::to_raw_pointer(block->m_next);
      }

      //Bad luck finding preferred_size, now if we have any biggest_block
      //try with this block (it satisfies the request iff it covers limit_size)
      if(biggest_block){
         size_type limit_units = ipcdetail::get_rounded_size(limit_size, Alignment)/Alignment + BlockCtrlUnits;
         if(biggest_block->m_size < limit_units){
            return reuse_ptr = 0, static_cast<void*>(0);
         }
         //NOTE(review): the seed below mixes bytes (m_size*Alignment) with
         //units (BlockCtrlUnits) — BlockCtrlBytes looks intended. Harmless
         //only if priv_check_and_allocate (defined outside this chunk)
         //overwrites received_size on success — confirm.
         void *ret = this->priv_check_and_allocate
            (biggest_block->m_size, prev_biggest_block, biggest_block, prefer_in_recvd_out_size = biggest_block->m_size*Alignment - BlockCtrlUnits);
         BOOST_ASSERT(ret != 0);
         algo_impl_t::assert_alignment(ret);
         return reuse_ptr = 0, ret;
      }
   }
   //Now try to expand both sides accepting as little as the min size
   if(reuse_ptr && (command & (boost::interprocess::expand_fwd | boost::interprocess::expand_bwd))){
      void *ret = priv_expand_both_sides (command, limit_size, prefer_in_recvd_out_size = preferred_size, reuse_ptr, false);
      algo_impl_t::assert_alignment(ret);
      return ret;
   }
   return reuse_ptr = 0, static_cast<void*>(0);
}
840
841 template<class MutexFamily, class VoidPointer> inline
842 bool simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_is_allocated_block
843 (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *block)
844 { return block->m_next == 0; }
845
846 template<class MutexFamily, class VoidPointer>
847 inline typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
848 simple_seq_fit_impl<MutexFamily, VoidPointer>::
849 priv_next_block_if_free
850 (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
851 {
852 //Take the address where the next block should go
853 block_ctrl *next_block = reinterpret_cast<block_ctrl*>
854 (reinterpret_cast<char*>(ptr) + ptr->m_size*Alignment);
855
856 //Check if the adjacent block is in the managed segment
857 char *this_char_ptr = reinterpret_cast<char*>(this);
858 char *next_char_ptr = reinterpret_cast<char*>(next_block);
859 size_type distance = (size_type)(next_char_ptr - this_char_ptr)/Alignment;
860
861 if(distance >= (m_header.m_size/Alignment)){
862 //"next_block" does not exist so we can't expand "block"
863 return 0;
864 }
865
866 if(!next_block->m_next)
867 return 0;
868
869 return next_block;
870 }
871
872 template<class MutexFamily, class VoidPointer>
873 inline
874 std::pair<typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *
875 ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *>
876 simple_seq_fit_impl<MutexFamily, VoidPointer>::
877 priv_prev_block_if_free
878 (typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl *ptr)
879 {
880 typedef std::pair<block_ctrl *, block_ctrl *> prev_pair_t;
881 //Take the address where the previous block should go
882 block_ctrl *root = &m_header.m_root;
883 block_ctrl *prev_2_block = root;
884 block_ctrl *prev_block = ipcdetail::to_raw_pointer(root->m_next);
885
886 while((reinterpret_cast<char*>(prev_block) + prev_block->m_size*Alignment)
887 != reinterpret_cast<char*>(ptr)
888 && prev_block != root){
889 prev_2_block = prev_block;
890 prev_block = ipcdetail::to_raw_pointer(prev_block->m_next);
891 }
892
893 if(prev_block == root || !prev_block->m_next)
894 return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
895
896 //Check if the previous block is in the managed segment
897 char *this_char_ptr = reinterpret_cast<char*>(this);
898 char *prev_char_ptr = reinterpret_cast<char*>(prev_block);
899 size_type distance = (size_type)(prev_char_ptr - this_char_ptr)/Alignment;
900
901 if(distance >= (m_header.m_size/Alignment)){
902 //"previous_block" does not exist so we can't expand "block"
903 return prev_pair_t(static_cast<block_ctrl*>(0), static_cast<block_ctrl*>(0));
904 }
905 return prev_pair_t(prev_2_block, prev_block);
906 }
907
908
//!Tries to expand the allocation "ptr" in place so that it can hold at least
//!"min_size" user bytes. On entry "received_size" carries the preferred size;
//!on exit it holds the user bytes actually available in the block (updated
//!even on some failure paths, so callers learn the best achievable size).
//!Returns true on success. Expansion merges "ptr"'s block with the physically
//!adjacent next block when that block is free.
template<class MutexFamily, class VoidPointer>
inline bool simple_seq_fit_impl<MutexFamily, VoidPointer>::
   priv_expand (void *ptr, size_type min_size, size_type &received_size)
{
   //On entry received_size holds the caller's preferred size
   size_type preferred_size = received_size;
   //Obtain the real size of the block from its header
   block_ctrl *block = reinterpret_cast<block_ctrl*>(priv_get_block(ptr));
   size_type old_block_size = block->m_size;

   //All used blocks' next is marked with 0 so check it
   BOOST_ASSERT(block->m_next == 0);

   //Put this to a safe value: the user bytes the block holds right now
   received_size = old_block_size*Alignment - BlockCtrlBytes;

   //Now translate byte counts to Alignment units (rounding up)
   min_size       = ipcdetail::get_rounded_size(min_size, Alignment)/Alignment;
   preferred_size = ipcdetail::get_rounded_size(preferred_size, Alignment)/Alignment;

   //Some parameter checks
   if(min_size > preferred_size)
      return false;

   //Units available to the user in the current block (header excluded)
   size_type data_size = old_block_size - BlockCtrlUnits;

   //The block is already big enough: nothing to do
   if(data_size >= min_size)
      return true;

   //Otherwise we need a free block placed right after this one
   block_ctrl *next_block = priv_next_block_if_free(block);
   if(!next_block){
      return false;
   }

   //Is "block" + "next_block" big enough?
   size_type merged_size = old_block_size + next_block->m_size;

   //Now we can expand this block further than before; report it even if
   //the merged block still can't satisfy min_size
   received_size = merged_size*Alignment - BlockCtrlBytes;

   if(merged_size < (min_size + BlockCtrlUnits)){
      return false;
   }

   //We can fully expand. Merge both blocks: "block" absorbs "next_block"
   block->m_next = next_block->m_next;
   block->m_size = merged_size;

   //Find the previous free block of next_block in the circular free list
   //so we can relink it to the merged block
   block_ctrl *prev = &m_header.m_root;
   while(ipcdetail::to_raw_pointer(prev->m_next) != next_block){
      prev = ipcdetail::to_raw_pointer(prev->m_next);
   }

   //Now insert merged block in the free list
   //This allows reusing allocation logic in this function
   m_header.m_allocated -= old_block_size*Alignment;
   prev->m_next = block;

   //Now use check and allocate to do the allocation logic
   //(preferred size in units, capped by the merged block's size)
   preferred_size += BlockCtrlUnits;
   size_type nunits = preferred_size < merged_size ? preferred_size : merged_size;

   //This must succeed since nunits is not bigger than merged_size!
   if(!this->priv_check_and_allocate (nunits, prev, block, received_size)){
      //Something very ugly is happening here. This is a bug
      //or there is memory corruption
      BOOST_ASSERT(0);
      return false;
   }
   return true;
}
980
//!Tries to carve "nunits" units (block header included) out of the free
//!block "block", whose predecessor in the free list is "prev". If the block
//!is large enough to be split (surplus can hold another header), it is split
//!and the remainder stays in the free list; if it merely fits, it is taken
//!whole. Returns the user buffer on success, 0 if the block is too small.
//!"received_size" is set to the user bytes actually granted.
template<class MutexFamily, class VoidPointer> inline
void* simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_check_and_allocate
   (size_type nunits
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* prev
   ,typename simple_seq_fit_impl<MutexFamily, VoidPointer>::block_ctrl* block
   ,size_type &received_size)
{
   //Threshold above which splitting is worthwhile: the remainder must be
   //able to hold at least a block_ctrl header of its own
   size_type upper_nunits = nunits + BlockCtrlUnits;
   bool found = false;

   if (block->m_size > upper_nunits){
      //This block is bigger than needed, split it in
      //two blocks, the first's size will be "nunits"
      //the second's size will be "block->m_size - nunits"
      size_type total_size = block->m_size;
      block->m_size = nunits;

      //The second (still free) block starts right after the carved one
      block_ctrl *new_block = reinterpret_cast<block_ctrl*>
                     (reinterpret_cast<char*>(block) + Alignment*nunits);
      new_block->m_size  = total_size - nunits;
      new_block->m_next  = block->m_next;
      //Replace "block" with "new_block" in the free list
      prev->m_next = new_block;
      found = true;
   }
   else if (block->m_size >= nunits){
      //This block has exactly the right size, possibly with a surplus
      //too small to split: take it whole, unlinking it from the free list
      prev->m_next = block->m_next;
      found = true;
   }

   if(found){
      //We need block_ctrl for deallocation stuff, so
      //return memory user can overwrite
      m_header.m_allocated += block->m_size*Alignment;
      received_size =  block->get_user_bytes();
      //Mark the block as allocated (allocated blocks have a null "next")
      block->m_next = 0;
      //Check alignment
      algo_impl_t::assert_alignment(block);
      return priv_get_user_buffer(block);
   }
   return 0;
}
1025
1026 template<class MutexFamily, class VoidPointer>
1027 void simple_seq_fit_impl<MutexFamily, VoidPointer>::deallocate(void* addr)
1028 {
1029 if(!addr) return;
1030 //-----------------------
1031 boost::interprocess::scoped_lock<interprocess_mutex> guard(m_header);
1032 //-----------------------
1033 return this->priv_deallocate(addr);
1034 }
1035
//!Returns the block holding "addr" to the address-ordered free list,
//!coalescing it with the physically adjacent previous and/or next free
//!blocks when they are contiguous in memory. "addr" must come from a prior
//!allocation of this allocator; null is a no-op. Caller must hold the lock.
template<class MutexFamily, class VoidPointer>
void simple_seq_fit_impl<MutexFamily, VoidPointer>::priv_deallocate(void* addr)
{
   if(!addr)   return;

   //Let's get free block list. List is always sorted
   //by memory address to allow block merging.
   //Pointer next always points to the first
   //(lower address) block
   block_ctrl * prev  = &m_header.m_root;
   block_ctrl * pos   = ipcdetail::to_raw_pointer(m_header.m_root.m_next);
   block_ctrl * block = reinterpret_cast<block_ctrl*>(priv_get_block(addr));

   //All used blocks' next is marked with 0 so check it
   BOOST_ASSERT(block->m_next == 0);

   //Check if alignment and block size are right
   algo_impl_t::assert_alignment(addr);

   size_type total_size = Alignment*block->m_size;
   BOOST_ASSERT(m_header.m_allocated >= total_size);

   //Update used memory count
   m_header.m_allocated -= total_size;

   //Let's find the previous and the next block of the block to deallocate
   //This ordering comparison must be done with original pointers
   //types since their mapping to raw pointers can be different
   //in each process
   //NOTE(review): the comparison below is on raw pointers; within one
   //process's mapping of the segment this ordering is consistent
   while((ipcdetail::to_raw_pointer(pos) != &m_header.m_root) && (block > pos)){
      prev = pos;
      pos = ipcdetail::to_raw_pointer(pos->m_next);
   }

   //Try to combine with upper block: if "block" ends exactly where
   //"pos" starts, absorb "pos" into "block"
   char *block_char_ptr = reinterpret_cast<char*>(ipcdetail::to_raw_pointer(block));

   if ((block_char_ptr + Alignment*block->m_size) ==
         reinterpret_cast<char*>(ipcdetail::to_raw_pointer(pos))){
      block->m_size += pos->m_size;
      block->m_next  = pos->m_next;
   }
   else{
      //Not adjacent: just link "block" before "pos" in the free list
      block->m_next = pos;
   }

   //Try to combine with lower block: if "prev" ends exactly where
   //"block" starts, absorb "block" into "prev"
   if ((reinterpret_cast<char*>(ipcdetail::to_raw_pointer(prev))
            + Alignment*prev->m_size) ==
        block_char_ptr){


      prev->m_size += block->m_size;
      prev->m_next  = block->m_next;
   }
   else{
      //Not adjacent: link "block" after "prev" in the free list
      prev->m_next = block;
   }
}
1095
1096 } //namespace ipcdetail {
1097
1098 } //namespace interprocess {
1099
1100 } //namespace boost {
1101
1102 #include <boost/interprocess/detail/config_end.hpp>
1103
1104 #endif //#ifndef BOOST_INTERPROCESS_MEM_ALGO_DETAIL_SIMPLE_SEQ_FIT_IMPL_HPP
1105