//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2007-2015. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#include <boost/container/detail/alloc_lib.h>

#include "errno.h"  //dlmalloc bug: EINVAL is used in posix_memalign without checking LACKS_ERRNO_H
#include "limits.h" //CHAR_BIT

#ifdef BOOST_CONTAINER_DLMALLOC_FOOTERS
#define FOOTERS 1
#endif
#define NO_MALLOC_STATS 1

#define DL_DEBUG_DEFINED

#define FORCEINLINE inline
#include "dlmalloc_2_8_6.c"
#ifdef _MSC_VER
#pragma warning (push)
#pragma warning (disable : 4127)
#pragma warning (disable : 4267)
#pragma warning (disable : 4702)
#pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
#pragma warning (disable : 4251 4231 4660) /*dll warnings*/
#endif
#define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))

static size_t s_allocated_memory;
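/* s_allocated_memory is a running tally of the bytes this allocator has
   handed out: every successful allocation below adds the new chunk size and
   every deallocation subtracts it. boost_cont_in_use_memory() and
   boost_cont_all_deallocated() report it to the library's clients. */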
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//
//         SLIGHTLY MODIFIED DLMALLOC FUNCTIONS
//
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//This function is equal to mspace_free
//replacing PREACTION with 0 and POSTACTION with nothing
static void mspace_free_lockless(mspace msp, void* mem)
{
   if (mem != 0) {
      mchunkptr p = mem2chunk(mem);
      #if FOOTERS
      mstate fm = get_mstate_for(p);
      msp = msp; /* placate people compiling -Wunused */
      #else /* FOOTERS */
      mstate fm = (mstate)msp;
      #endif /* FOOTERS */
      if (!ok_magic(fm)) {
         USAGE_ERROR_ACTION(fm, p);
         return;
      }
      if (!0){//PREACTION(fm)) {
         check_inuse_chunk(fm, p);
         if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
            size_t psize = chunksize(p);
            mchunkptr next = chunk_plus_offset(p, psize);
            s_allocated_memory -= psize;
            if (!pinuse(p)) {
               size_t prevsize = p->prev_foot;
               if (is_mmapped(p)) {
                  psize += prevsize + MMAP_FOOT_PAD;
                  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                     fm->footprint -= psize;
                  goto postaction;
               }
               else {
                  mchunkptr prev = chunk_minus_offset(p, prevsize);
                  psize += prevsize;
                  p = prev;
                  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
                     if (p != fm->dv) {
                        unlink_chunk(fm, p, prevsize);
                     }
                     else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                        fm->dvsize = psize;
                        set_free_with_pinuse(p, psize, next);
                        goto postaction;
                     }
                  }
                  else
                     goto erroraction;
               }
            }

            if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
               if (!cinuse(next)) { /* consolidate forward */
                  if (next == fm->top) {
                     size_t tsize = fm->topsize += psize;
                     fm->top = p;
                     p->head = tsize | PINUSE_BIT;
                     if (p == fm->dv) {
                        fm->dv = 0;
                        fm->dvsize = 0;
                     }
                     if (should_trim(fm, tsize))
                        sys_trim(fm, 0);
                     goto postaction;
                  }
                  else if (next == fm->dv) {
                     size_t dsize = fm->dvsize += psize;
                     fm->dv = p;
                     set_size_and_pinuse_of_free_chunk(p, dsize);
                     goto postaction;
                  }
                  else {
                     size_t nsize = chunksize(next);
                     psize += nsize;
                     unlink_chunk(fm, next, nsize);
                     set_size_and_pinuse_of_free_chunk(p, psize);
                     if (p == fm->dv) {
                        fm->dvsize = psize;
                        goto postaction;
                     }
                  }
               }
               else
                  set_free_with_pinuse(p, psize, next);

               if (is_small(psize)) {
                  insert_small_chunk(fm, p, psize);
                  check_free_chunk(fm, p);
               }
               else {
                  tchunkptr tp = (tchunkptr)p;
                  insert_large_chunk(fm, tp, psize);
                  check_free_chunk(fm, p);
                  if (--fm->release_checks == 0)
                     release_unused_segments(fm);
               }
               goto postaction;
            }
         }
erroraction:
         USAGE_ERROR_ACTION(fm, p);
postaction:
         ; //POSTACTION(fm) removed
      }
   }
}
//This function is equal to mspace_malloc
//replacing PREACTION with 0 and POSTACTION with nothing
void* mspace_malloc_lockless(mspace msp, size_t bytes)
{
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
   }
   if (!0){//PREACTION(ms)) {
      void* mem;
      size_t nb;
      if (bytes <= MAX_SMALL_REQUEST) {
         bindex_t idx;
         binmap_t smallbits;
         nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
         idx = small_index(nb);
         smallbits = ms->smallmap >> idx;

         if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
            mchunkptr b, p;
            idx += ~smallbits & 1;      /* Uses next bin if idx empty */
            b = smallbin_at(ms, idx);
            p = b->fd;
            assert(chunksize(p) == small_index2size(idx));
            unlink_first_small_chunk(ms, b, p, idx);
            set_inuse_and_pinuse(ms, p, small_index2size(idx));
            mem = chunk2mem(p);
            check_malloced_chunk(ms, mem, nb);
            goto postaction;
         }

         else if (nb > ms->dvsize) {
            if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
               mchunkptr b, p, r;
               size_t rsize;
               bindex_t i;
               binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
               binmap_t leastbit = least_bit(leftbits);
               compute_bit2idx(leastbit, i);
               b = smallbin_at(ms, i);
               p = b->fd;
               assert(chunksize(p) == small_index2size(i));
               unlink_first_small_chunk(ms, b, p, i);
               rsize = small_index2size(i) - nb;
               /* Fit here cannot be remainderless if 4byte sizes */
               if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                  set_inuse_and_pinuse(ms, p, small_index2size(i));
               else {
                  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
                  r = chunk_plus_offset(p, nb);
                  set_size_and_pinuse_of_free_chunk(r, rsize);
                  replace_dv(ms, r, rsize);
               }
               mem = chunk2mem(p);
               check_malloced_chunk(ms, mem, nb);
               goto postaction;
            }

            else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
               check_malloced_chunk(ms, mem, nb);
               goto postaction;
            }
         }
      }
      else if (bytes >= MAX_REQUEST)
         nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
      else {
         nb = pad_request(bytes);
         if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
            check_malloced_chunk(ms, mem, nb);
            goto postaction;
         }
      }

      if (nb <= ms->dvsize) {
         size_t rsize = ms->dvsize - nb;
         mchunkptr p = ms->dv;
         if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
            mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
            ms->dvsize = rsize;
            set_size_and_pinuse_of_free_chunk(r, rsize);
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
         }
         else { /* exhaust dv */
            size_t dvs = ms->dvsize;
            ms->dvsize = 0;
            ms->dv = 0;
            set_inuse_and_pinuse(ms, p, dvs);
         }
         mem = chunk2mem(p);
         check_malloced_chunk(ms, mem, nb);
         goto postaction;
      }

      else if (nb < ms->topsize) { /* Split top */
         size_t rsize = ms->topsize -= nb;
         mchunkptr p = ms->top;
         mchunkptr r = ms->top = chunk_plus_offset(p, nb);
         r->head = rsize | PINUSE_BIT;
         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
         mem = chunk2mem(p);
         check_top_chunk(ms, ms->top);
         check_malloced_chunk(ms, mem, nb);
         goto postaction;
      }

      mem = sys_alloc(ms, nb);

   postaction:
      return mem; //POSTACTION(ms) removed
   }

   return 0;
}
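/*
   Sketch of how the "_lockless" variants are meant to be driven (assumed
   usage, mirroring boost_cont_malloc/boost_cont_free below): the caller
   takes the mspace lock once and may then issue many lockless calls.

      mstate ms = (mstate)gm;
      if (!PREACTION(ms)) {            // take the lock once
         void *a = mspace_malloc_lockless(ms, 32);
         void *b = mspace_malloc_lockless(ms, 64);
         mspace_free_lockless(ms, a);
         mspace_free_lockless(ms, b);
         POSTACTION(ms);               // release the lock
      }
*/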
//This function is equal to try_realloc_chunk but handling
//minimum and desired bytes
static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb, size_t des_nb, int can_move)
{
   mchunkptr newp = 0;
   size_t oldsize = chunksize(p);
   mchunkptr next = chunk_plus_offset(p, oldsize);
   if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
               ok_next(p, next) && ok_pinuse(next))) {
      if (is_mmapped(p)) {
         newp = mmap_resize(m, p, des_nb, can_move);
         if(!newp)   //mmap does not return how many bytes we could reallocate, so go the minimum
            newp = mmap_resize(m, p, min_nb, can_move);
      }
      else if (oldsize >= min_nb) { /* already big enough */
         size_t nb = oldsize >= des_nb ? des_nb : oldsize;
         size_t rsize = oldsize - nb;
         if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
            mchunkptr r = chunk_plus_offset(p, nb);
            set_inuse(m, p, nb);
            set_inuse(m, r, rsize);
            dispose_chunk(m, r, rsize);
         }
         newp = p;
      }
      else if (next == m->top) { /* extend into top */
         if (oldsize + m->topsize > min_nb) {
            size_t nb = (oldsize + m->topsize) > des_nb ? des_nb : (oldsize + m->topsize - MALLOC_ALIGNMENT);
            size_t newsize = oldsize + m->topsize;
            size_t newtopsize = newsize - nb;
            mchunkptr newtop = chunk_plus_offset(p, nb);
            set_inuse(m, p, nb);
            newtop->head = newtopsize |PINUSE_BIT;
            m->top = newtop;
            m->topsize = newtopsize;
            newp = p;
         }
      }
      else if (next == m->dv) { /* extend into dv */
         size_t dvs = m->dvsize;
         if (oldsize + dvs >= min_nb) {
            size_t nb = (oldsize + dvs) >= des_nb ? des_nb : (oldsize + dvs);
            size_t dsize = oldsize + dvs - nb;
            if (dsize >= MIN_CHUNK_SIZE) {
               mchunkptr r = chunk_plus_offset(p, nb);
               mchunkptr n = chunk_plus_offset(r, dsize);
               set_inuse(m, p, nb);
               set_size_and_pinuse_of_free_chunk(r, dsize);
               clear_pinuse(n);
               m->dvsize = dsize;
               m->dv = r;
            }
            else { /* exhaust dv */
               size_t newsize = oldsize + dvs;
               set_inuse(m, p, newsize);
               m->dvsize = 0;
               m->dv = 0;
            }
            newp = p;
         }
      }
      else if (!cinuse(next)) { /* extend into next free chunk */
         size_t nextsize = chunksize(next);
         if (oldsize + nextsize >= min_nb) {
            size_t nb = (oldsize + nextsize) >= des_nb ? des_nb : (oldsize + nextsize);
            size_t rsize = oldsize + nextsize - nb;
            unlink_chunk(m, next, nextsize);
            if (rsize < MIN_CHUNK_SIZE) {
               size_t newsize = oldsize + nextsize;
               set_inuse(m, p, newsize);
            }
            else {
               mchunkptr r = chunk_plus_offset(p, nb);
               set_inuse(m, p, nb);
               set_inuse(m, r, rsize);
               dispose_chunk(m, r, rsize);
            }
            newp = p;
         }
      }
   }
   else {
      USAGE_ERROR_ACTION(m, chunk2mem(p));
   }
   return newp;
}
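/*
   Illustration of the min/desired contract (assumed sizes): with
   min_nb == request2size(100) and des_nb == request2size(500), the chunk is
   kept in place whenever at least 100 payload bytes can be provided, while
   up to 500 are taken opportunistically from the chunk itself, the top
   chunk, the dv chunk or the following free chunk. Callers such as
   internal_grow_both_sides() below read the size actually obtained back via
   DL_SIZE_IMPL().
*/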
#define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
   *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
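/*
   A boost_cont_memchain is an intrusive singly linked list: the first
   sizeof(void*) bytes of each node store the address of the next node. The
   macro above therefore unlinks NEXTMEM by copying its embedded "next" field
   into THISMEM, which internal_multialloc_free() below uses after merging
   two contiguous nodes into a single chunk.
*/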
//This function is based on internal_bulk_free
//replacing iteration over array[] with boost_cont_memchain.
//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
//After forward merging, backwards merging is also tried
static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
{
   boost_cont_memchain ret_chain;
   BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
   if (!0){//PREACTION(m)) {
      boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
      while(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
         void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
         mchunkptr a_p = mem2chunk(a_mem);
         size_t psize = chunksize(a_p);
         #if FOOTERS
         if (get_mstate_for(a_p) != m) {
            BOOST_CONTAINER_MEMIT_NEXT(a_it);
            BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
            continue;
         }
         #endif
         check_inuse_chunk(m, a_p);
         if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
            while(1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
               boost_cont_memchain_it b_it = a_it;
               BOOST_CONTAINER_MEMIT_NEXT(b_it);
               if(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)){
                  void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
                  mchunkptr b_p = mem2chunk(b_mem);
                  if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
                     psize += chunksize(b_p);
                     set_inuse(m, a_p, psize);
                     BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
                     continue;
                  }
                  if(RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))){
                     /* b chunk is contiguous and previous so a's size can be added to b */
                     if(a_p == next_chunk(b_p)) {
                        psize += chunksize(b_p);
                        set_inuse(m, b_p, psize);
                        a_it = b_it;
                        a_p = b_p;
                        a_mem = b_mem;
                        continue;
                     }
                  }
               }
               /* Normal deallocation starts again in the outer loop */
               BOOST_CONTAINER_MEMIT_NEXT(a_it);
               s_allocated_memory -= psize;
               dispose_chunk(m, a_p, psize);
               break;
            }
         }
         else {
            CORRUPTION_ERROR_ACTION(m);
            break;
         }
      }
      if (should_trim(m, m->topsize))
         sys_trim(m, 0);
   }
   {
      boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
      BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
      BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
         (pchain
         , last_pchain
         , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
         , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
         , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
         );
   }
}
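/*
   Net effect of internal_multialloc_free(), summarized from the code above:
   every node of *pchain owned by mstate m is coalesced with chain-adjacent
   contiguous nodes and returned to the heap with one dispose_chunk() call
   per merged run; on return *pchain holds only the nodes that belonged to a
   different mstate, so a caller can detect and handle foreign memory.
*/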
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//
//      NEW FUNCTIONS BASED ON DLMALLOC INTERNALS
//
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
#define GET_TRUNCATED_SIZE(ORIG_SIZE, ROUNDTO)     ((ORIG_SIZE)/(ROUNDTO)*(ROUNDTO))
#define GET_ROUNDED_SIZE(ORIG_SIZE, ROUNDTO)       ((((ORIG_SIZE)-1)/(ROUNDTO)+1)*(ROUNDTO))
#define GET_TRUNCATED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE) & (~(ROUNDTO-1)))
#define GET_ROUNDED_PO2_SIZE(ORIG_SIZE, ROUNDTO)   (((ORIG_SIZE - 1) & (~(ROUNDTO-1))) + ROUNDTO)
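/*
   Worked example with ROUNDTO == 8 (the _PO2_ variants assume ROUNDTO is a
   power of two):
      GET_TRUNCATED_SIZE(13, 8)     == 8
      GET_ROUNDED_SIZE(13, 8)       == 16
      GET_TRUNCATED_PO2_SIZE(13, 8) == 8
      GET_ROUNDED_PO2_SIZE(13, 8)   == 16
*/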
/* Greatest common divisor and least common multiple
   gcd is an algorithm that calculates the greatest common divisor of two
   integers, using Euclid's algorithm.

   Pre: A > 0 && B > 0
*/
#define CALCULATE_GCD(A, B, OUT)\
{\
   size_t a = A;\
   size_t b = B;\
   do\
   {\
      size_t tmp = b;\
      b = a % b;\
      a = tmp;\
   } while (b != 0);\
\
   OUT = a;\
}

/* lcm is an algorithm that calculates the least common multiple of two
   integers.

   Pre: A > 0 && B > 0
*/
#define CALCULATE_LCM(A, B, OUT)\
{\
   CALCULATE_GCD(A, B, OUT);\
   OUT = (A / OUT)*B;\
}
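/*
   Example: CALCULATE_LCM(12, 8, out) first computes gcd(12, 8) == 4 with
   Euclid's algorithm and then yields out == 12/4*8 == 24, the least common
   multiple. calculate_lcm_and_needs_backwards_lcmed() below only falls back
   to this general path when backwards_multiple is neither a power of two nor
   a multiple (or half/quarter) of MALLOC_ALIGNMENT.
*/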
static int calculate_lcm_and_needs_backwards_lcmed
   (size_t backwards_multiple, size_t received_size, size_t size_to_achieve,
    size_t *plcm, size_t *pneeds_backwards_lcmed)
{
   /* Now calculate lcm */
   size_t max = backwards_multiple;
   size_t min = MALLOC_ALIGNMENT;
   size_t needs_backwards;
   size_t needs_backwards_lcmed;
   size_t lcm;
   size_t current_forward;
   /*Swap if necessary*/
   if(max < min){
      size_t temp = min;
      min = max;
      max = temp;
   }
   /*Check if it's power of two*/
   if((backwards_multiple & (backwards_multiple-1)) == 0){
      if(0 != (size_to_achieve & ((backwards_multiple-1)))){
         USAGE_ERROR_ACTION(m, oldp);
         return 0;
      }

      lcm = max;
      /*If we want to use minbytes data to get a buffer between maxbytes
        and minbytes if maxbytes can't be achieved, calculate the
        biggest of all possibilities*/
      current_forward = GET_TRUNCATED_PO2_SIZE(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(needs_backwards, lcm);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   /*Check if it's multiple of alignment*/
   else if((backwards_multiple & (MALLOC_ALIGNMENT - 1u)) == 0){
      lcm = backwards_multiple;
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      //No need to round needs_backwards because backwards_multiple == lcm
      needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards_lcmed & (MALLOC_ALIGNMENT - 1u)) == 0);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   /*Check if it's multiple of the half of the alignment*/
   else if((backwards_multiple & ((MALLOC_ALIGNMENT/2u) - 1u)) == 0){
      lcm = backwards_multiple*2u;
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
      if(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
      //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
         needs_backwards_lcmed += backwards_multiple;
      assert((needs_backwards_lcmed % lcm) == 0);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   /*Check if it's multiple of the quarter of the alignment*/
   else if((backwards_multiple & ((MALLOC_ALIGNMENT/4u) - 1u)) == 0){
      size_t remainder;
      lcm = backwards_multiple*4u;
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
      //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
         //needs_backwards_lcmed += backwards_multiple;
      if(0 != (remainder = ((needs_backwards_lcmed & (MALLOC_ALIGNMENT-1))>>(MALLOC_ALIGNMENT/8u)))){
         if(backwards_multiple & MALLOC_ALIGNMENT/2u){
            needs_backwards_lcmed += (remainder)*backwards_multiple;
         }
         else{
            needs_backwards_lcmed += (4-remainder)*backwards_multiple;
         }
      }
      assert((needs_backwards_lcmed % lcm) == 0);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   else{
      CALCULATE_LCM(max, min, lcm);
      /*If we want to use minbytes data to get a buffer between maxbytes
        and minbytes if maxbytes can't be achieved, calculate the
        biggest of all possibilities*/
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = GET_ROUNDED_SIZE(needs_backwards, lcm);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
}
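/*
   Worked example (assumed values, MALLOC_ALIGNMENT == 16): with
   backwards_multiple == 8 (a power of two), received_size == 100 and
   size_to_achieve == 120, current_forward == GET_TRUNCATED_PO2_SIZE(100, 8)
   == 96 and needs_backwards == 24; rounding up to the lcm of 8 and 16 gives
   *pneeds_backwards_lcmed == 32, i.e. expanding 32 bytes backwards keeps
   both the multiple and the alignment contract.
*/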
static void *internal_grow_both_sides
   (mstate m
   ,allocation_type command
   ,void *oldmem
   ,size_t minbytes
   ,size_t maxbytes
   ,size_t *received_size
   ,size_t backwards_multiple
   ,int only_preferred_backwards)
{
   mchunkptr oldp = mem2chunk(oldmem);
   size_t oldsize = chunksize(oldp);
   *received_size = oldsize - overhead_for(oldp);
   if(minbytes <= *received_size)
      return oldmem;

   if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp))) {
      if(command & BOOST_CONTAINER_EXPAND_FWD){
         if(try_realloc_chunk_with_min(m, oldp, request2size(minbytes), request2size(maxbytes), 0)){
            check_inuse_chunk(m, oldp);
            *received_size = DL_SIZE_IMPL(oldmem);
            s_allocated_memory += chunksize(oldp) - oldsize;
            return oldmem;
         }
      }
      else{
         *received_size = DL_SIZE_IMPL(oldmem);
         if(*received_size >= maxbytes)
            return oldmem;
      }
/*
      Should we check this?
      if(backwards_multiple &&
         (0 != (minbytes % backwards_multiple) &&
          0 != (maxbytes % backwards_multiple)) ){
         USAGE_ERROR_ACTION(m, oldp);
         return 0;
      }
*/
      /* We reach here only if forward expansion fails */
      if(!(command & BOOST_CONTAINER_EXPAND_BWD) || pinuse(oldp)){
         return 0;
      }
      {
         size_t prevsize = oldp->prev_foot;
         if ((prevsize & USE_MMAP_BIT) != 0){
            /*Return failure: the previous chunk was mmapped.
              mremap does not allow expanding to a fixed address (MREMAP_MAYMOVE) without
              copying (MREMAP_MAYMOVE must be also set).*/
            return 0;
         }
         else {
            mchunkptr prev = chunk_minus_offset(oldp, prevsize);
            size_t dsize = oldsize + prevsize;
            size_t needs_backwards_lcmed;
            size_t lcm;

            /* Let's calculate the number of extra bytes of data before the current
               block's begin. The value is a multiple of backwards_multiple
               and the alignment*/
            if(!calculate_lcm_and_needs_backwards_lcmed
               ( backwards_multiple, *received_size
               , only_preferred_backwards ? maxbytes : minbytes
               , &lcm, &needs_backwards_lcmed)
               || !RTCHECK(ok_address(m, prev))){
               USAGE_ERROR_ACTION(m, oldp);
               return 0;
            }
            /* Check if previous block has enough size */
            else if(prevsize < needs_backwards_lcmed){
               /* preferred size? */
               return 0;
            }
            /* Now take all next space. This must succeed, as we've previously calculated the correct size */
            if(command & BOOST_CONTAINER_EXPAND_FWD){
               if(!try_realloc_chunk_with_min(m, oldp, request2size(*received_size), request2size(*received_size), 0)){
                  assert(0);
               }
               check_inuse_chunk(m, oldp);
               *received_size = DL_SIZE_IMPL(oldmem);
               s_allocated_memory += chunksize(oldp) - oldsize;
               oldsize = chunksize(oldp);
               dsize = oldsize + prevsize;
            }
            /* We need a minimum size to split the previous one */
            if(prevsize >= (needs_backwards_lcmed + MIN_CHUNK_SIZE)){
               mchunkptr r = chunk_minus_offset(oldp, needs_backwards_lcmed);
               size_t rsize = oldsize + needs_backwards_lcmed;
               size_t newprevsize = dsize - rsize;
               int prev_was_dv = prev == m->dv;

               assert(newprevsize >= MIN_CHUNK_SIZE);

               if (prev_was_dv) {
                  m->dvsize = newprevsize;
               }
               else{/* if ((next->head & INUSE_BITS) == INUSE_BITS) { */
                  unlink_chunk(m, prev, prevsize);
                  insert_chunk(m, prev, newprevsize);
               }

               set_size_and_pinuse_of_free_chunk(prev, newprevsize);
               clear_pinuse(r);
               set_inuse(m, r, rsize);
               check_malloced_chunk(m, chunk2mem(r), rsize);
               *received_size = chunksize(r) - overhead_for(r);
               s_allocated_memory += chunksize(r) - oldsize;
               return chunk2mem(r);
            }
            /* Check if there is no place to create a new block and
               the whole new block is multiple of the backwards expansion multiple */
            else if(prevsize >= needs_backwards_lcmed && !(prevsize % lcm)) {
               /* Just merge the whole previous block */
               /* prevsize is multiple of lcm (and backwards_multiple)*/
               *received_size += prevsize;

               if (prev != m->dv) {
                  unlink_chunk(m, prev, prevsize);
               }
               else {
                  m->dvsize = 0;
                  m->dv = 0;
               }
               set_inuse(m, prev, dsize);
               check_malloced_chunk(m, chunk2mem(prev), dsize);
               s_allocated_memory += chunksize(prev) - oldsize;
               return chunk2mem(prev);
            }
            else {
               /* Previous block was big enough but there is no room
                  to create an empty block and taking the whole block does
                  not fulfill alignment requirements */
               return 0;
            }
         }
      }
   }
   else {
      USAGE_ERROR_ACTION(m, oldmem);
   }
   return 0;
}
/* This is similar to mmap_resize but:
   * It takes min and max sizes
   * Takes an additional 'do_commit' argument to obtain the final
     size before doing the real shrink operation.
*/
static int internal_mmap_shrink_in_place(mstate m, mchunkptr oldp, size_t nbmin, size_t nbmax, size_t *received_size, int do_commit)
{
   size_t oldsize = chunksize(oldp);
   *received_size = oldsize;
#if HAVE_MREMAP
   if (is_small(nbmax)) /* Can't shrink mmap regions below small size */
      return 0;
   {
      size_t effective_min = nbmin > MIN_LARGE_SIZE ? nbmin : MIN_LARGE_SIZE;
      /* Keep old chunk if big enough but not too big */
      if (oldsize >= effective_min + SIZE_T_SIZE &&
          (oldsize - effective_min) <= (mparams.granularity << 1))
         return 0;

      /* Now calculate new sizes */
      {
         size_t offset = oldp->prev_foot;
         size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
         size_t newmmsize = mmap_align(effective_min + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
         *received_size = newmmsize;
         if(do_commit){
            const int flags = 0; /* placate people compiling -Wunused */
            char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                          oldmmsize, newmmsize, flags);
            /*This must always succeed */
            if(!cp){
               USAGE_ERROR_ACTION(m, m);
               return 0;
            }
            {
               mchunkptr newp = (mchunkptr)(cp + offset);
               size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
               newp->head = psize;
               mark_inuse_foot(m, newp, psize);
               chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
               chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

               if (cp < m->least_addr)
                  m->least_addr = cp;
               if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
                  m->max_footprint = m->footprint;
               check_mmapped_chunk(m, newp);
            }
         }
      }
      return 1;
   }
#else  //#if HAVE_MREMAP
   (void)m; (void)oldp; (void)nbmin; (void)nbmax; (void)do_commit;
   return 0;
#endif //#if HAVE_MREMAP
}
static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxbytes, size_t *received_size, int do_commit)
{
   *received_size = chunksize(mem2chunk(oldmem)) - overhead_for(mem2chunk(oldmem));
   if (minbytes >= MAX_REQUEST || maxbytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
      return 0;
   }
   else if(minbytes < MIN_REQUEST){
      minbytes = MIN_REQUEST;
   }

   if (minbytes > maxbytes) {
      return 0;
   }

   {
      mchunkptr oldp = mem2chunk(oldmem);
      size_t oldsize = chunksize(oldp);
      mchunkptr next = chunk_plus_offset(oldp, oldsize);
      void* extra = 0;
      int ret = 0;

      /* Try to either shrink or extend into top. Else malloc-copy-free*/
      if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
                  ok_next(oldp, next) && ok_pinuse(next))) {
         size_t nbmin = request2size(minbytes);
         size_t nbmax = request2size(maxbytes);

         if (nbmin > oldsize){
            /* Return error if old size is too small */
         }
         else if (is_mmapped(oldp)){
            return internal_mmap_shrink_in_place(m, oldp, nbmin, nbmax, received_size, do_commit);
         }
         else{ // nbmin <= oldsize /* already big enough*/
            size_t nb = nbmin;
            size_t rsize = oldsize - nb;
            if (rsize >= MIN_CHUNK_SIZE) {
               if(do_commit){
                  mchunkptr remainder = chunk_plus_offset(oldp, nb);
                  set_inuse(m, oldp, nb);
                  set_inuse(m, remainder, rsize);
                  extra = chunk2mem(remainder);
               }
               *received_size = nb - overhead_for(oldp);
               ret = 1;
            }
         }
      }
      else {
         USAGE_ERROR_ACTION(m, oldmem);
      }

      if (extra != 0 && do_commit) {
         mspace_free_lockless(m, extra);
         check_inuse_chunk(m, oldp);
      }
      return ret;
   }
}
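/*
   Example of the shrink contract (assumed numbers): for a chunk currently
   holding 1000 payload bytes, internal_shrink(m, p, 200, 300, &recv, 1)
   tries to keep only request2size(200) bytes in place, releases the tail as
   a separate chunk, and reports the payload actually kept through
   *received_size. With do_commit == 0 the split is only computed, letting
   the caller preview the final size before committing.
*/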
#define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096

#define SQRT_MAX_SIZE_T (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2))
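/*
   Overflow guard rationale: a*b cannot overflow when both a and b are
   <= SQRT_MAX_SIZE_T, so the multialloc functions below only pay for a
   division when either operand exceeds that bound. E.g. with a 64-bit
   size_t, SQRT_MAX_SIZE_T == 0xFFFFFFFF; for element_req_size == 2^40 and
   contiguous_elements == 2^30 the cheap (a | b) test fires and
   MAX_SIZE_T/element_req_size == 2^24 < 2^30 reports the overflow.
*/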
static int internal_node_multialloc
   (mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
   void*     mem;            /* malloced aggregate space */
   mchunkptr p;              /* corresponding chunk */
   size_t    remainder_size; /* remaining bytes while splitting */
   flag_t    was_enabled;    /* to disable mmap */
   size_t    elements_per_segment = 0;
   size_t    element_req_size = request2size(element_size);
   boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);

   /*Error if wrong element_size parameter */
   if( !element_size ||
      /*OR Error if n_elements less than contiguous_elements */
      ((contiguous_elements + 1) > (DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
      /* OR Error if integer overflow */
      (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
       (MAX_SIZE_T/element_req_size) < contiguous_elements)){
      return 0;
   }

   switch(contiguous_elements){
      case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
         /* Default contiguous, just check that we can store at least one element */
         elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM/element_req_size;
         elements_per_segment += (size_t)(!elements_per_segment);
      break;
      case DL_MULTIALLOC_ALL_CONTIGUOUS:
         /* All elements should be allocated in a single call */
         elements_per_segment = n_elements;
      break;
      default:
         /* Allocate in chunks of "contiguous_elements" */
         elements_per_segment = contiguous_elements;
   }

   {
      size_t i;
      size_t next_i;
      /*
         Allocate the aggregate chunk.  First disable direct-mmapping so
         malloc won't use it, since we would not be able to later
         free/realloc space internal to a segregated mmap region.
      */
      was_enabled = use_mmap(m);
      disable_mmap(m);
      for(i = 0; i != n_elements; i = next_i)
      {
         size_t accum_size;
         size_t n_elements_left = n_elements - i;
         next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
         accum_size = element_req_size*(next_i - i);

         mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
         if (mem == 0){
            BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
            while(i--){
               void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
               BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
               mspace_free_lockless(m, addr);
            }
            if (was_enabled)
               enable_mmap(m);
            return 0;
         }
         p = mem2chunk(mem);
         remainder_size = chunksize(p);
         s_allocated_memory += remainder_size;

         assert(!is_mmapped(p));
         { /* split out elements */
            void *mem_orig = mem;
            boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
            size_t num_elements = next_i-i;

            size_t num_loops = num_elements - 1;
            remainder_size -= element_req_size*num_loops;
            while(num_loops--){
               void **mem_prev = ((void**)mem);
               set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
               p = chunk_plus_offset(p, element_req_size);
               mem = chunk2mem(p);
               *mem_prev = mem;
            }
            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
            BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
         }
      }
      if (was_enabled)
         enable_mmap(m);
   }
   return 1;
}
static int internal_multialloc_arrays
   (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
   void*     mem;            /* malloced aggregate space */
   mchunkptr p;              /* corresponding chunk */
   size_t    remainder_size; /* remaining bytes while splitting */
   flag_t    was_enabled;    /* to disable mmap */
   size_t    size;
   size_t    max_size;
   size_t boost_cont_multialloc_segmented_malloc_size;

   /*Error if wrong element_size parameter */
   if(!element_size){
      return 0;
   }
   max_size = MAX_REQUEST/element_size;

   switch(contiguous_elements){
      case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
         /* Use default contiguous mem */
         boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
      break;
      case DL_MULTIALLOC_ALL_CONTIGUOUS:
         boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
      break;
      default:
         if(max_size < contiguous_elements){
            return 0;
         }
         else{
            /* The suggested buffer is just the element count times the element size */
            boost_cont_multialloc_segmented_malloc_size = element_size*contiguous_elements;
         }
   }

   {
      size_t i;
      size_t next_i;
      /*
         Allocate the aggregate chunk.  First disable direct-mmapping so
         malloc won't use it, since we would not be able to later
         free/realloc space internal to a segregated mmap region.
      */
      was_enabled = use_mmap(m);
      disable_mmap(m);
      for(i = 0, next_i = 0; i != n_elements; i = next_i)
      {
         int error = 0;
         size_t accum_size;
         for(accum_size = 0; next_i != n_elements; ++next_i){
            size_t cur_array_size = sizes[next_i];
            if(max_size < cur_array_size){
               error = 1;
               break;
            }
            else{
               size_t reqsize = request2size(cur_array_size*element_size);
               if(((boost_cont_multialloc_segmented_malloc_size - CHUNK_OVERHEAD) - accum_size) < reqsize){
                  if(!accum_size){
                     accum_size += reqsize;
                     ++next_i;
                  }
                  break;
               }
               accum_size += reqsize;
            }
         }

         mem = error ? 0 : mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
         if (mem == 0){
            boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
            while(i--){
               void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
               BOOST_CONTAINER_MEMIT_NEXT(it);
               mspace_free_lockless(m, addr);
            }
            if (was_enabled)
               enable_mmap(m);
            return 0;
         }
         p = mem2chunk(mem);
         remainder_size = chunksize(p);
         s_allocated_memory += remainder_size;

         assert(!is_mmapped(p));
         { /* split out elements */
            void *mem_orig = mem;
            boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
            size_t num_elements = next_i-i;

            for(++i; i != next_i; ++i) {
               void **mem_prev = ((void**)mem);
               size = request2size(sizes[i]*element_size);
               remainder_size -= size;
               set_size_and_pinuse_of_inuse_chunk(m, p, size);
               p = chunk_plus_offset(p, size);
               mem = chunk2mem(p);
               *mem_prev = mem;
            }
            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
            BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
         }
      }
      if (was_enabled)
         enable_mmap(m);
   }
   return 1;
}
int boost_cont_multialloc_arrays
   (size_t n_elements, const size_t *sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain)
{
   int ret = 0;
   mstate ms = (mstate)gm;
   ensure_initialization();
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
   }
   else if (!PREACTION(ms)) {
      ret = internal_multialloc_arrays(ms, n_elements, sizes, element_size, contiguous_elements, pchain);
      POSTACTION(ms);
   }
   return ret;
}
/*Doug Lea malloc extensions*/
static boost_cont_malloc_stats_t get_malloc_stats(mstate m)
{
   boost_cont_malloc_stats_t ret = { 0, 0, 0 };
   ensure_initialization();
   if (!PREACTION(m)) {
      size_t maxfp = 0;
      size_t fp = 0;
      size_t used = 0;
      check_malloc_state(m);
      if (is_initialized(m)) {
         msegmentptr s = &m->seg;
         maxfp = m->max_footprint;
         fp = m->footprint;
         used = fp - (m->topsize + TOP_FOOT_SIZE);

         while (s != 0) {
            mchunkptr q = align_as_chunk(s->base);
            while (segment_holds(s, q) &&
                   q != m->top && q->head != FENCEPOST_HEAD) {
               if (!is_inuse(q))
                  used -= chunksize(q);
               q = next_chunk(q);
            }
            s = s->next;
         }
      }

      ret.max_system_bytes = maxfp;
      ret.system_bytes = fp;
      ret.in_use_bytes = used;
      POSTACTION(m);
   }
   return ret;
}
size_t boost_cont_size(const void *p)
{  return DL_SIZE_IMPL(p);  }
void* boost_cont_malloc(size_t bytes)
{
   size_t received_bytes;
   ensure_initialization();
   return boost_cont_allocation_command
      (BOOST_CONTAINER_ALLOCATE_NEW, 1, bytes, bytes, &received_bytes, 0).first;
}
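/*
   Illustrative usage of the malloc-style entry points (not part of the build):

      void  *p  = boost_cont_malloc(100);  // at least 100 usable bytes
      size_t sz = boost_cont_size(p);      // real usable payload, >= 100
      boost_cont_free(p);                  // pairs with boost_cont_malloc

   boost_cont_malloc forwards to boost_cont_allocation_command with
   BOOST_CONTAINER_ALLOCATE_NEW and equal limit/preferred sizes, so the
   received size reported there matches boost_cont_size().
*/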
void boost_cont_free(void* mem)
{
   mstate ms = (mstate)gm;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
   }
   else if (!PREACTION(ms)) {
      mspace_free_lockless(ms, mem);
      POSTACTION(ms);
   }
}
void* boost_cont_memalign(size_t bytes, size_t alignment)
{
   void *addr;
   ensure_initialization();
   addr = mspace_memalign(gm, alignment, bytes);
   if(addr){
      s_allocated_memory += chunksize(mem2chunk(addr));
   }
   return addr;
}
int boost_cont_multialloc_nodes
   (size_t n_elements, size_t elem_size, size_t contiguous_elements, boost_cont_memchain *pchain)
{
   int ret = 0;
   mstate ms = (mstate)gm;
   ensure_initialization();
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
   }
   else if (!PREACTION(ms)) {
      ret = internal_node_multialloc(ms, n_elements, elem_size, contiguous_elements, pchain);
      POSTACTION(ms);
   }
   return ret;
}
size_t boost_cont_footprint()
{
   return ((mstate)gm)->footprint;
}
size_t boost_cont_allocated_memory()
{
   size_t alloc_mem = 0;
   mstate m = (mstate)gm;
   ensure_initialization();
   if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, m);
   }

   if (!PREACTION(m)) {
      check_malloc_state(m);
      if (is_initialized(m)) {
         size_t nfree = SIZE_T_ONE; /* top always free */
         size_t mfree = m->topsize + TOP_FOOT_SIZE;

         msegmentptr s = &m->seg;
         while (s != 0) {
            mchunkptr q = align_as_chunk(s->base);
            while (segment_holds(s, q) &&
                   q != m->top && q->head != FENCEPOST_HEAD) {
               size_t sz = chunksize(q);
               if (!is_inuse(q)) {
                  mfree += sz;
                  ++nfree;
               }
               q = next_chunk(q);
            }
            s = s->next;
         }
         {
            size_t uordblks = m->footprint - mfree;
            if(uordblks > (nfree-1)*TOP_FOOT_SIZE)
               alloc_mem = (size_t)(uordblks - (nfree-1)*TOP_FOOT_SIZE);
            else
               alloc_mem = uordblks;
         }
      }
      POSTACTION(m);
   }
   return alloc_mem;
}
size_t boost_cont_chunksize(const void *p)
{  return chunksize(mem2chunk(p));  }

int boost_cont_all_deallocated()
{  return !s_allocated_memory;  }
boost_cont_malloc_stats_t boost_cont_malloc_stats()
{
   mstate ms = (mstate)gm;
   if (ok_magic(ms)) {
      return get_malloc_stats(ms);
   }
   else {
      boost_cont_malloc_stats_t r = { 0, 0, 0 };
      USAGE_ERROR_ACTION(ms,ms);
      return r;
   }
}

size_t boost_cont_in_use_memory()
{  return s_allocated_memory;  }
int boost_cont_trim(size_t pad)
{
   ensure_initialization();
   return dlmalloc_trim(pad);
}
int boost_cont_grow
   (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received)
{
   mstate ms = (mstate)gm;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
   }

   if (!PREACTION(ms)) {
      mchunkptr p = mem2chunk(oldmem);
      size_t oldsize = chunksize(p);
      p = try_realloc_chunk_with_min(ms, p, request2size(minbytes), request2size(maxbytes), 0);
      POSTACTION(ms);
      if(p){
         check_inuse_chunk(ms, p);
         *received = DL_SIZE_IMPL(oldmem);
         s_allocated_memory += chunksize(p) - oldsize;
      }
      return 0 != p;
   }
   return 0;
}
int boost_cont_shrink
   (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received, int do_commit)
{
   mstate ms = (mstate)gm;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
   }

   if (!PREACTION(ms)) {
      int ret = internal_shrink(ms, oldmem, minbytes, maxbytes, received, do_commit);
      POSTACTION(ms);
      return 0 != ret;
   }
   return 0;
}
void* boost_cont_alloc
   (size_t minbytes, size_t preferred_bytes, size_t *received_bytes)
{
   //ensure_initialization provided by boost_cont_allocation_command
   return boost_cont_allocation_command
      (BOOST_CONTAINER_ALLOCATE_NEW, 1, minbytes, preferred_bytes, received_bytes, 0).first;
}
void boost_cont_multidealloc(boost_cont_memchain *pchain)
{
   mstate ms = (mstate)gm;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
   }
   internal_multialloc_free(ms, pchain);
}
int boost_cont_malloc_check()
{
   mstate ms = (mstate)gm;
   ensure_initialization();
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
   }
   check_malloc_state(ms);
   return 1;
}
boost_cont_command_ret_t boost_cont_allocation_command
   (allocation_type command, size_t sizeof_object, size_t limit_size
   , size_t preferred_size, size_t *received_size, void *reuse_ptr)
{
   boost_cont_command_ret_t ret = { 0, 0 };
   ensure_initialization();
   if(command & (BOOST_CONTAINER_SHRINK_IN_PLACE | BOOST_CONTAINER_TRY_SHRINK_IN_PLACE)){
      int success = boost_cont_shrink( reuse_ptr, preferred_size, limit_size
                                     , received_size, (command & BOOST_CONTAINER_SHRINK_IN_PLACE));
      ret.first = success ? reuse_ptr : 0;
      return ret;
   }

   if(limit_size > preferred_size)
      return ret;

   {
      mstate ms = (mstate)gm;

      /*Expand in place*/
      if (!PREACTION(ms)) {
         #if FOOTERS
         if(reuse_ptr){
            mstate m = get_mstate_for(mem2chunk(reuse_ptr));
            if (!ok_magic(m)) {
               USAGE_ERROR_ACTION(m, reuse_ptr);
               POSTACTION(ms);
               return ret;
            }
         }
         #endif
         if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
            void *r = internal_grow_both_sides
               ( ms, command, reuse_ptr, limit_size
               , preferred_size, received_size, sizeof_object, 1);
            if(r){
               ret.first  = r;
               ret.second = 1;
            }
         }
         if(!ret.first && (command & BOOST_CONTAINER_ALLOCATE_NEW)){
            void *addr = mspace_malloc_lockless(ms, preferred_size);
            if(!addr)   addr = mspace_malloc_lockless(ms, limit_size);
            if(addr){
               s_allocated_memory += chunksize(mem2chunk(addr));
               *received_size = DL_SIZE_IMPL(addr);
            }
            ret.first  = addr;
            ret.second = 0;
         }
         //Now try to expand both sides with min size
         if(!ret.first && reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
            void *r = internal_grow_both_sides
               ( ms, command, reuse_ptr, limit_size
               , preferred_size, received_size, sizeof_object, 0);
            if(r){
               ret.first  = r;
               ret.second = 1;
            }
         }
         POSTACTION(ms);
      }
   }
   return ret;
}
int boost_cont_mallopt(int param_number, int value)
{
   return change_mparam(param_number, value);
}
void *boost_cont_sync_create()
{
   void *p = boost_cont_malloc(sizeof(MLOCK_T));
   if(p){
      if(0 != INITIAL_LOCK((MLOCK_T*)p)){
         boost_cont_free(p);
         p = 0;
      }
   }
   return p;
}

void boost_cont_sync_destroy(void *sync)
{
   if(sync){
      (void)DESTROY_LOCK((MLOCK_T*)sync);
      boost_cont_free(sync);
   }
}
int boost_cont_sync_lock(void *sync)
{  return 0 == (ACQUIRE_LOCK((MLOCK_T*)sync));  }

void boost_cont_sync_unlock(void *sync)
{  RELEASE_LOCK((MLOCK_T*)sync);  }
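/*
   Illustrative pairing of the sync helpers (assumed usage): the
   create/lock/unlock/destroy quartet wraps dlmalloc's MLOCK_T so callers can
   guard their own critical sections with the same primitive the allocator
   uses internally.

      void *mtx = boost_cont_sync_create();
      if(mtx && boost_cont_sync_lock(mtx)){
         // ... critical section ...
         boost_cont_sync_unlock(mtx);
      }
      boost_cont_sync_destroy(mtx);
*/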
int boost_cont_global_sync_lock()
{
   int ret;
   ensure_initialization();
   ret = ACQUIRE_MALLOC_GLOBAL_LOCK();
   return 0 == ret;
}

void boost_cont_global_sync_unlock()
{
   RELEASE_MALLOC_GLOBAL_LOCK()
}
//#ifdef DL_DEBUG_DEFINED

#ifdef _MSC_VER
#pragma warning (pop)
#endif