1 //////////////////////////////////////////////////////////////////////////////
2 //
3 // (C) Copyright Ion Gaztanaga 2007-2015. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // See http://www.boost.org/libs/container for documentation.
8 //
9 //////////////////////////////////////////////////////////////////////////////
10
11 #include <boost/container/detail/alloc_lib.h>
12
13 #include "errno.h" //dlmalloc bug EINVAL is used in posix_memalign without checking LACKS_ERRNO_H
14 #include "limits.h" //CHAR_BIT
15 #ifdef BOOST_CONTAINER_DLMALLOC_FOOTERS
16 #define FOOTERS 1
17 #endif
18 #define USE_LOCKS 1
19 #define MSPACES 1
20 #define NO_MALLINFO 1
21 #define NO_MALLOC_STATS 1
22
23
24 #if !defined(NDEBUG)
25 #if !defined(DEBUG)
26 #define DEBUG 1
27 #define DL_DEBUG_DEFINED
28 #endif
29 #endif
30
31 #define USE_DL_PREFIX
32
33 #ifdef __GNUC__
34 #define FORCEINLINE inline
35 #endif
36 #include "dlmalloc_2_8_6.c"
37
38 #ifdef _MSC_VER
39 #pragma warning (push)
40 #pragma warning (disable : 4127)
41 #pragma warning (disable : 4267)
42 #pragma warning (disable : 4127)
43 #pragma warning (disable : 4702)
44 #pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
45 #pragma warning (disable : 4251 4231 4660) /*dll warnings*/
46 #endif
47
48 #define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))
49
50 static size_t s_allocated_memory;
51
52 ///////////////////////////////////////////////////////////////
53 ///////////////////////////////////////////////////////////////
54 ///////////////////////////////////////////////////////////////
55 //
56 // SLIGHTLY MODIFIED DLMALLOC FUNCTIONS
57 //
58 ///////////////////////////////////////////////////////////////
59 ///////////////////////////////////////////////////////////////
60 ///////////////////////////////////////////////////////////////
61
62 //This function is equivalent to mspace_free, but with PREACTION replaced by 0
63 //and POSTACTION replaced by a no-op, so the caller must already hold the mspace lock
64 static void mspace_free_lockless(mspace msp, void* mem)
65 {
66 if (mem != 0) {
67 mchunkptr p = mem2chunk(mem);
68 #if FOOTERS
69 mstate fm = get_mstate_for(p);
70 msp = msp; /* placate people compiling -Wunused */
71 #else /* FOOTERS */
72 mstate fm = (mstate)msp;
73 #endif /* FOOTERS */
74 if (!ok_magic(fm)) {
75 USAGE_ERROR_ACTION(fm, p);
76 return;
77 }
78 if (!0){//PREACTION(fm)) {
79 check_inuse_chunk(fm, p);
80 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
81 size_t psize = chunksize(p);
82 mchunkptr next = chunk_plus_offset(p, psize);
83 s_allocated_memory -= psize;
84 if (!pinuse(p)) {
85 size_t prevsize = p->prev_foot;
86 if (is_mmapped(p)) {
87 psize += prevsize + MMAP_FOOT_PAD;
88 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
89 fm->footprint -= psize;
90 goto postaction;
91 }
92 else {
93 mchunkptr prev = chunk_minus_offset(p, prevsize);
94 psize += prevsize;
95 p = prev;
96 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
97 if (p != fm->dv) {
98 unlink_chunk(fm, p, prevsize);
99 }
100 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
101 fm->dvsize = psize;
102 set_free_with_pinuse(p, psize, next);
103 goto postaction;
104 }
105 }
106 else
107 goto erroraction;
108 }
109 }
110
111 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
112 if (!cinuse(next)) { /* consolidate forward */
113 if (next == fm->top) {
114 size_t tsize = fm->topsize += psize;
115 fm->top = p;
116 p->head = tsize | PINUSE_BIT;
117 if (p == fm->dv) {
118 fm->dv = 0;
119 fm->dvsize = 0;
120 }
121 if (should_trim(fm, tsize))
122 sys_trim(fm, 0);
123 goto postaction;
124 }
125 else if (next == fm->dv) {
126 size_t dsize = fm->dvsize += psize;
127 fm->dv = p;
128 set_size_and_pinuse_of_free_chunk(p, dsize);
129 goto postaction;
130 }
131 else {
132 size_t nsize = chunksize(next);
133 psize += nsize;
134 unlink_chunk(fm, next, nsize);
135 set_size_and_pinuse_of_free_chunk(p, psize);
136 if (p == fm->dv) {
137 fm->dvsize = psize;
138 goto postaction;
139 }
140 }
141 }
142 else
143 set_free_with_pinuse(p, psize, next);
144
145 if (is_small(psize)) {
146 insert_small_chunk(fm, p, psize);
147 check_free_chunk(fm, p);
148 }
149 else {
150 tchunkptr tp = (tchunkptr)p;
151 insert_large_chunk(fm, tp, psize);
152 check_free_chunk(fm, p);
153 if (--fm->release_checks == 0)
154 release_unused_segments(fm);
155 }
156 goto postaction;
157 }
158 }
159 erroraction:
160 USAGE_ERROR_ACTION(fm, p);
161 postaction:
162 ;//POSTACTION(fm);
163 }
164 }
165 }
166
167 //This function is equivalent to mspace_malloc, but with PREACTION replaced by 0
168 //and POSTACTION replaced by a no-op, so the caller must already hold the mspace lock
169 void* mspace_malloc_lockless(mspace msp, size_t bytes)
170 {
171 mstate ms = (mstate)msp;
172 if (!ok_magic(ms)) {
173 USAGE_ERROR_ACTION(ms,ms);
174 return 0;
175 }
176 if (!0){//PREACTION(ms)) {
177 void* mem;
178 size_t nb;
179 if (bytes <= MAX_SMALL_REQUEST) {
180 bindex_t idx;
181 binmap_t smallbits;
182 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
183 idx = small_index(nb);
184 smallbits = ms->smallmap >> idx;
185
186 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
187 mchunkptr b, p;
188 idx += ~smallbits & 1; /* Uses next bin if idx empty */
189 b = smallbin_at(ms, idx);
190 p = b->fd;
191 assert(chunksize(p) == small_index2size(idx));
192 unlink_first_small_chunk(ms, b, p, idx);
193 set_inuse_and_pinuse(ms, p, small_index2size(idx));
194 mem = chunk2mem(p);
195 check_malloced_chunk(ms, mem, nb);
196 goto postaction;
197 }
198
199 else if (nb > ms->dvsize) {
200 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
201 mchunkptr b, p, r;
202 size_t rsize;
203 bindex_t i;
204 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
205 binmap_t leastbit = least_bit(leftbits);
206 compute_bit2idx(leastbit, i);
207 b = smallbin_at(ms, i);
208 p = b->fd;
209 assert(chunksize(p) == small_index2size(i));
210 unlink_first_small_chunk(ms, b, p, i);
211 rsize = small_index2size(i) - nb;
212 /* Fit here cannot be remainderless if 4byte sizes */
213 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
214 set_inuse_and_pinuse(ms, p, small_index2size(i));
215 else {
216 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
217 r = chunk_plus_offset(p, nb);
218 set_size_and_pinuse_of_free_chunk(r, rsize);
219 replace_dv(ms, r, rsize);
220 }
221 mem = chunk2mem(p);
222 check_malloced_chunk(ms, mem, nb);
223 goto postaction;
224 }
225
226 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
227 check_malloced_chunk(ms, mem, nb);
228 goto postaction;
229 }
230 }
231 }
232 else if (bytes >= MAX_REQUEST)
233 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
234 else {
235 nb = pad_request(bytes);
236 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
237 check_malloced_chunk(ms, mem, nb);
238 goto postaction;
239 }
240 }
241
242 if (nb <= ms->dvsize) {
243 size_t rsize = ms->dvsize - nb;
244 mchunkptr p = ms->dv;
245 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
246 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
247 ms->dvsize = rsize;
248 set_size_and_pinuse_of_free_chunk(r, rsize);
249 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
250 }
251 else { /* exhaust dv */
252 size_t dvs = ms->dvsize;
253 ms->dvsize = 0;
254 ms->dv = 0;
255 set_inuse_and_pinuse(ms, p, dvs);
256 }
257 mem = chunk2mem(p);
258 check_malloced_chunk(ms, mem, nb);
259 goto postaction;
260 }
261
262 else if (nb < ms->topsize) { /* Split top */
263 size_t rsize = ms->topsize -= nb;
264 mchunkptr p = ms->top;
265 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
266 r->head = rsize | PINUSE_BIT;
267 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
268 mem = chunk2mem(p);
269 check_top_chunk(ms, ms->top);
270 check_malloced_chunk(ms, mem, nb);
271 goto postaction;
272 }
273
274 mem = sys_alloc(ms, nb);
275
276 postaction:
277 ;//POSTACTION(ms);
278 return mem;
279 }
280
281 return 0;
282 }
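/* Illustrative note (added): the *_lockless functions above assume the caller already
   owns the mspace lock, so they are always wrapped in PREACTION/POSTACTION, exactly as
   boost_cont_free and boost_cont_allocation_command do further below:

      if (!PREACTION(ms)) {
         void *mem = mspace_malloc_lockless(ms, nbytes);   // nbytes: hypothetical request
         //...
         mspace_free_lockless(ms, mem);
         POSTACTION(ms);
      }
*/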
283
284 //This function is equivalent to try_realloc_chunk, but it handles
285 //minimum and desired byte counts
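//Descriptive note (added): min_nb and des_nb are chunk sizes (the callers pass them
//through request2size). The chunk is kept in place and grown to at least min_nb bytes,
//preferring des_nb when enough forward room exists (splitting a remainder, extending
//into top or dv, or merging the following free chunk); for mmapped chunks mmap_resize
//is used and may move the chunk only when can_move != 0. Returns the resulting chunk or 0.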
286 static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb, size_t des_nb, int can_move)
287 {
288 mchunkptr newp = 0;
289 size_t oldsize = chunksize(p);
290 mchunkptr next = chunk_plus_offset(p, oldsize);
291 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
292 ok_next(p, next) && ok_pinuse(next))) {
293 if (is_mmapped(p)) {
294 newp = mmap_resize(m, p, des_nb, can_move);
295 if(!newp) //mmap does not return how many bytes we could reallocate, so go the minimum
296 newp = mmap_resize(m, p, min_nb, can_move);
297 }
298 else if (oldsize >= min_nb) { /* already big enough */
299 size_t nb = oldsize >= des_nb ? des_nb : oldsize;
300 size_t rsize = oldsize - nb;
301 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
302 mchunkptr r = chunk_plus_offset(p, nb);
303 set_inuse(m, p, nb);
304 set_inuse(m, r, rsize);
305 dispose_chunk(m, r, rsize);
306 }
307 newp = p;
308 }
309 else if (next == m->top) { /* extend into top */
310 if (oldsize + m->topsize > min_nb) {
311 size_t nb = (oldsize + m->topsize) > des_nb ? des_nb : (oldsize + m->topsize - MALLOC_ALIGNMENT);
312 size_t newsize = oldsize + m->topsize;
313 size_t newtopsize = newsize - nb;
314 mchunkptr newtop = chunk_plus_offset(p, nb);
315 set_inuse(m, p, nb);
316 newtop->head = newtopsize |PINUSE_BIT;
317 m->top = newtop;
318 m->topsize = newtopsize;
319 newp = p;
320 }
321 }
322 else if (next == m->dv) { /* extend into dv */
323 size_t dvs = m->dvsize;
324 if (oldsize + dvs >= min_nb) {
325 size_t nb = (oldsize + dvs) >= des_nb ? des_nb : (oldsize + dvs);
326 size_t dsize = oldsize + dvs - nb;
327 if (dsize >= MIN_CHUNK_SIZE) {
328 mchunkptr r = chunk_plus_offset(p, nb);
329 mchunkptr n = chunk_plus_offset(r, dsize);
330 set_inuse(m, p, nb);
331 set_size_and_pinuse_of_free_chunk(r, dsize);
332 clear_pinuse(n);
333 m->dvsize = dsize;
334 m->dv = r;
335 }
336 else { /* exhaust dv */
337 size_t newsize = oldsize + dvs;
338 set_inuse(m, p, newsize);
339 m->dvsize = 0;
340 m->dv = 0;
341 }
342 newp = p;
343 }
344 }
345 else if (!cinuse(next)) { /* extend into next free chunk */
346 size_t nextsize = chunksize(next);
347 if (oldsize + nextsize >= min_nb) {
348 size_t nb = (oldsize + nextsize) >= des_nb ? des_nb : (oldsize + nextsize);
349 size_t rsize = oldsize + nextsize - nb;
350 unlink_chunk(m, next, nextsize);
351 if (rsize < MIN_CHUNK_SIZE) {
352 size_t newsize = oldsize + nextsize;
353 set_inuse(m, p, newsize);
354 }
355 else {
356 mchunkptr r = chunk_plus_offset(p, nb);
357 set_inuse(m, p, nb);
358 set_inuse(m, r, rsize);
359 dispose_chunk(m, r, rsize);
360 }
361 newp = p;
362 }
363 }
364 }
365 else {
366 USAGE_ERROR_ACTION(m, chunk2mem(p));
367 }
368 return newp;
369 }
370
371 #define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
372 *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
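//Descriptive note (added): a boost_cont_memchain is an intrusive singly linked list:
//the first sizeof(void*) bytes of each node hold the address of the next node.
//The macro above unlinks NEXTMEM by making THISMEM point directly to NEXTMEM's successor.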
373
374 //This function is based on internal_bulk_free, replacing the iteration over an
375 //array[] of pointers with a boost_cont_memchain.
376 //Nodes that cannot be deallocated here (with FOOTERS, those owned by another mspace)
377 //are handed back to the caller in the chain. After forward merging, backward merging is also tried.
378 static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
379 {
380 #if FOOTERS
381 boost_cont_memchain ret_chain;
382 BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
383 #endif
384 if (!PREACTION(m)) {
385 boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
386 while(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
387 void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
388 mchunkptr a_p = mem2chunk(a_mem);
389 size_t psize = chunksize(a_p);
390 #if FOOTERS
391 if (get_mstate_for(a_p) != m) {
392 BOOST_CONTAINER_MEMIT_NEXT(a_it);
393 BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
394 continue;
395 }
396 #endif
397 check_inuse_chunk(m, a_p);
398 if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
399 while(1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
400 boost_cont_memchain_it b_it = a_it;
401 BOOST_CONTAINER_MEMIT_NEXT(b_it);
402 if(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)){
403 void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
404 mchunkptr b_p = mem2chunk(b_mem);
405 if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
406 psize += chunksize(b_p);
407 set_inuse(m, a_p, psize);
408 BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
409 continue;
410 }
411 if(RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))){
412 /* b chunk is contiguous and previous so a's size can be added to b */
413 if(a_p == next_chunk(b_p)) {
414 psize += chunksize(b_p);
415 set_inuse(m, b_p, psize);
416 a_it = b_it;
417 a_p = b_p;
418 a_mem = b_mem;
419 continue;
420 }
421 }
422 }
423 /* Normal deallocation starts again in the outer loop */
424 a_it = b_it;
425 s_allocated_memory -= psize;
426 dispose_chunk(m, a_p, psize);
427 break;
428 }
429 }
430 else {
431 CORRUPTION_ERROR_ACTION(m);
432 break;
433 }
434 }
435 if (should_trim(m, m->topsize))
436 sys_trim(m, 0);
437 POSTACTION(m);
438 }
439 #if FOOTERS
440 {
441 boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
442 BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
443 BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
444 (pchain
445 , last_pchain
446 , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
447 , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
448 , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
449 );
450 }
451 #endif
452 }
453
454 ///////////////////////////////////////////////////////////////
455 ///////////////////////////////////////////////////////////////
456 ///////////////////////////////////////////////////////////////
457 //
458 // NEW FUNCTIONS BASED ON DLMALLOC INTERNALS
459 //
460 ///////////////////////////////////////////////////////////////
461 ///////////////////////////////////////////////////////////////
462 ///////////////////////////////////////////////////////////////
463
464 #define GET_TRUNCATED_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE)/(ROUNDTO)*(ROUNDTO))
465 #define GET_ROUNDED_SIZE(ORIG_SIZE, ROUNDTO) ((((ORIG_SIZE)-1)/(ROUNDTO)+1)*(ROUNDTO))
466 #define GET_TRUNCATED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE) & (~(ROUNDTO-1)))
467 #define GET_ROUNDED_PO2_SIZE(ORIG_SIZE, ROUNDTO) (((ORIG_SIZE - 1) & (~(ROUNDTO-1))) + ROUNDTO)
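/* Worked example (added for clarity); the *_PO2_* variants assume ROUNDTO is a power of two:
      GET_TRUNCATED_SIZE(13, 4)     == 12
      GET_ROUNDED_SIZE(13, 4)       == 16
      GET_TRUNCATED_PO2_SIZE(13, 8) ==  8
      GET_ROUNDED_PO2_SIZE(13, 8)   == 16   (and GET_ROUNDED_PO2_SIZE(16, 8) == 16)
*/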
468
469 /* Greatest common divisor and least common multiple.
470 CALCULATE_GCD computes the greatest common divisor of two
471 integers using Euclid's algorithm.
472
473 Pre: A > 0 && B > 0
474 Recommended: A > B*/
475 #define CALCULATE_GCD(A, B, OUT)\
476 {\
477 size_t a = A;\
478 size_t b = B;\
479 do\
480 {\
481 size_t tmp = b;\
482 b = a % b;\
483 a = tmp;\
484 } while (b != 0);\
485 \
486 OUT = a;\
487 }
488
489 /* CALCULATE_LCM computes the least common multiple of two
490 integers.
491
492 Pre: A > 0 && B > 0
493 Recommended: A > B*/
494 #define CALCULATE_LCM(A, B, OUT)\
495 {\
496 CALCULATE_GCD(A, B, OUT);\
497 OUT = (A / OUT)*B;\
498 }
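/* Illustrative usage (added) of the two macros above:

      size_t out;
      CALCULATE_GCD(24u, 16u, out);   // out == 8
      CALCULATE_LCM(24u, 16u, out);   // out == (24/8)*16 == 48
*/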
499
500 static int calculate_lcm_and_needs_backwards_lcmed
501 (size_t backwards_multiple, size_t received_size, size_t size_to_achieve,
502 size_t *plcm, size_t *pneeds_backwards_lcmed)
503 {
504 /* Now calculate lcm */
505 size_t max = backwards_multiple;
506 size_t min = MALLOC_ALIGNMENT;
507 size_t needs_backwards;
508 size_t needs_backwards_lcmed;
509 size_t lcm;
510 size_t current_forward;
511 /*Swap if necessary*/
512 if(max < min){
513 size_t tmp = min;
514 min = max;
515 max = tmp;
516 }
517 /*Check if it's a power of two*/
518 if((backwards_multiple & (backwards_multiple-1)) == 0){
519 if(0 != (size_to_achieve & ((backwards_multiple-1)))){
520 /* invalid request; the caller will report the usage error (m/oldp are not in scope here) */
521 return 0;
522 }
523
524 lcm = max;
525 /*We want a buffer between minbytes and maxbytes; when maxbytes can't be
526 achieved we settle for at least minbytes, so calculate the biggest
527 of all the possibilities*/
528 current_forward = GET_TRUNCATED_PO2_SIZE(received_size, backwards_multiple);
529 needs_backwards = size_to_achieve - current_forward;
530 assert((needs_backwards % backwards_multiple) == 0);
531 needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(needs_backwards, lcm);
532 *plcm = lcm;
533 *pneeds_backwards_lcmed = needs_backwards_lcmed;
534 return 1;
535 }
536 /*Check if it's a multiple of the alignment*/
537 else if((backwards_multiple & (MALLOC_ALIGNMENT - 1u)) == 0){
538 lcm = backwards_multiple;
539 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
540 //No need to round needs_backwards because backwards_multiple == lcm
541 needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
542 assert((needs_backwards_lcmed & (MALLOC_ALIGNMENT - 1u)) == 0);
543 *plcm = lcm;
544 *pneeds_backwards_lcmed = needs_backwards_lcmed;
545 return 1;
546 }
547 /*Check if it's a multiple of half of the alignment*/
548 else if((backwards_multiple & ((MALLOC_ALIGNMENT/2u) - 1u)) == 0){
549 lcm = backwards_multiple*2u;
550 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
551 needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
552 if(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
553 //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
554 needs_backwards_lcmed += backwards_multiple;
555 assert((needs_backwards_lcmed % lcm) == 0);
556 *plcm = lcm;
557 *pneeds_backwards_lcmed = needs_backwards_lcmed;
558 return 1;
559 }
560 /*Check if it's a multiple of a quarter of the alignment*/
561 else if((backwards_multiple & ((MALLOC_ALIGNMENT/4u) - 1u)) == 0){
562 size_t remainder;
563 lcm = backwards_multiple*4u;
564 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
565 needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
566 //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
567 //needs_backwards_lcmed += backwards_multiple;
568 if(0 != (remainder = ((needs_backwards_lcmed & (MALLOC_ALIGNMENT-1))>>(MALLOC_ALIGNMENT/8u)))){
569 if(backwards_multiple & MALLOC_ALIGNMENT/2u){
570 needs_backwards_lcmed += (remainder)*backwards_multiple;
571 }
572 else{
573 needs_backwards_lcmed += (4-remainder)*backwards_multiple;
574 }
575 }
576 assert((needs_backwards_lcmed % lcm) == 0);
577 *plcm = lcm;
578 *pneeds_backwards_lcmed = needs_backwards_lcmed;
579 return 1;
580 }
581 else{
582 CALCULATE_LCM(max, min, lcm);
583 /*We want a buffer between minbytes and maxbytes; when maxbytes can't be
584 achieved we settle for at least minbytes, so calculate the biggest
585 of all the possibilities*/
586 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
587 needs_backwards = size_to_achieve - current_forward;
588 assert((needs_backwards % backwards_multiple) == 0);
589 needs_backwards_lcmed = GET_ROUNDED_SIZE(needs_backwards, lcm);
590 *plcm = lcm;
591 *pneeds_backwards_lcmed = needs_backwards_lcmed;
592 return 1;
593 }
594 }
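/* Worked example (added, assuming MALLOC_ALIGNMENT == 16): with
   backwards_multiple == 32 (a power of two), received_size == 100 and
   size_to_achieve == 160, the first branch applies:
      lcm                   == 32
      current_forward       == GET_TRUNCATED_PO2_SIZE(100, 32) == 96
      needs_backwards       == 160 - 96 == 64
      needs_backwards_lcmed == GET_ROUNDED_PO2_SIZE(64, 32)    == 64
   i.e. the caller must grow the block 64 bytes backwards. */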
595
596 static void *internal_grow_both_sides
597 (mstate m
598 ,allocation_type command
599 ,void *oldmem
600 ,size_t minbytes
601 ,size_t maxbytes
602 ,size_t *received_size
603 ,size_t backwards_multiple
604 ,int only_preferred_backwards)
605 {
606 mchunkptr oldp = mem2chunk(oldmem);
607 size_t oldsize = chunksize(oldp);
608 *received_size = oldsize - overhead_for(oldp);
609 if(minbytes <= *received_size)
610 return oldmem;
611
612 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp))) {
613 if(command & BOOST_CONTAINER_EXPAND_FWD){
614 if(try_realloc_chunk_with_min(m, oldp, request2size(minbytes), request2size(maxbytes), 0)){
615 check_inuse_chunk(m, oldp);
616 *received_size = DL_SIZE_IMPL(oldmem);
617 s_allocated_memory += chunksize(oldp) - oldsize;
618 return oldmem;
619 }
620 }
621 else{
622 *received_size = DL_SIZE_IMPL(oldmem);
623 if(*received_size >= maxbytes)
624 return oldmem;
625 }
626 /*
627 Should we check this?
628 if(backwards_multiple &&
629 (0 != (minbytes % backwards_multiple) &&
630 0 != (maxbytes % backwards_multiple)) ){
631 USAGE_ERROR_ACTION(m, oldp);
632 return 0;
633 }
634 */
635 /* We reach here only if forward expansion fails */
636 if(!(command & BOOST_CONTAINER_EXPAND_BWD) || pinuse(oldp)){
637 return 0;
638 }
639 {
640 size_t prevsize = oldp->prev_foot;
641 if ((prevsize & USE_MMAP_BIT) != 0){
642 /*Return failure if the previous chunk was mmapped:
643 mremap does not allow expanding to a fixed address (MREMAP_FIXED) without
644 also allowing the mapping to be moved/copied (MREMAP_MAYMOVE must be set too).*/
645 return 0;
646 }
647 else {
648 mchunkptr prev = chunk_minus_offset(oldp, prevsize);
649 size_t dsize = oldsize + prevsize;
650 size_t needs_backwards_lcmed;
651 size_t lcm;
652
653 /* Let's calculate the number of extra bytes of data before the current
654 block's beginning. The value is a multiple of backwards_multiple
655 and the alignment*/
656 if(!calculate_lcm_and_needs_backwards_lcmed
657 ( backwards_multiple, *received_size
658 , only_preferred_backwards ? maxbytes : minbytes
659 , &lcm, &needs_backwards_lcmed)
660 || !RTCHECK(ok_address(m, prev))){
661 USAGE_ERROR_ACTION(m, oldp);
662 return 0;
663 }
664 /* Check if previous block has enough size */
665 else if(prevsize < needs_backwards_lcmed){
666 /* preferred size? */
667 return 0;
668 }
669 /* Now take all the forward space. This must succeed, as we've previously calculated the correct size */
670 if(command & BOOST_CONTAINER_EXPAND_FWD){
671 if(!try_realloc_chunk_with_min(m, oldp, request2size(*received_size), request2size(*received_size), 0)){
672 assert(0);
673 }
674 check_inuse_chunk(m, oldp);
675 *received_size = DL_SIZE_IMPL(oldmem);
676 s_allocated_memory += chunksize(oldp) - oldsize;
677 oldsize = chunksize(oldp);
678 dsize = oldsize + prevsize;
679 }
680 /* We need a minimum size to split the previous one */
681 if(prevsize >= (needs_backwards_lcmed + MIN_CHUNK_SIZE)){
682 mchunkptr r = chunk_minus_offset(oldp, needs_backwards_lcmed);
683 size_t rsize = oldsize + needs_backwards_lcmed;
684 size_t newprevsize = dsize - rsize;
685 int prev_was_dv = prev == m->dv;
686
687 assert(newprevsize >= MIN_CHUNK_SIZE);
688
689 if (prev_was_dv) {
690 m->dvsize = newprevsize;
691 }
692 else{/* if ((next->head & INUSE_BITS) == INUSE_BITS) { */
693 unlink_chunk(m, prev, prevsize);
694 insert_chunk(m, prev, newprevsize);
695 }
696
697 set_size_and_pinuse_of_free_chunk(prev, newprevsize);
698 clear_pinuse(r);
699 set_inuse(m, r, rsize);
700 check_malloced_chunk(m, chunk2mem(r), rsize);
701 *received_size = chunksize(r) - overhead_for(r);
702 s_allocated_memory += chunksize(r) - oldsize;
703 return chunk2mem(r);
704 }
705 /* Check if there is no place to create a new block and
706 the whole new block is a multiple of the backwards expansion multiple */
707 else if(prevsize >= needs_backwards_lcmed && !(prevsize % lcm)) {
708 /* Just merge the whole previous block */
709 /* prevsize is multiple of lcm (and backwards_multiple)*/
710 *received_size += prevsize;
711
712 if (prev != m->dv) {
713 unlink_chunk(m, prev, prevsize);
714 }
715 else{
716 m->dvsize = 0;
717 m->dv = 0;
718 }
719 set_inuse(m, prev, dsize);
720 check_malloced_chunk(m, chunk2mem(prev), dsize);
721 s_allocated_memory += chunksize(prev) - oldsize;
722 return chunk2mem(prev);
723 }
724 else{
725 /* The previous block was big enough but there is no room
726 to split off a free remainder block, and taking the whole block does
727 not fulfill the alignment requirements */
728 return 0;
729 }
730 }
731 }
732 }
733 else{
734 USAGE_ERROR_ACTION(m, oldmem);
735 return 0;
736 }
737 return 0;
738 }
739
740 /* This is similar to mmap_resize but:
741 * It only shrinks
742 * It takes minimum and maximum sizes
743 * It takes an additional 'do_commit' argument: when zero, only the final
744 size is computed and reported, without doing the real shrink operation.
745 */
746 static int internal_mmap_shrink_in_place(mstate m, mchunkptr oldp, size_t nbmin, size_t nbmax, size_t *received_size, int do_commit)
747 {
748 size_t oldsize = chunksize(oldp);
749 *received_size = oldsize;
750 #if HAVE_MREMAP
751 if (is_small(nbmax)) /* Can't shrink mmap regions below small size */
752 return 0;
753 {
754 size_t effective_min = nbmin > MIN_LARGE_SIZE ? nbmin : MIN_LARGE_SIZE;
755 /* Keep old chunk if big enough but not too big */
756 if (oldsize >= effective_min + SIZE_T_SIZE &&
757 (oldsize - effective_min) <= (mparams.granularity << 1))
758 return 0;
759 /* Now calculate new sizes */
760 {
761 size_t offset = oldp->prev_foot;
762 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
763 size_t newmmsize = mmap_align(effective_min + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
764 *received_size = newmmsize;
765 if(do_commit){
766 const int flags = 0; /* placate people compiling -Wunused */
767 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
768 oldmmsize, newmmsize, flags);
769 /*This must always succeed */
770 if(!cp){
771 USAGE_ERROR_ACTION(m, m);
772 return 0;
773 }
774 {
775 mchunkptr newp = (mchunkptr)(cp + offset);
776 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
777 newp->head = psize;
778 mark_inuse_foot(m, newp, psize);
779 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
780 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
781
782 if (cp < m->least_addr)
783 m->least_addr = cp;
784 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
785 m->max_footprint = m->footprint;
786 check_mmapped_chunk(m, newp);
787 }
788 }
789 }
790 return 1;
791 }
792 #else //#if HAVE_MREMAP
793 (void)m;
794 (void)oldp;
795 (void)nbmin;
796 (void)nbmax;
797 (void)received_size;
798 (void)do_commit;
799 return 0;
800 #endif //#if HAVE_MREMAP
801 }
802
803 static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxbytes, size_t *received_size, int do_commit)
804 {
805 *received_size = chunksize(mem2chunk(oldmem)) - overhead_for(mem2chunk(oldmem));
806 if (minbytes >= MAX_REQUEST || maxbytes >= MAX_REQUEST) {
807 MALLOC_FAILURE_ACTION;
808 return 0;
809 }
810 else if(minbytes < MIN_REQUEST){
811 minbytes = MIN_REQUEST;
812 }
813 if (minbytes > maxbytes) {
814 return 0;
815 }
816
817 {
818 mchunkptr oldp = mem2chunk(oldmem);
819 size_t oldsize = chunksize(oldp);
820 mchunkptr next = chunk_plus_offset(oldp, oldsize);
821 void* extra = 0;
822
823 /* Try to either shrink or extend into top. Else malloc-copy-free*/
824 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
825 ok_next(oldp, next) && ok_pinuse(next))) {
826 size_t nbmin = request2size(minbytes);
827 size_t nbmax = request2size(maxbytes);
828
829 if (nbmin > oldsize){
830 /* Return error if old size is too small */
831 }
832 else if (is_mmapped(oldp)){
833 return internal_mmap_shrink_in_place(m, oldp, nbmin, nbmax, received_size, do_commit);
834 }
835 else{ // nbmin <= oldsize /* already big enough*/
836 size_t nb = nbmin;
837 size_t rsize = oldsize - nb;
838 if (rsize >= MIN_CHUNK_SIZE) {
839 if(do_commit){
840 mchunkptr remainder = chunk_plus_offset(oldp, nb);
841 set_inuse(m, oldp, nb);
842 set_inuse(m, remainder, rsize);
843 extra = chunk2mem(remainder);
844 }
845 *received_size = nb - overhead_for(oldp);
846 if(!do_commit)
847 return 1;
848 }
849 }
850 }
851 else {
852 USAGE_ERROR_ACTION(m, oldmem);
853 return 0;
854 }
855
856 if (extra != 0 && do_commit) {
857 mspace_free_lockless(m, extra);
858 check_inuse_chunk(m, oldp);
859 return 1;
860 }
861 else {
862 return 0;
863 }
864 }
865 }
866
867
868 #define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096
869
870 #define SQRT_MAX_SIZE_T (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2))
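/* Descriptive note (added): SQRT_MAX_SIZE_T is the largest value whose square still fits
   in a size_t (e.g. 0xFFFFFFFF for a 64-bit size_t). internal_node_multialloc uses it as
   a cheap overflow pre-check: if (a | b) <= SQRT_MAX_SIZE_T then both factors are
   <= SQRT_MAX_SIZE_T and a*b cannot overflow, so the division test is only needed in the
   rare remaining cases. */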
871
872 static int internal_node_multialloc
873 (mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
874 void* mem; /* malloced aggregate space */
875 mchunkptr p; /* corresponding chunk */
876 size_t remainder_size; /* remaining bytes while splitting */
877 flag_t was_enabled; /* to disable mmap */
878 size_t elements_per_segment = 0;
879 size_t element_req_size = request2size(element_size);
880 boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
881
882 /*Error if wrong element_size parameter */
883 if( !element_size ||
884 /*OR Error if n_elements is less than contiguous_elements */
885 ((contiguous_elements + 1) > (DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
886 /* OR Error if integer overflow */
887 (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
888 (MAX_SIZE_T/element_req_size) < contiguous_elements)){
889 return 0;
890 }
891 switch(contiguous_elements){
892 case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
893 {
894 /* Default contiguous, just check that we can store at least one element */
895 elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM/element_req_size;
896 elements_per_segment += (size_t)(!elements_per_segment);
897 }
898 break;
899 case DL_MULTIALLOC_ALL_CONTIGUOUS:
900 /* All elements should be allocated in a single call */
901 elements_per_segment = n_elements;
902 break;
903 default:
904 /* Allocate in chunks of "contiguous_elements" */
905 elements_per_segment = contiguous_elements;
906 }
907
908 {
909 size_t i;
910 size_t next_i;
911 /*
912 Allocate the aggregate chunk. First disable direct-mmapping so
913 malloc won't use it, since we would not be able to later
914 free/realloc space internal to a segregated mmap region.
915 */
916 was_enabled = use_mmap(m);
917 disable_mmap(m);
918 for(i = 0; i != n_elements; i = next_i)
919 {
920 size_t accum_size;
921 size_t n_elements_left = n_elements - i;
922 next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
923 accum_size = element_req_size*(next_i - i);
924
925 mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
926 if (mem == 0){
927 BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
928 while(i--){
929 void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
930 BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
931 mspace_free_lockless(m, addr);
932 }
933 if (was_enabled)
934 enable_mmap(m);
935 return 0;
936 }
937 p = mem2chunk(mem);
938 remainder_size = chunksize(p);
939 s_allocated_memory += remainder_size;
940
941 assert(!is_mmapped(p));
942 { /* split out elements */
943 void *mem_orig = mem;
944 boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
945 size_t num_elements = next_i-i;
946
947 size_t num_loops = num_elements - 1;
948 remainder_size -= element_req_size*num_loops;
949 while(num_loops--){
950 void **mem_prev = ((void**)mem);
951 set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
952 p = chunk_plus_offset(p, element_req_size);
953 mem = chunk2mem(p);
954 *mem_prev = mem;
955 }
956 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
957 BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
958 }
959 }
960 if (was_enabled)
961 enable_mmap(m);
962 }
963 return 1;
964 }
965
966 static int internal_multialloc_arrays
967 (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
968 void* mem; /* malloced aggregate space */
969 mchunkptr p; /* corresponding chunk */
970 size_t remainder_size; /* remaining bytes while splitting */
971 flag_t was_enabled; /* to disable mmap */
972 size_t size;
973 size_t boost_cont_multialloc_segmented_malloc_size;
974 size_t max_size;
975
976 /* Check overflow */
977 if(!element_size){
978 return 0;
979 }
980 max_size = MAX_REQUEST/element_size;
981 /* Different sizes*/
982 switch(contiguous_elements){
983 case DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
984 /* Use default contiguous mem */
985 boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
986 break;
987 case DL_MULTIALLOC_ALL_CONTIGUOUS:
988 boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
989 break;
990 default:
991 if(max_size < contiguous_elements){
992 return 0;
993 }
994 else{
995 /* The suggested buffer is just the element count times the element size */
996 boost_cont_multialloc_segmented_malloc_size = element_size*contiguous_elements;
997 }
998 }
999
1000 {
1001 size_t i;
1002 size_t next_i;
1003 /*
1004 Allocate the aggregate chunk. First disable direct-mmapping so
1005 malloc won't use it, since we would not be able to later
1006 free/realloc space internal to a segregated mmap region.
1007 */
1008 was_enabled = use_mmap(m);
1009 disable_mmap(m);
1010 for(i = 0, next_i = 0; i != n_elements; i = next_i)
1011 {
1012 int error = 0;
1013 size_t accum_size;
1014 for(accum_size = 0; next_i != n_elements; ++next_i){
1015 size_t cur_array_size = sizes[next_i];
1016 if(max_size < cur_array_size){
1017 error = 1;
1018 break;
1019 }
1020 else{
1021 size_t reqsize = request2size(cur_array_size*element_size);
1022 if(((boost_cont_multialloc_segmented_malloc_size - CHUNK_OVERHEAD) - accum_size) < reqsize){
1023 if(!accum_size){
1024 accum_size += reqsize;
1025 ++next_i;
1026 }
1027 break;
1028 }
1029 accum_size += reqsize;
1030 }
1031 }
1032
1033 mem = error ? 0 : mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
1034 if (mem == 0){
1035 boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
1036 while(i--){
1037 void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
1038 BOOST_CONTAINER_MEMIT_NEXT(it);
1039 mspace_free_lockless(m, addr);
1040 }
1041 if (was_enabled)
1042 enable_mmap(m);
1043 return 0;
1044 }
1045 p = mem2chunk(mem);
1046 remainder_size = chunksize(p);
1047 s_allocated_memory += remainder_size;
1048
1049 assert(!is_mmapped(p));
1050
1051 { /* split out elements */
1052 void *mem_orig = mem;
1053 boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
1054 size_t num_elements = next_i-i;
1055
1056 for(++i; i != next_i; ++i) {
1057 void **mem_prev = ((void**)mem);
1058 size = request2size(sizes[i]*element_size);
1059 remainder_size -= size;
1060 set_size_and_pinuse_of_inuse_chunk(m, p, size);
1061 p = chunk_plus_offset(p, size);
1062 mem = chunk2mem(p);
1063 *mem_prev = mem;
1064 }
1065 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
1066 BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
1067 }
1068 }
1069 if (was_enabled)
1070 enable_mmap(m);
1071 }
1072 return 1;
1073 }
1074
1075 int boost_cont_multialloc_arrays
1076 (size_t n_elements, const size_t *sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1077 {
1078 int ret = 0;
1079 mstate ms = (mstate)gm;
1080 ensure_initialization();
1081 if (!ok_magic(ms)) {
1082 USAGE_ERROR_ACTION(ms,ms);
1083 }
1084 else if (!PREACTION(ms)) {
1085 ret = internal_multialloc_arrays(ms, n_elements, sizes, element_size, contiguous_elements, pchain);
1086 POSTACTION(ms);
1087 }
1088 return ret;
1089 }
1090
1091
1092 /*Doug Lea malloc extensions*/
1093 static boost_cont_malloc_stats_t get_malloc_stats(mstate m)
1094 {
1095 boost_cont_malloc_stats_t ret = { 0, 0, 0 };
1096 ensure_initialization();
1097 if (!PREACTION(m)) {
1098 size_t maxfp = 0;
1099 size_t fp = 0;
1100 size_t used = 0;
1101 check_malloc_state(m);
1102 if (is_initialized(m)) {
1103 msegmentptr s = &m->seg;
1104 maxfp = m->max_footprint;
1105 fp = m->footprint;
1106 used = fp - (m->topsize + TOP_FOOT_SIZE);
1107
1108 while (s != 0) {
1109 mchunkptr q = align_as_chunk(s->base);
1110 while (segment_holds(s, q) &&
1111 q != m->top && q->head != FENCEPOST_HEAD) {
1112 if (!cinuse(q))
1113 used -= chunksize(q);
1114 q = next_chunk(q);
1115 }
1116 s = s->next;
1117 }
1118 }
1119
1120 ret.max_system_bytes = maxfp;
1121 ret.system_bytes = fp;
1122 ret.in_use_bytes = used;
1123 POSTACTION(m);
1124 }
1125 return ret;
1126 }
1127
1128 size_t boost_cont_size(const void *p)
1129 { return DL_SIZE_IMPL(p); }
1130
1131 void* boost_cont_malloc(size_t bytes)
1132 {
1133 size_t received_bytes;
1134 ensure_initialization();
1135 return boost_cont_allocation_command
1136 (BOOST_CONTAINER_ALLOCATE_NEW, 1, bytes, bytes, &received_bytes, 0).first;
1137 }
1138
1139 void boost_cont_free(void* mem)
1140 {
1141 mstate ms = (mstate)gm;
1142 if (!ok_magic(ms)) {
1143 USAGE_ERROR_ACTION(ms,ms);
1144 }
1145 else if (!PREACTION(ms)) {
1146 mspace_free_lockless(ms, mem);
1147 POSTACTION(ms);
1148 }
1149 }
1150
1151 void* boost_cont_memalign(size_t bytes, size_t alignment)
1152 {
1153 void *addr;
1154 ensure_initialization();
1155 addr = mspace_memalign(gm, alignment, bytes);
1156 if(addr){
1157 s_allocated_memory += chunksize(mem2chunk(addr));
1158 }
1159 return addr;
1160 }
1161
1162 int boost_cont_multialloc_nodes
1163 (size_t n_elements, size_t elem_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1164 {
1165 int ret = 0;
1166 mstate ms = (mstate)gm;
1167 ensure_initialization();
1168 if (!ok_magic(ms)) {
1169 USAGE_ERROR_ACTION(ms,ms);
1170 }
1171 else if (!PREACTION(ms)) {
1172 ret = internal_node_multialloc(ms, n_elements, elem_size, contiguous_elements, pchain);
1173 POSTACTION(ms);
1174 }
1175 return ret;
1176 }
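#if 0 /* Illustrative sketch (added, not compiled): allocate a chain of equally sized
         nodes through the public API and release them again. The iteration pattern
         mirrors the one used by internal_multialloc_free above. */
static void example_multialloc_nodes(void)
{
   boost_cont_memchain chain;
   BOOST_CONTAINER_MEMCHAIN_INIT(&chain);
   if(boost_cont_multialloc_nodes(10, sizeof(double), DL_MULTIALLOC_DEFAULT_CONTIGUOUS, &chain)){
      boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(&chain);
      while(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(&chain, it)){
         void *node = BOOST_CONTAINER_MEMIT_ADDR(it);   /* usable size >= sizeof(double) */
         (void)node; /* ...use the node... */
         BOOST_CONTAINER_MEMIT_NEXT(it);
      }
      boost_cont_multidealloc(&chain);
   }
}
#endif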
1177
1178 size_t boost_cont_footprint()
1179 {
1180 return ((mstate)gm)->footprint;
1181 }
1182
1183 size_t boost_cont_allocated_memory()
1184 {
1185 size_t alloc_mem = 0;
1186 mstate m = (mstate)gm;
1187 ensure_initialization();
1188    if (!ok_magic(m)) {
1189       USAGE_ERROR_ACTION(m,m);
1190 }
1191
1192
1193 if (!PREACTION(m)) {
1194 check_malloc_state(m);
1195 if (is_initialized(m)) {
1196 size_t nfree = SIZE_T_ONE; /* top always free */
1197 size_t mfree = m->topsize + TOP_FOOT_SIZE;
1198 size_t sum = mfree;
1199 msegmentptr s = &m->seg;
1200 while (s != 0) {
1201 mchunkptr q = align_as_chunk(s->base);
1202 while (segment_holds(s, q) &&
1203 q != m->top && q->head != FENCEPOST_HEAD) {
1204 size_t sz = chunksize(q);
1205 sum += sz;
1206 if (!is_inuse(q)) {
1207 mfree += sz;
1208 ++nfree;
1209 }
1210 q = next_chunk(q);
1211 }
1212 s = s->next;
1213 }
1214 {
1215 size_t uordblks = m->footprint - mfree;
1216 if(nfree)
1217 alloc_mem = (size_t)(uordblks - (nfree-1)*TOP_FOOT_SIZE);
1218 else
1219 alloc_mem = uordblks;
1220 }
1221 }
1222
1223 POSTACTION(m);
1224 }
1225 return alloc_mem;
1226 }
1227
1228 size_t boost_cont_chunksize(const void *p)
1229 { return chunksize(mem2chunk(p)); }
1230
1231 int boost_cont_all_deallocated()
1232 { return !s_allocated_memory; }
1233
1234 boost_cont_malloc_stats_t boost_cont_malloc_stats()
1235 {
1236 mstate ms = (mstate)gm;
1237 if (ok_magic(ms)) {
1238 return get_malloc_stats(ms);
1239 }
1240 else {
1241 boost_cont_malloc_stats_t r = { 0, 0, 0 };
1242 USAGE_ERROR_ACTION(ms,ms);
1243 return r;
1244 }
1245 }
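#if 0 /* Illustrative sketch (added, not compiled; assumes <stdio.h>): querying the
         global allocator statistics. */
static void example_print_stats(void)
{
   boost_cont_malloc_stats_t st = boost_cont_malloc_stats();
   printf( "system bytes: %lu, peak system bytes: %lu, in-use bytes: %lu\n"
         , (unsigned long)st.system_bytes
         , (unsigned long)st.max_system_bytes
         , (unsigned long)st.in_use_bytes);
}
#endif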
1246
1247 size_t boost_cont_in_use_memory()
1248 { return s_allocated_memory; }
1249
1250 int boost_cont_trim(size_t pad)
1251 {
1252 ensure_initialization();
1253 return dlmalloc_trim(pad);
1254 }
1255
1256 int boost_cont_grow
1257 (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received)
1258 {
1259 mstate ms = (mstate)gm;
1260 if (!ok_magic(ms)) {
1261 USAGE_ERROR_ACTION(ms,ms);
1262 return 0;
1263 }
1264
1265 if (!PREACTION(ms)) {
1266 mchunkptr p = mem2chunk(oldmem);
1267 size_t oldsize = chunksize(p);
1268 p = try_realloc_chunk_with_min(ms, p, request2size(minbytes), request2size(maxbytes), 0);
1269 POSTACTION(ms);
1270 if(p){
1271 check_inuse_chunk(ms, p);
1272 *received = DL_SIZE_IMPL(oldmem);
1273 s_allocated_memory += chunksize(p) - oldsize;
1274 }
1275 return 0 != p;
1276 }
1277 return 0;
1278 }
1279
1280 int boost_cont_shrink
1281 (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received, int do_commit)
1282 {
1283 mstate ms = (mstate)gm;
1284 if (!ok_magic(ms)) {
1285 USAGE_ERROR_ACTION(ms,ms);
1286 return 0;
1287 }
1288
1289 if (!PREACTION(ms)) {
1290 int ret = internal_shrink(ms, oldmem, minbytes, maxbytes, received, do_commit);
1291 POSTACTION(ms);
1292 return 0 != ret;
1293 }
1294 return 0;
1295 }
1296
1297
1298 void* boost_cont_alloc
1299 (size_t minbytes, size_t preferred_bytes, size_t *received_bytes)
1300 {
1301 //ensure_initialization provided by boost_cont_allocation_command
1302 return boost_cont_allocation_command
1303 (BOOST_CONTAINER_ALLOCATE_NEW, 1, minbytes, preferred_bytes, received_bytes, 0).first;
1304 }
1305
1306 void boost_cont_multidealloc(boost_cont_memchain *pchain)
1307 {
1308 mstate ms = (mstate)gm;
1309 if (!ok_magic(ms)) {
1310 (void)ms;
1311 USAGE_ERROR_ACTION(ms,ms);
1312 }
1313 internal_multialloc_free(ms, pchain);
1314 }
1315
1316 int boost_cont_malloc_check()
1317 {
1318 #ifdef DEBUG
1319 mstate ms = (mstate)gm;
1320 ensure_initialization();
1321 if (!ok_magic(ms)) {
1322 (void)ms;
1323 USAGE_ERROR_ACTION(ms,ms);
1324 return 0;
1325 }
1326 check_malloc_state(ms);
1327 return 1;
1328 #else
1329 return 1;
1330 #endif
1331 }
1332
1333
1334 boost_cont_command_ret_t boost_cont_allocation_command
1335 (allocation_type command, size_t sizeof_object, size_t limit_size
1336 , size_t preferred_size, size_t *received_size, void *reuse_ptr)
1337 {
1338 boost_cont_command_ret_t ret = { 0, 0 };
1339 ensure_initialization();
1340 if(command & (BOOST_CONTAINER_SHRINK_IN_PLACE | BOOST_CONTAINER_TRY_SHRINK_IN_PLACE)){
1341 int success = boost_cont_shrink( reuse_ptr, preferred_size, limit_size
1342 , received_size, (command & BOOST_CONTAINER_SHRINK_IN_PLACE));
1343 ret.first = success ? reuse_ptr : 0;
1344 return ret;
1345 }
1346
1347 *received_size = 0;
1348
1349 if(limit_size > preferred_size)
1350 return ret;
1351
1352 {
1353 mstate ms = (mstate)gm;
1354
1355 /*Expand in place*/
1356 if (!PREACTION(ms)) {
1357 #if FOOTERS
1358 if(reuse_ptr){
1359 mstate m = get_mstate_for(mem2chunk(reuse_ptr));
1360 if (!ok_magic(m)) {
1361 USAGE_ERROR_ACTION(m, reuse_ptr);
1362 return ret;
1363 }
1364 }
1365 #endif
1366 if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1367 void *r = internal_grow_both_sides
1368 ( ms, command, reuse_ptr, limit_size
1369 , preferred_size, received_size, sizeof_object, 1);
1370 if(r){
1371 ret.first = r;
1372 ret.second = 1;
1373 goto postaction;
1374 }
1375 }
1376
1377 if(command & BOOST_CONTAINER_ALLOCATE_NEW){
1378 void *addr = mspace_malloc_lockless(ms, preferred_size);
1379 if(!addr) addr = mspace_malloc_lockless(ms, limit_size);
1380 if(addr){
1381 s_allocated_memory += chunksize(mem2chunk(addr));
1382 *received_size = DL_SIZE_IMPL(addr);
1383 }
1384 ret.first = addr;
1385 ret.second = 0;
1386 if(addr){
1387 goto postaction;
1388 }
1389 }
1390
1391 //Now try to expand both sides with min size
1392 if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1393 void *r = internal_grow_both_sides
1394 ( ms, command, reuse_ptr, limit_size
1395 , preferred_size, received_size, sizeof_object, 0);
1396 if(r){
1397 ret.first = r;
1398 ret.second = 1;
1399 goto postaction;
1400 }
1401 }
1402 postaction:
1403 POSTACTION(ms);
1404 }
1405 }
1406 return ret;
1407 }
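#if 0 /* Illustrative sketch (added, not compiled): a typical allocation_command call,
         the way boost::container's extended allocators drive it. `buf`, `min_elems`
         and `pref_elems` are hypothetical. It first tries to expand `buf` forward or
         backward to at least min_elems*sizeof(double) bytes (preferring
         pref_elems*sizeof(double)) and otherwise returns a brand new allocation. */
static void *example_expand_or_allocate
   (void *buf, size_t min_elems, size_t pref_elems, size_t *received, int *expanded)
{
   boost_cont_command_ret_t r = boost_cont_allocation_command
      ( BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD | BOOST_CONTAINER_ALLOCATE_NEW
      , sizeof(double)             /* sizeof_object: the backwards expansion multiple */
      , min_elems*sizeof(double)   /* limit_size: the minimum acceptable size */
      , pref_elems*sizeof(double)  /* preferred_size */
      , received, buf);
   /* r.second != 0 -> buf's block was expanded in place (the address may change when
      growing backwards); r.second == 0 -> r.first is a brand new block (or 0 on failure) */
   *expanded = r.second;
   return r.first;
}
#endif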
1408
1409 int boost_cont_mallopt(int param_number, int value)
1410 {
1411 return change_mparam(param_number, value);
1412 }
1413
1414 void *boost_cont_sync_create()
1415 {
1416 void *p = boost_cont_malloc(sizeof(MLOCK_T));
1417 if(p){
1418 if(0 != INITIAL_LOCK((MLOCK_T*)p)){
1419 boost_cont_free(p);
1420 p = 0;
1421 }
1422 }
1423 return p;
1424 }
1425
1426 void boost_cont_sync_destroy(void *sync)
1427 {
1428 if(sync){
1429 (void)DESTROY_LOCK((MLOCK_T*)sync);
1430 boost_cont_free(sync);
1431 }
1432 }
1433
1434 int boost_cont_sync_lock(void *sync)
1435 { return 0 == (ACQUIRE_LOCK((MLOCK_T*)sync)); }
1436
1437 void boost_cont_sync_unlock(void *sync)
1438 { RELEASE_LOCK((MLOCK_T*)sync); }
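#if 0 /* Illustrative sketch (added, not compiled): the portable mutex helpers above.
         boost_cont_sync_lock returns non-zero on success. */
static void example_sync(void)
{
   void *mtx = boost_cont_sync_create();
   if(mtx){
      if(boost_cont_sync_lock(mtx)){
         /* ...critical section... */
         boost_cont_sync_unlock(mtx);
      }
      boost_cont_sync_destroy(mtx);
   }
}
#endif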
1439
1440 int boost_cont_global_sync_lock()
1441 {
1442 int ret;
1443 ensure_initialization();
1444 ret = ACQUIRE_MALLOC_GLOBAL_LOCK();
1445 return 0 == ret;
1446 }
1447
1448 void boost_cont_global_sync_unlock()
1449 {
1450 RELEASE_MALLOC_GLOBAL_LOCK()
1451 }
1452
1453 //#ifdef DL_DEBUG_DEFINED
1454 // #undef DEBUG
1455 //#endif
1456
1457 #ifdef _MSC_VER
1458 #pragma warning (pop)
1459 #endif