/* (A git-blame export header was embedded here; replaced with a valid C comment.) */
1 | ////////////////////////////////////////////////////////////////////////////// |
2 | // | |
3 | // (C) Copyright Ion Gaztanaga 2007-2015. Distributed under the Boost | |
4 | // Software License, Version 1.0. (See accompanying file | |
5 | // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) | |
6 | // | |
7 | // See http://www.boost.org/libs/container for documentation. | |
8 | // | |
9 | ////////////////////////////////////////////////////////////////////////////// | |
10 | ||
11 | #include <boost/container/detail/alloc_lib.h> | |
12 | ||
13 | #include "errno.h" //dlmalloc bug EINVAL is used in posix_memalign without checking LACKS_ERRNO_H | |
14 | #include "limits.h" //CHAR_BIT | |
/* Configuration switches for the dlmalloc sources included below.
   These must be defined before dlmalloc_2_8_6.c is included. */
#ifdef BOOST_CONTAINER_DLMALLOC_FOOTERS
#define FOOTERS 1            /* per-chunk footers: each chunk records its owning mspace */
#endif
#define USE_LOCKS 1          /* compile dlmalloc's locking support in */
#define MSPACES 1            /* enable independent allocation spaces (mspace API) */
#define NO_MALLINFO 1        /* do not provide the mallinfo() statistics interface */
#define NO_MALLOC_STATS 1    /* do not compile malloc_stats() printing support */
//disable sbrk as it's deprecated in some systems and weakens ASLR
#define HAVE_MORECORE 0

/* In debug builds (no NDEBUG) turn on dlmalloc's internal consistency
   checks; DL_DEBUG_DEFINED records that we defined DEBUG ourselves. */
#if !defined(NDEBUG)
#if !defined(DEBUG)
#define DEBUG 1
#define DL_DEBUG_DEFINED
#endif
#endif

/* Prefix all dlmalloc entry points with "dl" to avoid clashing with the
   C runtime's malloc family. */
#define USE_DL_PREFIX

/* dlmalloc expects a FORCEINLINE definition; plain inline suffices for GCC */
#ifdef __GNUC__
#define FORCEINLINE inline
#endif
7c673cae FG |
38 | |
/* Silence MSVC warnings triggered by the third-party dlmalloc sources.
   NOTE(review): 4127 and 4702 are disabled more than once — harmless but
   redundant; kept byte-identical here. */
#ifdef _MSC_VER
#pragma warning (push)
#pragma warning (disable : 4127)
#pragma warning (disable : 4267)
#pragma warning (disable : 4127)
#pragma warning (disable : 4702)
#pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
#pragma warning (disable : 4251 4231 4660) /*dll warnings*/
#pragma warning (disable : 4057) /*differs in indirection to slightly different base types from*/
#pragma warning (disable : 4702) /*unreachable code*/
#pragma warning (disable : 4127) /*conditional expression is constant*/
#endif

/* The whole dlmalloc implementation is compiled into this translation unit
   so the functions below can use its internal types and macros. */
#include "dlmalloc_2_8_6.c"

/* Usable payload bytes of the allocation holding pointer p:
   the chunk size minus dlmalloc's per-chunk bookkeeping overhead. */
#define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))

/* Running total of chunk bytes handed out by this allocator.
   NOTE(review): plain (non-atomic) variable; presumably protected by the
   callers' locking discipline — confirm against the callers. */
static size_t s_allocated_memory;
57 | ||
58 | /////////////////////////////////////////////////////////////// | |
59 | /////////////////////////////////////////////////////////////// | |
60 | /////////////////////////////////////////////////////////////// | |
61 | // | |
62 | // SLIGHTLY MODIFIED DLMALLOC FUNCTIONS | |
63 | // | |
64 | /////////////////////////////////////////////////////////////// | |
65 | /////////////////////////////////////////////////////////////// | |
66 | /////////////////////////////////////////////////////////////// | |
67 | ||
//This function is equal to mspace_free
//replacing PREACTION with 0 and POSTACTION with nothing,
//i.e. it performs no locking: the caller must hold any lock needed.
static void mspace_free_lockless(mspace msp, void* mem)
{
   if (mem != 0) {
      mchunkptr p = mem2chunk(mem);
#if FOOTERS
      /* With footers the owning mspace is recovered from the chunk itself */
      mstate fm = get_mstate_for(p);
      msp = msp; /* placate people compiling -Wunused */
#else /* FOOTERS */
      mstate fm = (mstate)msp;
#endif /* FOOTERS */
      if (!ok_magic(fm)) {
         USAGE_ERROR_ACTION(fm, p);
         return;
      }
      if (!0){//PREACTION(fm)) {
         check_inuse_chunk(fm, p);
         if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
            size_t psize = chunksize(p);
            mchunkptr next = chunk_plus_offset(p, psize);
            /* Coalesce backward with the previous chunk if it is free */
            if (!pinuse(p)) {
               size_t prevsize = p->prev_foot;
               if (is_mmapped(p)) {
                  /* mmapped chunk: return the whole region to the OS */
                  psize += prevsize + MMAP_FOOT_PAD;
                  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
                     fm->footprint -= psize;
                  goto postaction;
               }
               else {
                  mchunkptr prev = chunk_minus_offset(p, prevsize);
                  psize += prevsize;
                  p = prev;
                  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
                     if (p != fm->dv) {
                        unlink_chunk(fm, p, prevsize);
                     }
                     else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                        /* merged into the designated victim; just grow it */
                        fm->dvsize = psize;
                        set_free_with_pinuse(p, psize, next);
                        goto postaction;
                     }
                  }
                  else
                     goto erroraction;
               }
            }

            if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
               if (!cinuse(next)) { /* consolidate forward */
                  if (next == fm->top) {
                     /* merged into top: maybe give memory back to the system */
                     size_t tsize = fm->topsize += psize;
                     fm->top = p;
                     p->head = tsize | PINUSE_BIT;
                     if (p == fm->dv) {
                        fm->dv = 0;
                        fm->dvsize = 0;
                     }
                     if (should_trim(fm, tsize))
                        sys_trim(fm, 0);
                     goto postaction;
                  }
                  else if (next == fm->dv) {
                     size_t dsize = fm->dvsize += psize;
                     fm->dv = p;
                     set_size_and_pinuse_of_free_chunk(p, dsize);
                     goto postaction;
                  }
                  else {
                     size_t nsize = chunksize(next);
                     psize += nsize;
                     unlink_chunk(fm, next, nsize);
                     set_size_and_pinuse_of_free_chunk(p, psize);
                     if (p == fm->dv) {
                        fm->dvsize = psize;
                        goto postaction;
                     }
                  }
               }
               else
                  set_free_with_pinuse(p, psize, next);

               /* Insert the (possibly coalesced) free chunk into the matching
                  bin: smallbin for small sizes, treebin for large ones */
               if (is_small(psize)) {
                  insert_small_chunk(fm, p, psize);
                  check_free_chunk(fm, p);
               }
               else {
                  tchunkptr tp = (tchunkptr)p;
                  insert_large_chunk(fm, tp, psize);
                  check_free_chunk(fm, p);
                  /* periodically scan for fully-free segments to unmap */
                  if (--fm->release_checks == 0)
                     release_unused_segments(fm);
               }
               goto postaction;
            }
         }
erroraction:
         USAGE_ERROR_ACTION(fm, p);
postaction:
         ;//POSTACTION(fm);
      }
   }
}
171 | ||
//This function is equal to mspace_malloc
//replacing PREACTION with 0 and POSTACTION with nothing,
//i.e. it performs no locking: the caller must hold any lock needed.
//Returns a payload pointer or 0 on failure.
void* mspace_malloc_lockless(mspace msp, size_t bytes)
{
   mstate ms = (mstate)msp;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
   }
   if (!0){//PREACTION(ms)) {
      void* mem;
      size_t nb;
      /* Small request: exact smallbin, neighbour smallbin, dv or small treebin */
      if (bytes <= MAX_SMALL_REQUEST) {
         bindex_t idx;
         binmap_t smallbits;
         nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
         idx = small_index(nb);
         smallbits = ms->smallmap >> idx;

         if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
            mchunkptr b, p;
            idx += ~smallbits & 1;       /* Uses next bin if idx empty */
            b = smallbin_at(ms, idx);
            p = b->fd;
            assert(chunksize(p) == small_index2size(idx));
            unlink_first_small_chunk(ms, b, p, idx);
            set_inuse_and_pinuse(ms, p, small_index2size(idx));
            mem = chunk2mem(p);
            check_malloced_chunk(ms, mem, nb);
            goto postaction;
         }

         else if (nb > ms->dvsize) {
            if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
               mchunkptr b, p, r;
               size_t rsize;
               bindex_t i;
               binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
               binmap_t leastbit = least_bit(leftbits);
               compute_bit2idx(leastbit, i);
               b = smallbin_at(ms, i);
               p = b->fd;
               assert(chunksize(p) == small_index2size(i));
               unlink_first_small_chunk(ms, b, p, i);
               rsize = small_index2size(i) - nb;
               /* Fit here cannot be remainderless if 4byte sizes */
               if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
                  set_inuse_and_pinuse(ms, p, small_index2size(i));
               else {
                  /* split: remainder becomes the new designated victim */
                  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
                  r = chunk_plus_offset(p, nb);
                  set_size_and_pinuse_of_free_chunk(r, rsize);
                  replace_dv(ms, r, rsize);
               }
               mem = chunk2mem(p);
               check_malloced_chunk(ms, mem, nb);
               goto postaction;
            }

            else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
               check_malloced_chunk(ms, mem, nb);
               goto postaction;
            }
         }
      }
      else if (bytes >= MAX_REQUEST)
         nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
      else {
         /* Large request: search the treebins first */
         nb = pad_request(bytes);
         if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
            check_malloced_chunk(ms, mem, nb);
            goto postaction;
         }
      }

      /* Fallback 1: carve the request out of the designated victim */
      if (nb <= ms->dvsize) {
         size_t rsize = ms->dvsize - nb;
         mchunkptr p = ms->dv;
         if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
            mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
            ms->dvsize = rsize;
            set_size_and_pinuse_of_free_chunk(r, rsize);
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
         }
         else { /* exhaust dv */
            size_t dvs = ms->dvsize;
            ms->dvsize = 0;
            ms->dv = 0;
            set_inuse_and_pinuse(ms, p, dvs);
         }
         mem = chunk2mem(p);
         check_malloced_chunk(ms, mem, nb);
         goto postaction;
      }

      /* Fallback 2: carve out of the top (wilderness) chunk */
      else if (nb < ms->topsize) { /* Split top */
         size_t rsize = ms->topsize -= nb;
         mchunkptr p = ms->top;
         mchunkptr r = ms->top = chunk_plus_offset(p, nb);
         r->head = rsize | PINUSE_BIT;
         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
         mem = chunk2mem(p);
         check_top_chunk(ms, ms->top);
         check_malloced_chunk(ms, mem, nb);
         goto postaction;
      }

      /* Last resort: get more memory from the system */
      mem = sys_alloc(ms, nb);

   postaction:
      ;//POSTACTION(ms);
      return mem;
   }

   return 0;
}
288 | ||
//This function is equal to try_realloc_chunk but handling
//minimum and desired bytes:
//tries to resize chunk p in place so that it holds at least min_nb and
//at most des_nb (chunk sizes, already padded). Returns the resized chunk
//(always p itself for non-mmapped chunks) or 0 if in-place resize fails.
static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb, size_t des_nb, int can_move)
{
   mchunkptr newp = 0;
   size_t oldsize = chunksize(p);
   mchunkptr next = chunk_plus_offset(p, oldsize);
   if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
               ok_next(p, next) && ok_pinuse(next))) {
      if (is_mmapped(p)) {
         newp = mmap_resize(m, p, des_nb, can_move);
         if(!newp)   //mmap does not return how many bytes we could reallocate, so go the minimum
            newp = mmap_resize(m, p, min_nb, can_move);
      }
      else if (oldsize >= min_nb) { /* already big enough */
         size_t nb = oldsize >= des_nb ? des_nb : oldsize;
         size_t rsize = oldsize - nb;
         if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
            mchunkptr r = chunk_plus_offset(p, nb);
            set_inuse(m, p, nb);
            set_inuse(m, r, rsize);
            dispose_chunk(m, r, rsize);
         }
         newp = p;
      }
      else if (next == m->top) { /* extend into top */
         if (oldsize + m->topsize > min_nb) {
            /* take des_nb if top is big enough, else take almost everything,
               leaving MALLOC_ALIGNMENT bytes so top never becomes empty */
            size_t nb = (oldsize + m->topsize) > des_nb ? des_nb : (oldsize + m->topsize - MALLOC_ALIGNMENT);
            size_t newsize = oldsize + m->topsize;
            size_t newtopsize = newsize - nb;
            mchunkptr newtop = chunk_plus_offset(p, nb);
            set_inuse(m, p, nb);
            newtop->head = newtopsize |PINUSE_BIT;
            m->top = newtop;
            m->topsize = newtopsize;
            newp = p;
         }
      }
      else if (next == m->dv) { /* extend into dv */
         size_t dvs = m->dvsize;
         if (oldsize + dvs >= min_nb) {
            size_t nb = (oldsize + dvs) >= des_nb ? des_nb : (oldsize + dvs);
            size_t dsize = oldsize + dvs - nb;
            if (dsize >= MIN_CHUNK_SIZE) {
               /* split: the remainder stays as the designated victim */
               mchunkptr r = chunk_plus_offset(p, nb);
               mchunkptr n = chunk_plus_offset(r, dsize);
               set_inuse(m, p, nb);
               set_size_and_pinuse_of_free_chunk(r, dsize);
               clear_pinuse(n);
               m->dvsize = dsize;
               m->dv = r;
            }
            else { /* exhaust dv */
               size_t newsize = oldsize + dvs;
               set_inuse(m, p, newsize);
               m->dvsize = 0;
               m->dv = 0;
            }
            newp = p;
         }
      }
      else if (!cinuse(next)) { /* extend into next free chunk */
         size_t nextsize = chunksize(next);
         if (oldsize + nextsize >= min_nb) {
            size_t nb = (oldsize + nextsize) >= des_nb ? des_nb : (oldsize + nextsize);
            size_t rsize = oldsize + nextsize - nb;
            unlink_chunk(m, next, nextsize);
            if (rsize < MIN_CHUNK_SIZE) {
               /* remainder too small to stand alone: absorb it */
               size_t newsize = oldsize + nextsize;
               set_inuse(m, p, newsize);
            }
            else {
               mchunkptr r = chunk_plus_offset(p, nb);
               set_inuse(m, p, nb);
               set_inuse(m, r, rsize);
               dispose_chunk(m, r, rsize);
            }
            newp = p;
         }
      }
   }
   else {
      USAGE_ERROR_ACTION(m, chunk2mem(p));
   }
   return newp;
}
375 | ||
7c673cae FG |
376 | /////////////////////////////////////////////////////////////// |
377 | /////////////////////////////////////////////////////////////// | |
378 | /////////////////////////////////////////////////////////////// | |
379 | // | |
380 | // NEW FUNCTIONS BASED ON DLMALLOC INTERNALS | |
381 | // | |
382 | /////////////////////////////////////////////////////////////// | |
383 | /////////////////////////////////////////////////////////////// | |
384 | /////////////////////////////////////////////////////////////// | |
385 | ||
/* Round ORIG_SIZE down/up to a multiple of ROUNDTO (general form, and
   faster *_PO2_* forms that require ROUNDTO to be a power of two).
   All macro arguments are fully parenthesized: the previous definitions
   expanded ROUNDTO bare inside `~(ROUNDTO-1)` and `+ ROUNDTO`, which
   miscomputed for expression arguments such as `1<<3`. */
#define GET_TRUNCATED_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE)/(ROUNDTO)*(ROUNDTO))
#define GET_ROUNDED_SIZE(ORIG_SIZE, ROUNDTO) ((((ORIG_SIZE)-1)/(ROUNDTO)+1)*(ROUNDTO))
#define GET_TRUNCATED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE) & (~((ROUNDTO)-1)))
#define GET_ROUNDED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((((ORIG_SIZE)-1) & (~((ROUNDTO)-1))) + (ROUNDTO))

/* Greatest common divisor and least common multiple
   gcd is an algorithm that calculates the greatest common divisor of two
   integers, using Euclid's algorithm.

   Pre: A > 0 && B > 0
   Recommended: A > B
   Wrapped in do { } while(0) so the macros behave like single statements
   (safe inside unbraced if/else); arguments are evaluated exactly once
   into locals, except CALCULATE_LCM which re-evaluates A and B. */
#define CALCULATE_GCD(A, B, OUT)\
   do{\
      size_t a = (A);\
      size_t b = (B);\
      do\
      {\
         size_t tmp = b;\
         b = a % b;\
         a = tmp;\
      } while (b != 0);\
      \
      (OUT) = a;\
   }while(0)

/* lcm is an algorithm that calculates the least common multiple of two
   integers.

   Pre: A > 0 && B > 0
   Recommended: A > B */
#define CALCULATE_LCM(A, B, OUT)\
   do{\
      CALCULATE_GCD(A, B, OUT);\
      (OUT) = ((A) / (OUT))*(B);\
   }while(0)
421 | ||
/* Computes, for a backward expansion request:
   - *plcm: the least common multiple of backwards_multiple and
     MALLOC_ALIGNMENT (special-cased for powers of two and for multiples
     of the alignment, half-alignment and quarter-alignment), and
   - *pneeds_backwards_lcmed: how many extra bytes must be taken before the
     current buffer so that the total reaches size_to_achieve while the new
     start remains a multiple of both backwards_multiple and the alignment.
   received_size is the currently usable byte count. Returns 1 on success,
   0 when size_to_achieve is not compatible with a power-of-two multiple. */
static int calculate_lcm_and_needs_backwards_lcmed
   (size_t backwards_multiple, size_t received_size, size_t size_to_achieve,
    size_t *plcm, size_t *pneeds_backwards_lcmed)
{
   /* Now calculate lcm */
   size_t max = backwards_multiple;
   size_t min = MALLOC_ALIGNMENT;
   size_t needs_backwards;
   size_t needs_backwards_lcmed;
   size_t lcm;
   size_t current_forward;
   /*Swap if necessary*/
   if(max < min){
      size_t tmp = min;
      min = max;
      max = tmp;
   }
   /*Check if it's power of two*/
   if((backwards_multiple & (backwards_multiple-1)) == 0){
      if(0 != (size_to_achieve & ((backwards_multiple-1)))){
         /* NOTE(review): 'm' and 'oldp' are not declared in this function;
            this only compiles because the default USAGE_ERROR_ACTION
            expansion ignores its arguments — TODO fix upstream. */
         USAGE_ERROR_ACTION(m, oldp);
         return 0;
      }

      lcm = max;
      /*If we want to use minbytes data to get a buffer between maxbytes
        and minbytes if maxbytes can't be achieved, calculate the
        biggest of all possibilities*/
      current_forward = GET_TRUNCATED_PO2_SIZE(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(needs_backwards, lcm);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   /*Check if it's multiple of alignment*/
   else if((backwards_multiple & (MALLOC_ALIGNMENT - 1u)) == 0){
      lcm = backwards_multiple;
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      //No need to round needs_backwards because backwards_multiple == lcm
      needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards_lcmed & (MALLOC_ALIGNMENT - 1u)) == 0);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   /*Check if it's multiple of the half of the alignmment*/
   else if((backwards_multiple & ((MALLOC_ALIGNMENT/2u) - 1u)) == 0){
      lcm = backwards_multiple*2u;
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
      /* one extra backwards_multiple restores alignment (lcm == 2*multiple) */
      if(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
      //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
         needs_backwards_lcmed += backwards_multiple;
      assert((needs_backwards_lcmed % lcm) == 0);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   /*Check if it's multiple of the quarter of the alignmment*/
   else if((backwards_multiple & ((MALLOC_ALIGNMENT/4u) - 1u)) == 0){
      size_t remainder;
      lcm = backwards_multiple*4u;
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
      //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
      //needs_backwards_lcmed += backwards_multiple;
      /* NOTE(review): the shift count MALLOC_ALIGNMENT/8u looks intended to
         map the misalignment to a 1..3 step count — confirm for alignments
         other than 8/16 before relying on this branch. */
      if(0 != (remainder = ((needs_backwards_lcmed & (MALLOC_ALIGNMENT-1))>>(MALLOC_ALIGNMENT/8u)))){
         if(backwards_multiple & MALLOC_ALIGNMENT/2u){
            needs_backwards_lcmed += (remainder)*backwards_multiple;
         }
         else{
            needs_backwards_lcmed += (4-remainder)*backwards_multiple;
         }
      }
      assert((needs_backwards_lcmed % lcm) == 0);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
   else{
      /* General case: true LCM of the two values via Euclid's GCD */
      CALCULATE_LCM(max, min, lcm);
      /*If we want to use minbytes data to get a buffer between maxbytes
        and minbytes if maxbytes can't be achieved, calculate the
        biggest of all possibilities*/
      current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
      needs_backwards = size_to_achieve - current_forward;
      assert((needs_backwards % backwards_multiple) == 0);
      needs_backwards_lcmed = GET_ROUNDED_SIZE(needs_backwards, lcm);
      *plcm = lcm;
      *pneeds_backwards_lcmed = needs_backwards_lcmed;
      return 1;
   }
}
517 | ||
/* Tries to expand the buffer at oldmem in place without copying, forward
   (into the next free chunk / dv / top, via try_realloc_chunk_with_min)
   and/or backward (merging the free chunk that precedes it), as selected
   by the BOOST_CONTAINER_EXPAND_FWD/BWD bits in 'command'.
   On success returns the payload pointer (moved backwards when backward
   expansion was used) and stores the new usable size in *received_size;
   returns 0 on failure. Backward growth only uses amounts that keep the
   new start a multiple of backwards_multiple and the allocator alignment.
   If only_preferred_backwards is nonzero, backward sizing targets maxbytes
   instead of minbytes. Updates s_allocated_memory by the grown amount. */
static void *internal_grow_both_sides
   (mstate m
   ,allocation_type command
   ,void *oldmem
   ,size_t minbytes
   ,size_t maxbytes
   ,size_t *received_size
   ,size_t backwards_multiple
   ,int only_preferred_backwards)
{
   mchunkptr oldp = mem2chunk(oldmem);
   size_t oldsize = chunksize(oldp);
   *received_size = oldsize - overhead_for(oldp);
   /* Already big enough: nothing to do */
   if(minbytes <= *received_size)
      return oldmem;

   if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp))) {
      if(command & BOOST_CONTAINER_EXPAND_FWD){
         /* Forward expansion first: cheapest, pointer does not move */
         if(try_realloc_chunk_with_min(m, oldp, request2size(minbytes), request2size(maxbytes), 0)){
            check_inuse_chunk(m, oldp);
            *received_size = DL_SIZE_IMPL(oldmem);
            s_allocated_memory += chunksize(oldp) - oldsize;
            return oldmem;
         }
      }
      else{
         *received_size = DL_SIZE_IMPL(oldmem);
         if(*received_size >= maxbytes)
            return oldmem;
      }
      /*
      Should we check this?
      if(backwards_multiple &&
         (0 != (minbytes % backwards_multiple) &&
          0 != (maxbytes % backwards_multiple)) ){
         USAGE_ERROR_ACTION(m, oldp);
         return 0;
      }
      */
      /* We reach here only if forward expansion fails */
      if(!(command & BOOST_CONTAINER_EXPAND_BWD) || pinuse(oldp)){
         return 0;
      }
      {
         size_t prevsize = oldp->prev_foot;
         if ((prevsize & USE_MMAP_BIT) != 0){
            /*Return failure the previous chunk was mmapped.
              mremap does not allow expanding to a fixed address (MREMAP_MAYMOVE) without
              copying (MREMAP_MAYMOVE must be also set).*/
            return 0;
         }
         else {
            mchunkptr prev = chunk_minus_offset(oldp, prevsize);
            size_t dsize = oldsize + prevsize;
            size_t needs_backwards_lcmed;
            size_t lcm;

            /* Let's calculate the number of extra bytes of data before the current
               block's begin. The value is a multiple of backwards_multiple
               and the alignment*/
            if(!calculate_lcm_and_needs_backwards_lcmed
               ( backwards_multiple, *received_size
               , only_preferred_backwards ? maxbytes : minbytes
               , &lcm, &needs_backwards_lcmed)
               || !RTCHECK(ok_address(m, prev))){
               USAGE_ERROR_ACTION(m, oldp);
               return 0;
            }
            /* Check if previous block has enough size */
            else if(prevsize < needs_backwards_lcmed){
               /* preferred size? */
               return 0;
            }
            /* Now take all next space. This must succeed, as we've previously calculated the correct size */
            if(command & BOOST_CONTAINER_EXPAND_FWD){
               if(!try_realloc_chunk_with_min(m, oldp, request2size(*received_size), request2size(*received_size), 0)){
                  assert(0);
               }
               check_inuse_chunk(m, oldp);
               *received_size = DL_SIZE_IMPL(oldmem);
               s_allocated_memory += chunksize(oldp) - oldsize;
               oldsize = chunksize(oldp);
               dsize = oldsize + prevsize;
            }
            /* We need a minimum size to split the previous one */
            if(prevsize >= (needs_backwards_lcmed + MIN_CHUNK_SIZE)){
               /* Split the previous free chunk: its tail is merged into our
                  block, its head remains a (smaller) free chunk */
               mchunkptr r = chunk_minus_offset(oldp, needs_backwards_lcmed);
               size_t rsize = oldsize + needs_backwards_lcmed;
               size_t newprevsize = dsize - rsize;
               int prev_was_dv = prev == m->dv;

               assert(newprevsize >= MIN_CHUNK_SIZE);

               if (prev_was_dv) {
                  m->dvsize = newprevsize;
               }
               else{/* if ((next->head & INUSE_BITS) == INUSE_BITS) { */
                  unlink_chunk(m, prev, prevsize);
                  insert_chunk(m, prev, newprevsize);
               }

               set_size_and_pinuse_of_free_chunk(prev, newprevsize);
               clear_pinuse(r);
               set_inuse(m, r, rsize);
               check_malloced_chunk(m, chunk2mem(r), rsize);
               *received_size = chunksize(r) - overhead_for(r);
               s_allocated_memory += chunksize(r) - oldsize;
               return chunk2mem(r);
            }
            /* Check if there is no place to create a new block and
               the whole new block is multiple of the backwards expansion multiple */
            else if(prevsize >= needs_backwards_lcmed && !(prevsize % lcm)) {
               /* Just merge the whole previous block */
               /* prevsize is multiple of lcm (and backwards_multiple)*/
               *received_size += prevsize;

               if (prev != m->dv) {
                  unlink_chunk(m, prev, prevsize);
               }
               else{
                  m->dvsize = 0;
                  m->dv = 0;
               }
               set_inuse(m, prev, dsize);
               check_malloced_chunk(m, chunk2mem(prev), dsize);
               s_allocated_memory += chunksize(prev) - oldsize;
               return chunk2mem(prev);
            }
            else{
               /* Previous block was big enough but there is no room
                  to create an empty block and taking the whole block does
                  not fulfill alignment requirements */
               return 0;
            }
         }
      }
   }
   else{
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
   }
   return 0;
}
661 | ||
/* This is similar to mmap_resize but:
   * Only to shrink
   * It takes min and max sizes (nbmin/nbmax, already padded chunk sizes)
   * Takes additional 'do_commit' argument to obtain the final
     size before doing the real shrink operation.
   Returns 1 on success (new chunk size stored in *received_size),
   0 on failure or when mremap is unavailable on this platform. */
static int internal_mmap_shrink_in_place(mstate m, mchunkptr oldp, size_t nbmin, size_t nbmax, size_t *received_size, int do_commit)
{
   size_t oldsize = chunksize(oldp);
   *received_size = oldsize;
#if HAVE_MREMAP
   if (is_small(nbmax)) /* Can't shrink mmap regions below small size */
      return 0;
   {
      size_t effective_min = nbmin > MIN_LARGE_SIZE ? nbmin : MIN_LARGE_SIZE;
      /* Keep old chunk if big enough but not too big */
      if (oldsize >= effective_min + SIZE_T_SIZE &&
          (oldsize - effective_min) <= (mparams.granularity << 1))
         return 0;
      /* Now calculate new sizes */
      {
         size_t offset = oldp->prev_foot;
         size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
         size_t newmmsize = mmap_align(effective_min + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
         *received_size = newmmsize;
         /* With do_commit == 0 this is only a size query; the mremap and
            bookkeeping below run on the commit pass.
            NOTE(review): the '!do_commit' guard looks inverted relative to
            the comment above — confirm intent against the callers. */
         if(!do_commit){
            const int flags = 0; /* placate people compiling -Wunused */
            char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                          oldmmsize, newmmsize, flags);
            /*This must always succeed */
            if(!cp){
               USAGE_ERROR_ACTION(m, m);
               return 0;
            }
            {
               mchunkptr newp = (mchunkptr)(cp + offset);
               size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
               newp->head = psize;
               mark_inuse_foot(m, newp, psize);
               /* rebuild the fencepost chunks that terminate an mmap segment */
               chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
               chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

               if (cp < m->least_addr)
                  m->least_addr = cp;
               if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
                  m->max_footprint = m->footprint;
               check_mmapped_chunk(m, newp);
            }
         }
      }
      return 1;
   }
#else //#if HAVE_MREMAP
   (void)m;
   (void)oldp;
   (void)nbmin;
   (void)nbmax;
   (void)received_size;
   (void)do_commit;
   return 0;
#endif //#if HAVE_MREMAP
}
724 | ||
/* Shrinks the allocation at oldmem in place so its usable size lies between
   minbytes and maxbytes. With do_commit == 0 it only computes the resulting
   size (stored in *received_size); with do_commit != 0 it performs the split
   and frees the trailing remainder. Returns 1 on success, 0 on failure
   (bad sizes, remainder too small to split, or validation failure). */
static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxbytes, size_t *received_size, int do_commit)
{
   *received_size = chunksize(mem2chunk(oldmem)) - overhead_for(mem2chunk(oldmem));
   if (minbytes >= MAX_REQUEST || maxbytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
      return 0;
   }
   else if(minbytes < MIN_REQUEST){
      minbytes = MIN_REQUEST;
   }
   if (minbytes > maxbytes) {
      return 0;
   }

   {
      mchunkptr oldp = mem2chunk(oldmem);
      size_t oldsize = chunksize(oldp);
      mchunkptr next = chunk_plus_offset(oldp, oldsize);
      void* extra = 0;

      /* Try to either shrink or extend into top. Else malloc-copy-free*/
      if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
                  ok_next(oldp, next) && ok_pinuse(next))) {
         size_t nbmin = request2size(minbytes);
         size_t nbmax = request2size(maxbytes);

         if (nbmin > oldsize){
            /* Return error if old size is too small */
         }
         else if (is_mmapped(oldp)){
            /* mmapped chunks are shrunk with mremap, if available */
            return internal_mmap_shrink_in_place(m, oldp, nbmin, nbmax, received_size, do_commit);
         }
         else{ // nbmin <= oldsize /* already big enough*/
            size_t nb = nbmin;
            size_t rsize = oldsize - nb;
            /* Only split if the remainder can stand as a free chunk */
            if (rsize >= MIN_CHUNK_SIZE) {
               if(do_commit){
                  mchunkptr remainder = chunk_plus_offset(oldp, nb);
                  set_inuse(m, oldp, nb);
                  set_inuse(m, remainder, rsize);
                  s_allocated_memory -= rsize;
                  extra = chunk2mem(remainder);
                  mspace_free_lockless(m, extra);
                  check_inuse_chunk(m, oldp);
               }
               *received_size = nb - overhead_for(oldp);
               return 1;
            }
         }
      }
      else {
         USAGE_ERROR_ACTION(m, oldmem);
      }
      return 0;
   }
}
781 | ||
7c673cae | 782 | #define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096 |
7c673cae FG |
783 | #define SQRT_MAX_SIZE_T (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2)) |
784 | ||
785 | static int internal_node_multialloc | |
92f5a8d4 TL |
786 | (mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) { |
787 | void* mem; /* malloced aggregate space */ | |
788 | mchunkptr p; /* corresponding chunk */ | |
789 | size_t remainder_size; /* remaining bytes while splitting */ | |
790 | flag_t was_enabled; /* to disable mmap */ | |
791 | size_t elements_per_segment = 0; | |
792 | size_t element_req_size = request2size(element_size); | |
793 | boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain); | |
794 | ||
795 | /*Error if wrong element_size parameter */ | |
796 | if (!element_size || | |
797 | /*OR Error if n_elements less than contiguous_elements */ | |
20effc67 | 798 | ((contiguous_elements + 1) > (BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) || |
92f5a8d4 TL |
799 | /* OR Error if integer overflow */ |
800 | (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) && | |
801 | (MAX_SIZE_T / element_req_size) < contiguous_elements)) { | |
802 | return 0; | |
803 | } | |
804 | switch (contiguous_elements) { | |
20effc67 | 805 | case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS: |
92f5a8d4 TL |
806 | { |
807 | /* Default contiguous, just check that we can store at least one element */ | |
808 | elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM / element_req_size; | |
809 | elements_per_segment += (size_t)(!elements_per_segment); | |
810 | } | |
811 | break; | |
20effc67 | 812 | case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS: |
92f5a8d4 TL |
813 | /* All elements should be allocated in a single call */ |
814 | elements_per_segment = n_elements; | |
815 | break; | |
816 | default: | |
817 | /* Allocate in chunks of "contiguous_elements" */ | |
818 | elements_per_segment = contiguous_elements; | |
819 | } | |
820 | ||
821 | { | |
822 | size_t i; | |
823 | size_t next_i; | |
824 | /* | |
825 | Allocate the aggregate chunk. First disable direct-mmapping so | |
826 | malloc won't use it, since we would not be able to later | |
827 | free/realloc space internal to a segregated mmap region. | |
828 | */ | |
829 | was_enabled = use_mmap(m); | |
830 | disable_mmap(m); | |
831 | for (i = 0; i != n_elements; i = next_i) | |
832 | { | |
833 | size_t accum_size; | |
834 | size_t n_elements_left = n_elements - i; | |
835 | next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment); | |
836 | accum_size = element_req_size * (next_i - i); | |
837 | ||
838 | mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD); | |
839 | if (mem == 0) { | |
840 | BOOST_CONTAINER_MEMIT_NEXT(prev_last_it); | |
841 | while (i) { | |
842 | void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it); | |
843 | --i; | |
844 | BOOST_CONTAINER_MEMIT_NEXT(prev_last_it); | |
845 | s_allocated_memory -= chunksize(mem2chunk(addr)); | |
846 | mspace_free_lockless(m, addr); | |
847 | } | |
848 | if (was_enabled) | |
849 | enable_mmap(m); | |
850 | return 0; | |
851 | } | |
852 | p = mem2chunk(mem); | |
853 | remainder_size = chunksize(p); | |
854 | s_allocated_memory += remainder_size; | |
855 | ||
856 | assert(!is_mmapped(p)); | |
857 | { /* split out elements */ | |
858 | //void *mem_orig = mem; | |
859 | //boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain); | |
860 | size_t num_elements = next_i - i; | |
861 | ||
862 | size_t num_loops = num_elements - 1; | |
863 | remainder_size -= element_req_size * num_loops; | |
864 | while (num_loops) { | |
865 | --num_loops; | |
866 | //void **mem_prev = ((void**)mem); | |
867 | set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size); | |
868 | BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem); | |
869 | p = chunk_plus_offset(p, element_req_size); | |
870 | mem = chunk2mem(p); | |
871 | //*mem_prev = mem; | |
872 | } | |
873 | set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); | |
874 | BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem); | |
875 | //BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements); | |
876 | } | |
877 | } | |
878 | if (was_enabled) | |
879 | enable_mmap(m); | |
880 | } | |
881 | return 1; | |
882 | } | |
7c673cae | 883 | |
92f5a8d4 TL |
884 | #define BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC |
885 | #ifndef BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC | |
7c673cae | 886 | |
92f5a8d4 TL |
887 | #define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \ |
888 | *((void**)(THISMEM)) = *((void**)((NEXTMEM))) | |
7c673cae | 889 | |
92f5a8d4 TL |
//This function is based on internal_bulk_free
//replacing iteration over array[] with boost_cont_memchain.
//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
//After forward merging, backwards merging is also tried
//
//NOTE(review): BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC is unconditionally
//#defined above, so this merging variant is currently compiled out; the simple
//variant below is the one actually built.
static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
{
#if FOOTERS
   /* With footers, nodes may belong to a different mspace; those are not
      freed here but collected and handed back to the caller via *pchain. */
   boost_cont_memchain ret_chain;
   BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
#endif
   if (!PREACTION(m)) {
      boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
      while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate though all memory holded by the chain */
         void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
         mchunkptr a_p = mem2chunk(a_mem);
         size_t psize = chunksize(a_p);
#if FOOTERS
         /* Node owned by another mspace: skip it and remember it for the caller. */
         if (get_mstate_for(a_p) != m) {
            BOOST_CONTAINER_MEMIT_NEXT(a_it);
            BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
            continue;
         }
#endif
         check_inuse_chunk(m, a_p);
         if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
            while (1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
               boost_cont_memchain_it b_it = a_it;
               BOOST_CONTAINER_MEMIT_NEXT(b_it);
               if (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)) {
                  void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
                  mchunkptr b_p = mem2chunk(b_mem);
                  if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
                     psize += chunksize(b_p);
                     set_inuse(m, a_p, psize);
                     /* Unlink b from the chain: a's link jumps over b's node. */
                     BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
                     continue;
                  }
                  if (RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))) {
                     /* b chunk is contiguous and previous so a's size can be added to b */
                     if (a_p == next_chunk(b_p)) {
                        psize += chunksize(b_p);
                        set_inuse(m, b_p, psize);
                        /* The merged chunk now starts at b; continue merging from there. */
                        a_it = b_it;
                        a_p = b_p;
                        a_mem = b_mem;
                        continue;
                     }
                  }
               }
               /* Normal deallocation starts again in the outer loop */
               a_it = b_it;
               s_allocated_memory -= psize;
               dispose_chunk(m, a_p, psize);
               break;
            }
         }
         else {
            CORRUPTION_ERROR_ACTION(m);
            break;
         }
      }
      if (should_trim(m, m->topsize))
         sys_trim(m, 0);
      POSTACTION(m);
   }
#if FOOTERS
   {
      /* Splice the foreign (non-deallocated) nodes back into *pchain
         so the caller can dispose of them in their owning mspace. */
      boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
      BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
      BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
         (pchain
         , last_pchain
         , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
         , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
         , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
         );
   }
#endif
}
7c673cae | 969 | |
92f5a8d4 TL |
970 | #else //BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC |
971 | ||
972 | //This function is based on internal_bulk_free | |
973 | //replacing iteration over array[] with boost_cont_memchain. | |
974 | //Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes. | |
975 | //After forward merging, backwards merging is also tried | |
976 | static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain) | |
977 | { | |
978 | if (!PREACTION(m)) { | |
979 | boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain); | |
980 | while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate though all memory holded by the chain */ | |
981 | void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it); | |
982 | BOOST_CONTAINER_MEMIT_NEXT(a_it); | |
983 | s_allocated_memory -= chunksize(mem2chunk(a_mem)); | |
984 | mspace_free_lockless(m, a_mem); | |
985 | } | |
986 | POSTACTION(m); | |
987 | } | |
7c673cae FG |
988 | } |
989 | ||
92f5a8d4 TL |
990 | #endif //BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC |
991 | ||
7c673cae FG |
/* Allocates n_elements arrays whose element counts are given in sizes[],
   each element being element_size bytes, packing several arrays into one
   aggregate dlmalloc chunk of roughly contiguous_elements*element_size bytes
   per segment.  Allocated arrays are appended to *pchain.
   Returns 1 on success; 0 on error, in which case any partially allocated
   arrays are freed and *pchain is left as the caller passed it.
   Must be called with the mspace lock already held (callers wrap it in
   PREACTION/POSTACTION). */
static int internal_multialloc_arrays
   (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
   void* mem;              /* malloced aggregate space */
   mchunkptr p;            /* corresponding chunk */
   size_t remainder_size;  /* remaining bytes while splitting */
   flag_t was_enabled;     /* to disable mmap */
   size_t size;
   size_t boost_cont_multialloc_segmented_malloc_size;
   size_t max_size;

   /* Check overflow */
   if(!element_size){
      return 0;
   }
   max_size = MAX_REQUEST/element_size;
   /* Choose the target segment size according to the contiguity request */
   switch(contiguous_elements){
      case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
         /* Use default contiguous mem */
         boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
      break;
      case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
         /* Single segment: allow the maximum representable request */
         boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
      break;
      default:
         if(max_size < contiguous_elements){
            return 0;   /* contiguous_elements*element_size would overflow */
         }
         else{
            /* The suggested buffer is just the element count times the element size */
            boost_cont_multialloc_segmented_malloc_size = element_size*contiguous_elements;
         }
   }

   {
      size_t i;
      size_t next_i;
      /*
         Allocate the aggregate chunk. First disable direct-mmapping so
         malloc won't use it, since we would not be able to later
         free/realloc space internal to a segregated mmap region.
      */
      was_enabled = use_mmap(m);
      disable_mmap(m);
      for(i = 0, next_i = 0; i != n_elements; i = next_i)
      {
         int error = 0;
         size_t accum_size;
         /* Greedily accumulate array requests [i, next_i) until the segment
            budget is exhausted.  An oversized first request is still taken
            alone so forward progress is guaranteed. */
         for(accum_size = 0; next_i != n_elements; ++next_i){
            size_t cur_array_size = sizes[next_i];
            if(max_size < cur_array_size){
               error = 1;  /* cur_array_size*element_size would overflow */
               break;
            }
            else{
               size_t reqsize = request2size(cur_array_size*element_size);
               if(((boost_cont_multialloc_segmented_malloc_size - CHUNK_OVERHEAD) - accum_size) < reqsize){
                  if(!accum_size){
                     /* Single array bigger than the segment: allocate it alone */
                     accum_size += reqsize;
                     ++next_i;
                  }
                  break;
               }
               accum_size += reqsize;
            }
         }

         mem = error ? 0 : mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
         if (mem == 0){
            /* Roll back: free the i arrays already linked into *pchain */
            boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
            while(i--){
               void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
               BOOST_CONTAINER_MEMIT_NEXT(it);
               s_allocated_memory -= chunksize(mem2chunk(addr));
               mspace_free_lockless(m, addr);
            }
            if (was_enabled)
               enable_mmap(m);
            return 0;
         }
         p = mem2chunk(mem);
         remainder_size = chunksize(p);
         s_allocated_memory += remainder_size;

         assert(!is_mmapped(p));

         { /* split out elements */
            void *mem_orig = mem;
            boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
            size_t num_elements = next_i-i;

            /* Carve the aggregate chunk into num_elements in-use chunks,
               threading each node's first word to point at the next node.
               The last chunk keeps whatever remainder is left. */
            for(++i; i != next_i; ++i) {
               void **mem_prev = ((void**)mem);
               size = request2size(sizes[i]*element_size);
               remainder_size -= size;
               set_size_and_pinuse_of_inuse_chunk(m, p, size);
               p = chunk_plus_offset(p, size);
               mem = chunk2mem(p);
               *mem_prev = mem;
            }
            set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
            BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
         }
      }
      if (was_enabled)
         enable_mmap(m);
   }
   return 1;
}
1101 | ||
1102 | int boost_cont_multialloc_arrays | |
1103 | (size_t n_elements, const size_t *sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) | |
1104 | { | |
1105 | int ret = 0; | |
1106 | mstate ms = (mstate)gm; | |
1107 | ensure_initialization(); | |
1108 | if (!ok_magic(ms)) { | |
1109 | USAGE_ERROR_ACTION(ms,ms); | |
1110 | } | |
1111 | else if (!PREACTION(ms)) { | |
1112 | ret = internal_multialloc_arrays(ms, n_elements, sizes, element_size, contiguous_elements, pchain); | |
1113 | POSTACTION(ms); | |
1114 | } | |
1115 | return ret; | |
1116 | } | |
1117 | ||
1118 | ||
1119 | /*Doug Lea malloc extensions*/ | |
/* Collects footprint statistics for mspace "m":
   - max_system_bytes: historical peak footprint
   - system_bytes:     current footprint
   - in_use_bytes:     footprint minus top chunk and all free chunks
   Returns a zeroed struct if the lock cannot be taken. */
static boost_cont_malloc_stats_t get_malloc_stats(mstate m)
{
   boost_cont_malloc_stats_t ret = { 0, 0, 0 };
   ensure_initialization();
   if (!PREACTION(m)) {
      size_t maxfp = 0;
      size_t fp = 0;
      size_t used = 0;
      check_malloc_state(m);
      if (is_initialized(m)) {
         msegmentptr s = &m->seg;
         maxfp = m->max_footprint;
         fp = m->footprint;
         /* Start from footprint minus the top chunk, then subtract every
            free chunk found while walking all segments. */
         used = fp - (m->topsize + TOP_FOOT_SIZE);

         while (s != 0) {
            mchunkptr q = align_as_chunk(s->base);
            /* Walk chunks of this segment until top or the fencepost. */
            while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
               if (!cinuse(q))
                  used -= chunksize(q);
               q = next_chunk(q);
            }
            s = s->next;
         }
      }

      ret.max_system_bytes = maxfp;
      ret.system_bytes = fp;
      ret.in_use_bytes = used;
      POSTACTION(m);
   }
   return ret;
}
1154 | ||
1155 | size_t boost_cont_size(const void *p) | |
1156 | { return DL_SIZE_IMPL(p); } | |
1157 | ||
1158 | void* boost_cont_malloc(size_t bytes) | |
1159 | { | |
1160 | size_t received_bytes; | |
1161 | ensure_initialization(); | |
1162 | return boost_cont_allocation_command | |
1163 | (BOOST_CONTAINER_ALLOCATE_NEW, 1, bytes, bytes, &received_bytes, 0).first; | |
1164 | } | |
1165 | ||
1166 | void boost_cont_free(void* mem) | |
1167 | { | |
1168 | mstate ms = (mstate)gm; | |
1169 | if (!ok_magic(ms)) { | |
1170 | USAGE_ERROR_ACTION(ms,ms); | |
1171 | } | |
1172 | else if (!PREACTION(ms)) { | |
92f5a8d4 TL |
1173 | if(mem) |
1174 | s_allocated_memory -= chunksize(mem2chunk(mem)); | |
7c673cae FG |
1175 | mspace_free_lockless(ms, mem); |
1176 | POSTACTION(ms); | |
1177 | } | |
1178 | } | |
1179 | ||
1180 | void* boost_cont_memalign(size_t bytes, size_t alignment) | |
1181 | { | |
1182 | void *addr; | |
1183 | ensure_initialization(); | |
1184 | addr = mspace_memalign(gm, alignment, bytes); | |
1185 | if(addr){ | |
1186 | s_allocated_memory += chunksize(mem2chunk(addr)); | |
1187 | } | |
1188 | return addr; | |
1189 | } | |
1190 | ||
1191 | int boost_cont_multialloc_nodes | |
1192 | (size_t n_elements, size_t elem_size, size_t contiguous_elements, boost_cont_memchain *pchain) | |
1193 | { | |
1194 | int ret = 0; | |
1195 | mstate ms = (mstate)gm; | |
1196 | ensure_initialization(); | |
1197 | if (!ok_magic(ms)) { | |
1198 | USAGE_ERROR_ACTION(ms,ms); | |
1199 | } | |
1200 | else if (!PREACTION(ms)) { | |
1201 | ret = internal_node_multialloc(ms, n_elements, elem_size, contiguous_elements, pchain); | |
1202 | POSTACTION(ms); | |
1203 | } | |
1204 | return ret; | |
1205 | } | |
1206 | ||
1207 | size_t boost_cont_footprint() | |
1208 | { | |
1209 | return ((mstate)gm)->footprint; | |
1210 | } | |
1211 | ||
1212 | size_t boost_cont_allocated_memory() | |
1213 | { | |
1214 | size_t alloc_mem = 0; | |
1215 | mstate m = (mstate)gm; | |
1216 | ensure_initialization(); | |
1217 | if (!ok_magic(ms)) { | |
1218 | USAGE_ERROR_ACTION(ms,ms); | |
1219 | } | |
1220 | ||
1221 | ||
1222 | if (!PREACTION(m)) { | |
1223 | check_malloc_state(m); | |
1224 | if (is_initialized(m)) { | |
1225 | size_t nfree = SIZE_T_ONE; /* top always free */ | |
1226 | size_t mfree = m->topsize + TOP_FOOT_SIZE; | |
1227 | size_t sum = mfree; | |
1228 | msegmentptr s = &m->seg; | |
1229 | while (s != 0) { | |
1230 | mchunkptr q = align_as_chunk(s->base); | |
1231 | while (segment_holds(s, q) && | |
1232 | q != m->top && q->head != FENCEPOST_HEAD) { | |
1233 | size_t sz = chunksize(q); | |
1234 | sum += sz; | |
1235 | if (!is_inuse(q)) { | |
1236 | mfree += sz; | |
1237 | ++nfree; | |
1238 | } | |
1239 | q = next_chunk(q); | |
1240 | } | |
1241 | s = s->next; | |
1242 | } | |
1243 | { | |
1244 | size_t uordblks = m->footprint - mfree; | |
1245 | if(nfree) | |
1246 | alloc_mem = (size_t)(uordblks - (nfree-1)*TOP_FOOT_SIZE); | |
1247 | else | |
1248 | alloc_mem = uordblks; | |
1249 | } | |
1250 | } | |
1251 | ||
1252 | POSTACTION(m); | |
1253 | } | |
1254 | return alloc_mem; | |
1255 | } | |
1256 | ||
1257 | size_t boost_cont_chunksize(const void *p) | |
1258 | { return chunksize(mem2chunk(p)); } | |
1259 | ||
1260 | int boost_cont_all_deallocated() | |
1261 | { return !s_allocated_memory; } | |
1262 | ||
1263 | boost_cont_malloc_stats_t boost_cont_malloc_stats() | |
1264 | { | |
1265 | mstate ms = (mstate)gm; | |
1266 | if (ok_magic(ms)) { | |
1267 | return get_malloc_stats(ms); | |
1268 | } | |
1269 | else { | |
1270 | boost_cont_malloc_stats_t r = { 0, 0, 0 }; | |
1271 | USAGE_ERROR_ACTION(ms,ms); | |
1272 | return r; | |
1273 | } | |
1274 | } | |
1275 | ||
1276 | size_t boost_cont_in_use_memory() | |
1277 | { return s_allocated_memory; } | |
1278 | ||
1279 | int boost_cont_trim(size_t pad) | |
1280 | { | |
1281 | ensure_initialization(); | |
1282 | return dlmalloc_trim(pad); | |
1283 | } | |
1284 | ||
/* Tries to grow the buffer "oldmem" in place to at least minbytes and at
   most maxbytes (via try_realloc_chunk_with_min).  On success writes the
   obtained usable size to *received, updates s_allocated_memory by the
   chunk-size delta, and returns nonzero.  Returns 0 on failure. */
int boost_cont_grow
   (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received)
{
   mstate ms = (mstate)gm;
   if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
   }

   if (!PREACTION(ms)) {
      mchunkptr p = mem2chunk(oldmem);
      size_t oldsize = chunksize(p);
      p = try_realloc_chunk_with_min(ms, p, request2size(minbytes), request2size(maxbytes), 0);
      /* NOTE(review): the lock is released before the chunk is inspected
         and the counters updated below — confirm no concurrent mutation
         of this chunk is possible at that point. */
      POSTACTION(ms);
      if(p){
         check_inuse_chunk(ms, p);
         *received = DL_SIZE_IMPL(oldmem);
         s_allocated_memory += chunksize(p) - oldsize;
      }
      return 0 != p;
   }
   return 0;
}
1308 | ||
1309 | int boost_cont_shrink | |
1310 | (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received, int do_commit) | |
1311 | { | |
1312 | mstate ms = (mstate)gm; | |
1313 | if (!ok_magic(ms)) { | |
1314 | USAGE_ERROR_ACTION(ms,ms); | |
1315 | return 0; | |
1316 | } | |
1317 | ||
1318 | if (!PREACTION(ms)) { | |
1319 | int ret = internal_shrink(ms, oldmem, minbytes, maxbytes, received, do_commit); | |
1320 | POSTACTION(ms); | |
1321 | return 0 != ret; | |
1322 | } | |
1323 | return 0; | |
1324 | } | |
1325 | ||
1326 | ||
1327 | void* boost_cont_alloc | |
1328 | (size_t minbytes, size_t preferred_bytes, size_t *received_bytes) | |
1329 | { | |
1330 | //ensure_initialization provided by boost_cont_allocation_command | |
1331 | return boost_cont_allocation_command | |
1332 | (BOOST_CONTAINER_ALLOCATE_NEW, 1, minbytes, preferred_bytes, received_bytes, 0).first; | |
1333 | } | |
1334 | ||
/* Deallocates every node linked in *pchain back to the global mspace. */
void boost_cont_multidealloc(boost_cont_memchain *pchain)
{
   mstate ms = (mstate)gm;
   if (!ok_magic(ms)) {
      (void)ms;
      USAGE_ERROR_ACTION(ms,ms);
   }
   /* NOTE(review): the free below still runs after a failed magic check;
      presumably USAGE_ERROR_ACTION aborts, but confirm before relying on it. */
   internal_multialloc_free(ms, pchain);
}
1344 | ||
/* Debug self-check: in DEBUG builds, validates the global mspace state
   and returns 1 if consistent (0 on a failed magic check); in release
   builds it is a no-op that always returns 1. */
int boost_cont_malloc_check()
{
#ifdef DEBUG
   mstate ms = (mstate)gm;
   ensure_initialization();
   if (ok_magic(ms)) {
      check_malloc_state(ms);
      return 1;
   }
   (void)ms;
   USAGE_ERROR_ACTION(ms, ms);
   return 0;
#else
   return 1;
#endif
}
1361 | ||
1362 | ||
/* General allocation command dispatcher.
   "command" is a bitmask selecting shrink-in-place, expand-forward/backward
   and/or allocate-new behaviour; limit_size/preferred_size bound the request;
   reuse_ptr is the existing buffer for expand/shrink commands.
   Returns {pointer, expanded-flag}: .second is 1 when reuse_ptr was grown
   in place, 0 when a fresh buffer was returned.  *received_size gets the
   usable size obtained (0 on failure). */
boost_cont_command_ret_t boost_cont_allocation_command
   (allocation_type command, size_t sizeof_object, size_t limit_size
   , size_t preferred_size, size_t *received_size, void *reuse_ptr)
{
   boost_cont_command_ret_t ret = { 0, 0 };
   ensure_initialization();
   /* Shrink commands are handled entirely by boost_cont_shrink */
   if(command & (BOOST_CONTAINER_SHRINK_IN_PLACE | BOOST_CONTAINER_TRY_SHRINK_IN_PLACE)){
      int success = boost_cont_shrink( reuse_ptr, preferred_size, limit_size
                           , received_size, (command & BOOST_CONTAINER_SHRINK_IN_PLACE));
      ret.first = success ? reuse_ptr : 0;
      return ret;
   }

   *received_size = 0;

   /* Inconsistent request: the minimum acceptable size exceeds the preferred one */
   if(limit_size > preferred_size)
      return ret;

   {
      mstate ms = (mstate)gm;

      /*Expand in place*/
      if (!PREACTION(ms)) {
         #if FOOTERS
         if(reuse_ptr){
            mstate m = get_mstate_for(mem2chunk(reuse_ptr));
            if (!ok_magic(m)) {
               USAGE_ERROR_ACTION(m, reuse_ptr);
               /* NOTE(review): this path returns without reaching POSTACTION,
                  i.e. with the lock taken by PREACTION still held — confirm
                  whether USAGE_ERROR_ACTION aborts in FOOTERS builds. */
               return ret;
            }
         }
         #endif
         /* 1st attempt: expand reuse_ptr in place up to preferred_size */
         if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
            void *r = internal_grow_both_sides
               ( ms, command, reuse_ptr, limit_size
               , preferred_size, received_size, sizeof_object, 1);
            if(r){
               ret.first = r;
               ret.second = 1;
               goto postaction;
            }
         }

         /* 2nd attempt: fresh allocation, preferred size first, then the minimum */
         if(command & BOOST_CONTAINER_ALLOCATE_NEW){
            void *addr = mspace_malloc_lockless(ms, preferred_size);
            if(!addr)   addr = mspace_malloc_lockless(ms, limit_size);
            if(addr){
               s_allocated_memory += chunksize(mem2chunk(addr));
               *received_size = DL_SIZE_IMPL(addr);
            }
            ret.first = addr;
            ret.second = 0;
            if(addr){
               goto postaction;
            }
         }

         //Now try to expand both sides with min size
         if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
            void *r = internal_grow_both_sides
               ( ms, command, reuse_ptr, limit_size
               , preferred_size, received_size, sizeof_object, 0);
            if(r){
               ret.first = r;
               ret.second = 1;
               goto postaction;
            }
         }
         postaction:
         POSTACTION(ms);
      }
   }
   return ret;
}
1437 | ||
/* mallopt-style tuning: forwards the (parameter, value) pair to
   dlmalloc's change_mparam and returns its result. */
int boost_cont_mallopt(int param_number, int value)
{
   return change_mparam(param_number, value);
}
1442 | ||
1443 | void *boost_cont_sync_create() | |
1444 | { | |
1445 | void *p = boost_cont_malloc(sizeof(MLOCK_T)); | |
1446 | if(p){ | |
1447 | if(0 != INITIAL_LOCK((MLOCK_T*)p)){ | |
1448 | boost_cont_free(p); | |
1449 | p = 0; | |
1450 | } | |
1451 | } | |
1452 | return p; | |
1453 | } | |
1454 | ||
1455 | void boost_cont_sync_destroy(void *sync) | |
1456 | { | |
1457 | if(sync){ | |
1458 | (void)DESTROY_LOCK((MLOCK_T*)sync); | |
1459 | boost_cont_free(sync); | |
1460 | } | |
1461 | } | |
1462 | ||
1463 | int boost_cont_sync_lock(void *sync) | |
1464 | { return 0 == (ACQUIRE_LOCK((MLOCK_T*)sync)); } | |
1465 | ||
1466 | void boost_cont_sync_unlock(void *sync) | |
1467 | { RELEASE_LOCK((MLOCK_T*)sync); } | |
1468 | ||
/* Acquires dlmalloc's global lock after ensuring initialization.
   Returns nonzero on success (the macro reports success as 0). */
int boost_cont_global_sync_lock()
{
   ensure_initialization();
   return ACQUIRE_MALLOC_GLOBAL_LOCK() == 0;
}
1476 | ||
/* Releases dlmalloc's global lock taken by boost_cont_global_sync_lock. */
void boost_cont_global_sync_unlock()
{
   /* NOTE(review): no trailing ';' here — the statement relies on the
      macro expansion supplying it; confirm against the macro definition. */
   RELEASE_MALLOC_GLOBAL_LOCK()
}
1481 | ||
1482 | //#ifdef DL_DEBUG_DEFINED | |
1483 | // #undef DEBUG | |
1484 | //#endif | |
1485 | ||
1486 | #ifdef _MSC_VER | |
1487 | #pragma warning (pop) | |
1488 | #endif |