#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_addr_comp)
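
/*
 * Run quantization maps arbitrary page-aligned sizes onto the discrete set of
 * run sizes that can actually back small and large allocations, so that the
 * per-size-class runs_avail trees can be indexed consistently.
 */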
static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}
static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}
static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
#endif
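
/* Translate a size class index into the corresponding runs_avail tree. */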
static arena_run_tree_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_tree_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_tree_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get(chunk, pageind));
}
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}
static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}
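
/*
 * Allocate a region from run: pop the first free region index from the run's
 * bitmap and compute its address from the bin's region size and offset
 * metadata.
 */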
JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}
JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}
JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}
static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}
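
/*
 * Remove a free run from runs_avail (and runs_dirty if applicable), and record
 * any trailing pages that are left over after need_pages are carved off.
 */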
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}
static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}
static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}
static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}
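
/* Promote the cached spare chunk into service as a freshly allocated chunk. */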
static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(chunk, &chunk->node));
}
static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(&arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
	    chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(arena, chunk_hooks,
			    (void *)chunk, chunksize, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
		    chunksize, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(&arena->lock);
	return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(arena, chunk, *zero)) {
			chunk_dalloc_cache(arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
		    zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}
static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
	 * chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get(chunk, map_bias+1),
		    (size_t)((uintptr_t) arena_bitselm_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
		    map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
		    arena_bitselm_get(chunk, chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}
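
/*
 * Retire a completely unused chunk.  The chunk becomes the new spare; any
 * previous spare is deregistered and returned to the chunk cache.
 */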
static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		bool committed;

		arena->spare = chunk;
		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
			arena_run_dirty_remove(arena, spare, map_bias,
			    chunk_npages-map_bias);
		}

		chunk_deregister(spare, &spare->node);

		committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
		    0);
		if (!committed) {
			/*
			 * Decommit the header.  Mark the chunk as decommitted
			 * even if header decommit fails, since treating a
			 * partially committed chunk as committed has a high
			 * potential for causing later access of decommitted
			 * memory.
			 */
			chunk_hooks = chunk_hooks_get(arena);
			chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}

		chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
		    chunksize, committed);

		if (config_stats) {
			arena->stats.mapped -= chunksize;
			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
		}
	} else
		arena->spare = chunk;
}
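
/*
 * Huge allocation stats are updated optimistically before the corresponding
 * chunk operation; the *_undo() variants revert those updates when the
 * operation fails.
 */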
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}
extent_node_t *
arena_node_alloc(arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(&arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(&arena->node_cache_mtx);
		return (base_alloc(sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(&arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
}
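
/*
 * Slow path for huge chunk allocation, called without arena->lock held; on
 * failure it reacquires the lock to revert the optimistic stats updates.
 */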
static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
	    zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	}

	return (ret);
}
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
	    zero, true);
	malloc_mutex_unlock(&arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
		    alignment, zero, csize);
	}

	return (ret);
}
void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);
}
void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
	}
	malloc_mutex_unlock(&arena->lock);
}
static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
    size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
	    zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}
	return (err);
}
bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(&arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
		    chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}

	return (err);
}
/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}
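
/*
 * lg_dirty_mult controls ratio-based purging: the arena purges once
 * ndirty exceeds (nactive >> lg_dirty_mult).  A value of -1 disables purging.
 */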
bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(&arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(&arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	malloc_mutex_lock(&arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}
static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}
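
/*
 * Compute the decay-based limit on dirty pages as the smoothstep-weighted sum
 * of the per-epoch backlog of dirty page creation.
 */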
static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}
static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance > 0);

	/* Add nadvance decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance], 0, (nadvance-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}
static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	return (decay_time >= -1 && decay_time <= NSTIME_SEC_MAX);
}

ssize_t
arena_decay_time_get(arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(&arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(&arena->lock);

	return (decay_time);
}
bool
arena_decay_time_set(arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(&arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}
static void
arena_maybe_purge_ratio(arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(arena, threshold);
	}
}

static void
arena_maybe_purge_decay(arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(arena, ndirty_limit);
}

void
arena_maybe_purge(arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(arena);
	else
		arena_maybe_purge_decay(arena);
}
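
/*
 * Walk runs_dirty/chunks_cache and count dirty pages (used only for debug
 * validation of arena->ndirty).
 */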
static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}
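
/*
 * Purging proceeds in three phases: arena_stash_dirty() temporarily allocates
 * dirty runs/chunks and links them onto sentinel lists, arena_purge_stashed()
 * purges them with arena->lock dropped, and arena_unstash_purged() deallocates
 * them afterwards.
 */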
static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}
static size_t
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(&arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread.  As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}
static void
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_arena(arena, chunk_hooks, addr, size,
			    zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(arena, run, false, true, decommitted);
		}
	}
}
/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *                       desired state:
 *                       (arena->ndirty <= ndirty_limit)
 *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *                       violating the invariant:
 *                       (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}
void
arena_purge(arena_t *arena, bool all)
{

	malloc_mutex_lock(&arena->lock);
	if (all)
		arena_purge_to_limit(arena, 0);
	else
		arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);
}
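
/*
 * Coalesce a newly freed run with adjacent unallocated runs that have the same
 * dirty/decommitted state, updating size/run_ind/run_pages in place.
 */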
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
    size_t flag_decommitted)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
	    flag_decommitted) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

		/*
		 * If the successor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
			    nrun_pages);
		}

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
	    flag_decommitted) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
		    flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind, prun_pages);

		/*
		 * If the predecessor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind,
			    prun_pages);
		}

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	*p_size = size;
	*p_run_ind = run_ind;
	*p_run_pages = run_pages;
}
static size_t
arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t run_ind)
{
	size_t size;

	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);

	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
		size = bin_info->run_size;
	}

	return (size);
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
    bool decommitted)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	size = arena_run_size_get(arena, chunk, run, run_ind);
	run_pages = (size >> LG_PAGE);
	arena_nactive_sub(arena, run_pages);

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
	    != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty || decommitted) {
		size_t flags = flag_dirty | flag_decommitted;
		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    flags);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
	    flag_dirty, flag_decommitted);

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages);

	if (dirty)
		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxrun) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxrun >> LG_PAGE));
		arena_chunk_dalloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}
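
/*
 * The trim functions split an allocated large run and return the unneeded
 * head or tail portion to the arena via arena_run_dalloc().
 */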
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_chunk_map_misc_t *tail_miscelm;
	arena_run_t *tail_run;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
	tail_run = &tail_miscelm->run;
	arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
	    0));
}
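/*
 * The per-bin run tree (bin->runs) is keyed by the address of each run's
 * arena_chunk_map_misc_t, so arena_bin_runs_first() yields the lowest-addressed
 * non-full run.  Preferring low addresses keeps small allocations packed
 * toward the beginning of chunks, which tends to reduce fragmentation.
 */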
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
	if (miscelm != NULL)
		return (&miscelm->run);

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

	assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);

	arena_run_tree_insert(&bin->runs, miscelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

	assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);

	arena_run_tree_remove(&bin->runs, miscelm);
}
static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}
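/*
 * Note the locking protocol below: bin->lock is dropped before arena->lock is
 * acquired to allocate a new run, then reacquired afterward.  Another thread
 * may refill bin->runcur or release memory in that window, which is why the
 * caller re-checks runcur and why this function retries
 * arena_bin_nonfull_run_tryget() after an allocation failure.
 */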
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	szind_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc_small(arena, bin_info->run_size, binind);
	if (run != NULL) {
		/* Initialize run internals. */
		run->binind = binind;
		run->nfree = bin_info->nregs;
		bitmap_init(run->bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc_small() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		void *ret;

		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run tree.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
			else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}
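/*
 * Fill count example (illustrative): with ncached_max == 200 and
 * tbin->lg_fill_div == 1, nfill is 100, so at most 100 regions are pulled from
 * the bin per fill.  Cached pointers are stored in the nfill slots immediately
 * below tbin->avail (the loop writes *(tbin->avail - nfill + i)).
 */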
void
arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
    szind_t binind, uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
		prof_idump();
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		arena_run_t *run;
		void *ptr;
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(&bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsd, arena);
}
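/*
 * Redzone layout for a small region when opt_redzone is enabled (sketch):
 *
 *   [redzone | region (reg_size bytes) | redzone]
 *
 * arena_alloc_junk_small() writes 0xa5 into the redzones at allocation time;
 * arena_dalloc_junk_small() validates them via arena_redzones_validate() and
 * then refills the interval with 0x5a, so a stray write past either end of a
 * region is reported at deallocation time.
 */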
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	if (!zero) {
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		    redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		    redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);
	}
}

#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
    size_t offset, uint8_t byte)
{

	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
	    after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(arena_redzone_corruption_impl);
#endif
static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
	bool error = false;

	if (opt_junk_alloc) {
		size_t size = bin_info->reg_size;
		size_t redzone_size = bin_info->redzone_size;
		size_t i;

		for (i = 1; i <= redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
			if (*byte != 0xa5) {
				error = true;
				arena_redzone_corruption(ptr, size, false, i,
				    *byte);
				if (reset)
					*byte = 0xa5;
			}
		}
		for (i = 0; i < redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
			if (*byte != 0xa5) {
				error = true;
				arena_redzone_corruption(ptr, size, true, i,
				    *byte);
				if (reset)
					*byte = 0xa5;
			}
		}
	}

	if (opt_abort && error)
		abort();
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
	size_t redzone_size = bin_info->redzone_size;

	arena_redzones_validate(ptr, bin_info, false);
	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
	    bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif

void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	cassert(config_fill);
	assert(opt_junk_free);
	assert(opt_quarantine);
	assert(usize <= SMALL_MAXCLASS);

	binind = size2index(usize);
	bin_info = &arena_bin_info[binind];
	arena_redzones_validate(ptr, bin_info, true);
}
static void *
arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	arena_run_t *run;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = index2size(binind);

	malloc_mutex_lock(&bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(arena, bin);

	if (ret == NULL) {
		malloc_mutex_unlock(&bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(&bin->lock);
	if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
		prof_idump();

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsd, arena);
	return (ret);
}
static void *
arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	size_t usize;
	uintptr_t random_offset;
	arena_run_t *run;
	arena_chunk_map_misc_t *miscelm;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	/* Large allocation. */
	usize = index2size(binind);
	malloc_mutex_lock(&arena->lock);
	if (config_cache_oblivious) {
		uint64_t r;

		/*
		 * Compute a uniformly distributed offset within the first page
		 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
		 * for 4 KiB pages and 64-byte cachelines.
		 */
		r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
		random_offset = ((uintptr_t)r) << LG_CACHELINE;
	} else
		random_offset = 0;
	run = arena_run_alloc_large(arena, usize + large_pad, zero);
	if (run == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	miscelm = arena_run_to_miscelm(run);
	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
	    random_offset);
	if (config_stats) {
		szind_t index = binind - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	if (config_prof)
		idump = arena_prof_accum_locked(arena, usize);
	malloc_mutex_unlock(&arena->lock);
	if (config_prof && idump)
		prof_idump();

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc))
				memset(ret, 0xa5, usize);
			else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	}

	arena_decay_tick(tsd, arena);
	return (ret);
}
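/*
 * Size-class dispatch (sketch): requests up to SMALL_MAXCLASS are served from
 * bins, requests up to large_maxclass from page runs within a chunk, and
 * anything larger is handed to the huge allocator.  With the default 4 KiB
 * pages and 2 MiB chunks those thresholds are typically 14 KiB and somewhat
 * under 2 MiB, though the exact values are configuration-dependent.
 */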
void *
arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache)
{

	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL))
		return (NULL);

	if (likely(size <= SMALL_MAXCLASS))
		return (arena_malloc_small(tsd, arena, ind, zero));
	if (likely(size <= large_maxclass))
		return (arena_malloc_large(tsd, arena, ind, zero));
	return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
}
/* Only handles large allocations that require more than page alignment. */
static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(usize == PAGE_CEILING(usize));

	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL))
		return (NULL);

	alignment = PAGE_CEILING(alignment);
	alloc_size = usize + large_pad + alignment - PAGE;

	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc_large(arena, alloc_size, false);
	if (run == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);

	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
	    (uintptr_t)rpages;
	assert(alloc_size >= leadsize + usize);
	trailsize = alloc_size - leadsize - usize - large_pad;
	if (leadsize != 0) {
		arena_chunk_map_misc_t *head_miscelm = miscelm;
		arena_run_t *head_run = run;

		miscelm = arena_miscelm_get(chunk,
		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
		    LG_PAGE));
		run = &miscelm->run;

		arena_run_trim_head(arena, chunk, head_run, alloc_size,
		    alloc_size - leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(arena, chunk, run, usize + large_pad +
		    trailsize, usize + large_pad, false);
	}
	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
		size_t run_ind =
		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
		bool decommitted = (arena_mapbits_decommitted_get(chunk,
		    run_ind) != 0);

		assert(decommitted); /* Cause of OOM. */
		arena_run_dalloc(arena, run, dirty, false, decommitted);
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	ret = arena_miscelm_to_rpages(miscelm);

	if (config_stats) {
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);

	if (config_fill && !zero) {
		if (unlikely(opt_junk_alloc))
			memset(ret, 0xa5, usize);
		else if (unlikely(opt_zero))
			memset(ret, 0, usize);
	}
	arena_decay_tick(tsd, arena);
	return (ret);
}
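/*
 * Alignment strategy used above (illustrative numbers): to return a 16 KiB
 * allocation aligned to 16 KiB with 4 KiB pages, arena_palloc_large()
 * over-allocates usize + large_pad + alignment - PAGE bytes, locates the first
 * properly aligned page within that run, and trims the unused leading and
 * trailing pages back to the arena via arena_run_trim_head()/_tail().
 */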
void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special run placement. */
		ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
		    tcache, true);
	} else if (usize <= large_maxclass && alignment <= PAGE) {
		/*
		 * Large; alignment doesn't require special run placement.
		 * However, the cached pointer may be at a random offset from
		 * the base of the run, so do some bit manipulation to retrieve
		 * the base.
		 */
		ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
		    tcache, true);
		if (config_cache_oblivious)
			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
	} else {
		if (likely(usize <= large_maxclass)) {
			ret = arena_palloc_large(tsd, arena, usize, alignment,
			    zero);
		} else if (likely(alignment <= chunksize))
			ret = huge_malloc(tsd, arena, usize, zero, tcache);
		else {
			ret = huge_palloc(tsd, arena, usize, alignment, zero,
			    tcache);
		}
	}
	return (ret);
}
void
arena_prof_promoted(const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(ptr, false) == LARGE_MINCLASS);
	assert(isalloc(ptr, true) == LARGE_MINCLASS);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = size2index(size);
	assert(binind < NBINS);
	arena_mapbits_large_binind_set(chunk, pageind, binind);

	assert(isalloc(ptr, false) == LARGE_MINCLASS);
	assert(isalloc(ptr, true) == size);
}
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/* Dissociate run from bin. */
	if (run == bin->runcur)
		bin->runcur = NULL;
	else {
		szind_t binind = arena_bin_index(extent_node_arena_get(
		    &chunk->node), bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];

		if (bin_info->nregs != 1) {
			/*
			 * This block's conditional is necessary because if the
			 * run only contains one region, then it never gets
			 * inserted into the non-full runs tree.
			 */
			arena_bin_runs_remove(bin, run);
		}
	}
}
static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	assert(run != bin->runcur);
	assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
	    NULL);

	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	arena_run_dalloc(arena, run, true, false, false);
	malloc_mutex_unlock(&arena->lock);
	/****************************/
	malloc_mutex_lock(&bin->lock);
	if (config_stats)
		bin->stats.curruns--;
}
static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/*
	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
	 * non-full run.  It is okay to NULL runcur out rather than proactively
	 * keeping it pointing at the lowest non-full run.
	 */
	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
		bin->runcur = run;
		if (config_stats)
			bin->stats.reruns++;
	} else
		arena_bin_runs_insert(bin, run);
}
static void
arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_bits_t *bitselm, bool junked)
{
	size_t pageind, rpages_ind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	szind_t binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get(chunk, rpages_ind)->run;
	binind = run->binind;
	bin = &arena->bins[binind];
	bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}
void
arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_bits_t *bitselm)
{

	arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
}

static void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
	arena_run_t *run;
	arena_bin_t *bin;
	size_t rpages_ind;

	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get(chunk, rpages_ind)->run;
	bin = &arena->bins[run->binind];
	malloc_mutex_lock(&bin->lock);
	arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
	malloc_mutex_unlock(&bin->lock);
}
void
arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
	arena_chunk_map_bits_t *bitselm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	bitselm = arena_bitselm_get(chunk, pageind);
	arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
	arena_decay_tick(tsd, arena);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free))
		memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
static void
arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
		    large_pad;

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			szind_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

	arena_run_dalloc(arena, run, true, false, false);
}

void
arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr)
{

	arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
}

void
arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	malloc_mutex_lock(&arena->lock);
	arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
	malloc_mutex_unlock(&arena->lock);
	arena_decay_tick(tsd, arena);
}
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
	arena_run_t *run = &miscelm->run;

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(&arena->lock);
	arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
	    large_pad, true);
	if (config_stats) {
		szind_t oldindex = size2index(oldsize) - NBINS;
		szind_t index = size2index(size) - NBINS;

		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[oldindex].ndalloc++;
		arena->stats.lstats[oldindex].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);
}
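/*
 * In-place growth is only possible when the pages immediately following the
 * existing run (within the same chunk) are unallocated and large enough to
 * reach at least usize_min; otherwise the caller falls back to
 * allocate-copy-free.  The split/merge below reuses the ordinary run
 * bookkeeping, so stats are updated as one large deallocation plus one larger
 * allocation.
 */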
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = (oldsize + large_pad) >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
	    large_pad);

	/* Try to extend the run. */
	malloc_mutex_lock(&arena->lock);
	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
	    pageind+npages) != 0)
		goto label_fail;
	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
	if (oldsize + followsize >= usize_min) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		arena_run_t *run;
		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;

		usize = usize_max;
		while (oldsize + followsize < usize)
			usize = index2size(size2index(usize)-1);
		assert(usize >= usize_min);
		assert(usize >= oldsize);
		splitsize = usize - oldsize;
		if (splitsize == 0)
			goto label_fail;

		run = &arena_miscelm_get(chunk, pageind+npages)->run;
		if (arena_run_split_large(arena, run, splitsize, zero))
			goto label_fail;

		if (config_cache_oblivious && zero) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the run is a multiple of
			 * CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)((uintptr_t)ptr + oldsize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}

		size = oldsize + splitsize;
		npages = (size + large_pad) >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
		arena_mapbits_large_set(chunk, pageind, size + large_pad,
		    flag_dirty | (flag_unzeroed_mask &
		    arena_mapbits_unzeroed_get(chunk, pageind)));
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
		    pageind+npages-1)));

		if (config_stats) {
			szind_t oldindex = size2index(oldsize) - NBINS;
			szind_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
label_fail:
	malloc_mutex_unlock(&arena->lock);
	return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), 0x5a,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif
/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object, and the following run is already in use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	arena_chunk_t *chunk;
	arena_t *arena;

	if (oldsize == usize_max) {
		/* Current size class is compatible and maximal. */
		return (false);
	}

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = extent_node_arena_get(&chunk->node);

	if (oldsize < usize_max) {
		bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
		    usize_min, usize_max, zero);
		if (config_fill && !ret && !zero) {
			if (unlikely(opt_junk_alloc)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
				    isalloc(ptr, config_prof) - oldsize);
			} else if (unlikely(opt_zero)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    isalloc(ptr, config_prof) - oldsize);
			}
		}
		return (ret);
	}

	assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
	arena_ralloc_junk_large(ptr, oldsize, usize_max);
	arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
	return (false);
}
bool
arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		arena_chunk_t *chunk;

		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max > SMALL_MAXCLASS ||
			    size2index(usize_max) != size2index(oldsize)) &&
			    (size > oldsize || usize_max < oldsize))
				return (true);
		} else {
			if (usize_max <= SMALL_MAXCLASS)
				return (true);
			if (arena_ralloc_large(ptr, oldsize, usize_min,
			    usize_max, zero))
				return (true);
		}

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
		return (false);
	} else {
		return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
		    usize_max, zero));
	}
}
static void *
arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
		    tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}
void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
		    zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ? usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}
dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(&arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(&arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(&arena->lock);
	return (false);
}

ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
	return (false);
}

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}
static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay_time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
    size_t *ndirty)
{

	malloc_mutex_lock(&arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(&arena->lock);
}
void
arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(&arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(&arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}
unsigned
arena_nthreads_get(arena_t *arena)
{

	return (atomic_read_u(&arena->nthreads));
}

void
arena_nthreads_inc(arena_t *arena)
{

	atomic_add_u(&arena->nthreads, 1);
}

void
arena_nthreads_dec(arena_t *arena)
{

	atomic_sub_u(&arena->nthreads, 1);
}
arena_t *
arena_new(unsigned ind)
{
	arena_t *arena;
	size_t arena_size;
	unsigned i;
	arena_bin_t *bin;

	/* Compute arena size to incorporate sufficient runs_avail elements. */
	arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) *
	    runs_avail_nclasses);
	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
	if (config_stats) {
		arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
		    nhclasses) * sizeof(malloc_huge_stats_t));
	} else
		arena = (arena_t *)base_alloc(arena_size);
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads = 0;
	if (malloc_mutex_init(&arena->lock))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (uint64_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get();

	arena->spare = NULL;

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	for (i = 0; i < runs_avail_nclasses; i++)
		arena_run_tree_new(&arena->runs_avail[i]);
	qr_new(&arena->runs_dirty, rd_link);
	qr_new(&arena->chunks_cache, cc_link);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx))
		return (NULL);

	extent_tree_szad_new(&arena->chunks_szad_cached);
	extent_tree_ad_new(&arena->chunks_ad_cached);
	extent_tree_szad_new(&arena->chunks_szad_retained);
	extent_tree_ad_new(&arena->chunks_ad_retained);
	if (malloc_mutex_init(&arena->chunks_mtx))
		return (NULL);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->node_cache_mtx))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (NULL);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}
/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on run
	 * size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region can
	 * fit within the number of pages that would normally be dedicated to a
	 * run for this size class.  Increase the run size until at least one
	 * region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	if (actual_run_size > small_maxrun)
		small_maxrun = actual_run_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}
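/*
 * Worked example for bin_info_run_size_calc() (illustrative): with 4 KiB pages
 * and reg_size == 96, the do/while loop above settles on perfect_run_size ==
 * 12288, the first page multiple evenly divisible by 96, giving 128 regions
 * per run.  Redzones, when enabled, only shrink the region count computed from
 * reg_interval.  bin_info_init() below instantiates arena_bin_info[] for every
 * small size class via the SIZE_CLASSES macro.
 */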
static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}
static bool
small_run_size_init(void)
{

	assert(small_maxrun != 0);

	small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
	    LG_PAGE));
	if (small_run_tab == NULL)
		return (true);

#define	TAB_INIT_bin_yes(index, size) {					\
		arena_bin_info_t *bin_info = &arena_bin_info[index];	\
		small_run_tab[bin_info->run_size >> LG_PAGE] = true;	\
	}
#define	TAB_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC

	return (false);
}
static bool
run_quantize_init(void)
{
	unsigned i;

	run_quantize_max = chunksize + large_pad;

	run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_floor_tab == NULL)
		return (true);

	run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_ceil_tab == NULL)
		return (true);

	for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
		size_t run_size = i << LG_PAGE;

		run_quantize_floor_tab[i-1] =
		    run_quantize_floor_compute(run_size);
		run_quantize_ceil_tab[i-1] =
		    run_quantize_ceil_compute(run_size);
	}

	return (false);
}
bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
	if (small_run_size_init())
		return (true);
	if (run_quantize_init())
		return (true);

	runs_avail_bias = size2index(PAGE);
	runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;

	return (false);
}
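/*
 * Fork handling: arena_prefork() acquires every arena mutex (arena, huge,
 * chunks, node cache, then each bin) so that no lock is held mid-operation
 * across fork(); the postfork functions release (parent) or reinitialize
 * (child) them in the reverse order.
 */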
void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	malloc_mutex_prefork(&arena->huge_mtx);
	malloc_mutex_prefork(&arena->chunks_mtx);
	malloc_mutex_prefork(&arena->node_cache_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->node_cache_mtx);
	malloc_mutex_postfork_parent(&arena->chunks_mtx);
	malloc_mutex_postfork_parent(&arena->huge_mtx);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->node_cache_mtx);
	malloc_mutex_postfork_child(&arena->chunks_mtx);
	malloc_mutex_postfork_child(&arena->huge_mtx);
	malloc_mutex_postfork_child(&arena->lock);
}