#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool opt_thp = true;
static bool thp_initially_huge;
purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;

arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */
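
/*
 * Note: dirty page purging is governed by opt_purge.  In "ratio" mode an
 * arena purges once ndirty exceeds nactive >> lg_dirty_mult, whereas in
 * "decay" mode dirty pages are purged gradually over opt_decay_time seconds
 * according to a smoothstep schedule (see arena_maybe_purge_*() below).
 */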

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk);
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
    arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C const extent_node_t *
arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	return (&chunk->node);
}

JEMALLOC_INLINE_C int
arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
{
	size_t a_sn, b_sn;

	assert(a != NULL);
	assert(b != NULL);

	a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
	b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));

	return ((a_sn > b_sn) - (a_sn < b_sn));
}

JEMALLOC_INLINE_C int
arena_ad_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

JEMALLOC_INLINE_C int
arena_snad_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	int ret;

	assert(a != NULL);
	assert(b != NULL);

	ret = arena_sn_comp(a, b);
	if (ret != 0)
		return (ret);

	ret = arena_ad_comp(a, b);
	return (ret);
}
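
/*
 * arena_snad_comp() orders map_misc elements lexicographically by (serial
 * number, address), so the pairing heaps generated below hand out runs from
 * the oldest chunks at the lowest addresses first; this tends to drain reuse
 * away from newer chunks so they can empty out and be discarded.
 */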

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_snad_comp)

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
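
/*
 * Illustrative example (assuming 4 KiB pages, the default size class
 * spacing, and large_pad == 0): 36 KiB falls between the 32 KiB and 40 KiB
 * page-size classes, so run_quantize_floor(36 KiB) returns 32 KiB while
 * run_quantize_ceil() (below) returns 40 KiB.
 */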

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_insert(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_remove(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}
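
/*
 * runs_avail is an array of pairing heaps, one per page-size class.  Each
 * available run is filed under the class given by run_quantize_floor() of
 * its size, so arena_run_first_best_fit() (below) can start at the best-fit
 * class and scan upward through progressively larger classes.
 */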

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}
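
/*
 * Regions within a small run are laid out at fixed intervals, so region i
 * lives at rpages + reg0_offset + i * reg_interval.  bitmap_sfu() ("set
 * first unset") atomically-in-effect picks the lowest free region index and
 * marks it allocated, which keeps runs densely packed from the front.
 */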

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}
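
/*
 * cactive is maintained at chunk granularity: the stats delta is the
 * difference between CHUNK_CEILING() of the active byte count before and
 * after the update, so page-level changes that don't cross a chunk boundary
 * contribute nothing.
 */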

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
    bool *gdump)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(chunk, &chunk->node, gdump));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	size_t sn;

	malloc_mutex_unlock(tsdn, &arena->lock);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, &sn, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, sn, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL) {
		bool gdump;
		if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
			if (!*commit) {
				/* Undo commit of header. */
				chunk_hooks->decommit(chunk, chunksize, 0,
				    map_bias << LG_PAGE, arena->ind);
			}
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, sn, *zero, *commit);
			chunk = NULL;
		}
		if (config_prof && opt_prof && gdump)
			prof_gdump(tsdn);
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t sn;

	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &arena->lock);

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, &sn, zero, commit, true);
	if (chunk != NULL) {
		bool gdump;
		if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, sn, true);
			return (NULL);
		}
		if (config_prof && opt_prof && gdump) {
			malloc_mutex_unlock(tsdn, &arena->lock);
			prof_gdump(tsdn);
			malloc_mutex_lock(tsdn, &arena->lock);
		}
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}
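
/*
 * Chunk allocation first tries the arena's cache of retained chunks via
 * chunk_alloc_cache() with arena->lock held; only on a miss does
 * arena_chunk_alloc_internal_hard() drop the lock and call out to the
 * (potentially slow) chunk hooks to map fresh memory.
 */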

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	if (config_thp && opt_thp) {
		chunk->hugepage = thp_initially_huge;
	}

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	size_t sn;
	UNUSED bool hugepage JEMALLOC_CC_SILENCE_INIT(false);
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	sn = extent_node_sn_get(&chunk->node);
	if (config_thp && opt_thp) {
		hugepage = chunk->hugepage;
	}
	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}
	if (config_thp && opt_thp && hugepage != thp_initially_huge) {
		/*
		 * Convert chunk back to initial THP state, so that all
		 * subsequent chunk allocations start out in a consistent state.
		 */
		if (thp_initially_huge) {
			pages_huge(chunk, chunksize);
		} else {
			pages_nohuge(chunk, chunksize);
		}
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    sn, committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}
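
/*
 * The most recently emptied chunk is cached as arena->spare rather than
 * being discarded immediately; a previous spare, if any, is discarded in its
 * place.  This one-deep cache keeps a chunk-sized alloc/dalloc cycle from
 * round-tripping through the chunk allocator on every iteration.
 */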

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
    bool *zero, size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, sn, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, size_t *sn, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);
	bool commit = true;

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, sn, zero, &commit, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, sn, zero, csize);
	}

	return (ret);
}
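
/*
 * Stats and nactive are updated optimistically before arena->lock is dropped
 * for the slow allocation path, then rolled back in
 * arena_chunk_alloc_huge_hard() if the chunk hooks fail; concurrent readers
 * may briefly observe the optimistic values.
 */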

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
    size_t sn)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, size_t sn)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    sn, true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, sn, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
		    *sn, *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
	size_t sn;
	bool commit = true;

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, &sn, zero, &commit, true) == NULL);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
		    &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
		    udiff, cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    sn, *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	pszind_t pind, i;

	pind = psz2ind(run_quantize_ceil(size));

	for (i = pind; pind2sz(i) <= chunksize; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    &arena->runs_avail[i]);
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsdn, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}
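
/*
 * lg_dirty_mult bounds the dirty:active page ratio at 1 : 2^lg_dirty_mult in
 * ratio mode; e.g. a value of 3 tolerates one dirty page per eight active
 * pages, while -1 disables ratio-based purging entirely (see
 * arena_maybe_purge_ratio() below).
 */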

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
	nstime_add(&arena->decay.deadline, &arena->decay.interval);
	if (arena->decay.time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
		    nstime_ns(&arena->decay.interval)));
		nstime_add(&arena->decay.deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay.deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay.backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}
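
/*
 * In effect the computed limit is
 *
 *	limit = (sum over i of backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP
 *
 * where h_steps[] holds fixed-point smoothstep values: a cohort of dirty
 * pages enters the backlog at the last slot with weight close to one and
 * slides toward slot 0 as epochs advance (see the memmove() in
 * arena_decay_backlog_update() below), so its contribution to the limit
 * decays smoothly to zero.
 */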

static void
arena_decay_backlog_update_last(arena_t *arena)
{
	size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
	    arena->ndirty - arena->decay.ndirty : 0;
	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}

static void
arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
{

	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(arena);
}

static void
arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay.epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay.interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay.epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	arena_decay_backlog_update(arena, nadvance_u64);
}

static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
{
	size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);

	if (arena->ndirty > ndirty_limit)
		arena_purge_to_limit(tsdn, arena, ndirty_limit);
	arena->decay.ndirty = arena->ndirty;
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
{

	arena_decay_epoch_advance_helper(arena, time);
	arena_decay_epoch_advance_purge(tsdn, arena);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay.time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay.interval, decay_time, 0);
		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay.epoch, 0);
	nstime_update(&arena->decay.epoch);
	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay.ndirty = arena->ndirty;
	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
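
/*
 * Worked example (assuming the default SMOOTHSTEP_NSTEPS of 200): with
 * decay_time = 10 s, epochs are 10 s / 200 = 50 ms apart, and a page that
 * becomes dirty is purged within roughly 10 s, its purge allowance tapering
 * along the smoothstep curve rather than dropping all at once.
 */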

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsdn, &arena->lock);
	decay_time = arena->decay.time;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsdn, arena, threshold);
	}
}

static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
	nstime_t time;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay.time <= 0) {
		if (arena->decay.time == 0)
			arena_purge_to_limit(tsdn, arena, 0);
		return;
	}

	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
	    &time) > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&arena->decay.epoch, &time);
		arena_decay_deadline_init(arena);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so as a result purging only happens during epoch advances.
	 */
	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(tsdn, arena, &time);
}

void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsdn, arena);
	else
		arena_maybe_purge_decay(tsdn, arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			size_t sn;
			bool zero, commit;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			commit = false;
			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &sn,
			    &zero, &commit, false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(tsdn, arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}
1679
1680static size_t
3b2f2976 1681arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
54a0048b
SL
1682 arena_runs_dirty_link_t *purge_runs_sentinel,
1683 extent_node_t *purge_chunks_sentinel)
1a4d82fc
JJ
1684{
1685 size_t npurged, nmadvise;
54a0048b
SL
1686 arena_runs_dirty_link_t *rdelm;
1687 extent_node_t *chunkselm;
1a4d82fc 1688
970d7e83
LB
1689 if (config_stats)
1690 nmadvise = 0;
1691 npurged = 0;
1a4d82fc 1692
3b2f2976 1693 malloc_mutex_unlock(tsdn, &arena->lock);
54a0048b
SL
1694 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1695 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1696 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1697 size_t npages;
1a4d82fc 1698
54a0048b
SL
1699 if (rdelm == &chunkselm->rd) {
1700 /*
1701 * Don't actually purge the chunk here because 1)
1702 * chunkselm is embedded in the chunk and must remain
1703 * valid, and 2) we deallocate the chunk in
1704 * arena_unstash_purged(), where it is destroyed,
1705 * decommitted, or purged, depending on chunk
1706 * deallocation policy.
1707 */
1708 size_t size = extent_node_size_get(chunkselm);
1709 npages = size >> LG_PAGE;
1710 chunkselm = qr_next(chunkselm, cc_link);
1711 } else {
1712 size_t pageind, run_size, flag_unzeroed, flags, i;
1713 bool decommitted;
1714 arena_chunk_t *chunk =
1715 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1716 arena_chunk_map_misc_t *miscelm =
1717 arena_rd_to_miscelm(rdelm);
1718 pageind = arena_miscelm_to_pageind(miscelm);
1719 run_size = arena_mapbits_large_size_get(chunk, pageind);
1720 npages = run_size >> LG_PAGE;
1721
3b2f2976
XL
1722 /*
1723 * If this is the first run purged within chunk, mark
1724 * the chunk as non-THP-capable. This will prevent all
1725 * use of THPs for this chunk until the chunk as a whole
1726 * is deallocated.
1727 */
1728 if (config_thp && opt_thp && chunk->hugepage) {
1729 chunk->hugepage = pages_nohuge(chunk,
1730 chunksize);
1731 }
1732
54a0048b
SL
1733 assert(pageind + npages <= chunk_npages);
1734 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1735 assert(!arena_mapbits_decommitted_get(chunk,
1736 pageind+npages-1));
1737 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1738 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1739 if (decommitted) {
1740 flag_unzeroed = 0;
1741 flags = CHUNK_MAP_DECOMMITTED;
1742 } else {
3b2f2976 1743 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
54a0048b
SL
1744 chunk_hooks, chunk, chunksize, pageind <<
1745 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1746 flags = flag_unzeroed;
1747 }
1748 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1749 flags);
1750 arena_mapbits_large_set(chunk, pageind, run_size,
1751 flags);
7453a54e 1752
54a0048b
SL
1753 /*
1754 * Set the unzeroed flag for internal pages, now that
1755 * chunk_purge_wrapper() has returned whether the pages
1756 * were zeroed as a side effect of purging. This chunk
1757 * map modification is safe even though the arena mutex
1758 * isn't currently owned by this thread, because the run
1759 * is marked as allocated, thus protecting it from being
1760 * modified by any other thread. As long as these
1761 * writes don't perturb the first and last elements'
1762 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1763 */
1764 for (i = 1; i < npages-1; i++) {
1765 arena_mapbits_internal_set(chunk, pageind+i,
1766 flag_unzeroed);
1767 }
970d7e83 1768 }
1a4d82fc 1769
970d7e83
LB
1770 npurged += npages;
1771 if (config_stats)
1772 nmadvise++;
1773 }
3b2f2976 1774 malloc_mutex_lock(tsdn, &arena->lock);
970d7e83 1775
1a4d82fc
JJ
1776 if (config_stats) {
1777 arena->stats.nmadvise += nmadvise;
1778 arena->stats.purged += npurged;
970d7e83
LB
1779 }
1780
1781 return (npurged);
1782}
1783
1a4d82fc 1784static void
3b2f2976 1785arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
54a0048b
SL
1786 arena_runs_dirty_link_t *purge_runs_sentinel,
1787 extent_node_t *purge_chunks_sentinel)
1788{
1789 arena_runs_dirty_link_t *rdelm, *rdelm_next;
1790 extent_node_t *chunkselm;
1791
1792 /* Deallocate chunks/runs. */
1793 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1794 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1795 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1796 rdelm_next = qr_next(rdelm, rd_link);
1797 if (rdelm == &chunkselm->rd) {
1798 extent_node_t *chunkselm_next = qr_next(chunkselm,
1799 cc_link);
1800 void *addr = extent_node_addr_get(chunkselm);
1801 size_t size = extent_node_size_get(chunkselm);
3b2f2976 1802 size_t sn = extent_node_sn_get(chunkselm);
54a0048b
SL
1803 bool zeroed = extent_node_zeroed_get(chunkselm);
1804 bool committed = extent_node_committed_get(chunkselm);
1805 extent_node_dirty_remove(chunkselm);
3b2f2976 1806 arena_node_dalloc(tsdn, arena, chunkselm);
54a0048b 1807 chunkselm = chunkselm_next;
3b2f2976
XL
1808 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
1809 size, sn, zeroed, committed);
54a0048b
SL
1810 } else {
1811 arena_chunk_t *chunk =
1812 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1813 arena_chunk_map_misc_t *miscelm =
1814 arena_rd_to_miscelm(rdelm);
1815 size_t pageind = arena_miscelm_to_pageind(miscelm);
1816 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1817 pageind) != 0);
1818 arena_run_t *run = &miscelm->run;
1819 qr_remove(rdelm, rd_link);
3b2f2976
XL
1820 arena_run_dalloc(tsdn, arena, run, false, true,
1821 decommitted);
54a0048b 1822 }
1a4d82fc 1823 }
970d7e83
LB
1824}
1825
54a0048b
SL
1826/*
1827 * NB: ndirty_limit is interpreted differently depending on opt_purge:
1828 * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
1829 * desired state:
1830 * (arena->ndirty <= ndirty_limit)
1831 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1832 * violating the invariant:
1833 * (arena->ndirty >= ndirty_limit)
1834 */
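/*
 * Worked example (editorial): suppose arena->ndirty == 100 and
 * ndirty_limit == 40.  Under purge_mode_ratio, stashing stops as soon as
 * ndirty would drop to <= 40, so roughly 60 pages are purged and no more.
 * Under purge_mode_decay, stashing is greedy but never lets ndirty fall
 * below 40, so again about 60 pages are purged, stopping early rather than
 * overshooting the limit.
 */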
1835static void
3b2f2976 1836arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
970d7e83 1837{
3b2f2976 1838 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
54a0048b
SL
1839 size_t npurge, npurged;
1840 arena_runs_dirty_link_t purge_runs_sentinel;
1841 extent_node_t purge_chunks_sentinel;
9cc50fc6 1842
54a0048b
SL
1843 arena->purging = true;
1844
1845 /*
1846 * Calls to arena_dirty_count() are disabled even for debug builds
1847 * because overhead grows nonlinearly as memory usage increases.
1848 */
1849 if (false && config_debug) {
1a4d82fc 1850 size_t ndirty = arena_dirty_count(arena);
970d7e83
LB
1851 assert(ndirty == arena->ndirty);
1852 }
54a0048b
SL
1853 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1854 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1855
1856 qr_new(&purge_runs_sentinel, rd_link);
1857 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1858
3b2f2976 1859 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
54a0048b
SL
1860 &purge_runs_sentinel, &purge_chunks_sentinel);
1861 if (npurge == 0)
1862 goto label_return;
3b2f2976
XL
1863 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
1864 &purge_runs_sentinel, &purge_chunks_sentinel);
54a0048b 1865 assert(npurged == npurge);
3b2f2976 1866 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
54a0048b 1867 &purge_chunks_sentinel);
970d7e83
LB
1868
1869 if (config_stats)
1870 arena->stats.npurge++;
1871
54a0048b
SL
1872label_return:
1873 arena->purging = false;
970d7e83
LB
1874}
1875
1876void
3b2f2976 1877arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
970d7e83
LB
1878{
1879
3b2f2976 1880 malloc_mutex_lock(tsdn, &arena->lock);
54a0048b 1881 if (all)
3b2f2976 1882 arena_purge_to_limit(tsdn, arena, 0);
54a0048b 1883 else
3b2f2976
XL
1884 arena_maybe_purge(tsdn, arena);
1885 malloc_mutex_unlock(tsdn, &arena->lock);
1886}
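/*
 * Editorial note: arena_purge() is the external entry point for both purge
 * flavors above; all == true forces a full purge (ndirty_limit == 0), while
 * all == false defers to the ratio/decay thresholds via arena_maybe_purge().
 * In this jemalloc version it is reachable through mallctl (e.g. the
 * "arena.<i>.purge" control), though that wiring lives outside this file.
 */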
1887
1888static void
1889arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1890{
1891 size_t pageind, npages;
1892
1893 cassert(config_prof);
1894 assert(opt_prof);
1895
1896 /*
1897 * Iterate over the allocated runs and remove profiled allocations from
1898 * the sample set.
1899 */
1900 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1901 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1902 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1903 void *ptr = (void *)((uintptr_t)chunk + (pageind
1904 << LG_PAGE));
1905 size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1906 config_prof);
1907
1908 prof_free(tsd, ptr, usize);
1909 npages = arena_mapbits_large_size_get(chunk,
1910 pageind) >> LG_PAGE;
1911 } else {
1912 /* Skip small run. */
1913 size_t binind = arena_mapbits_binind_get(chunk,
1914 pageind);
1915 arena_bin_info_t *bin_info =
1916 &arena_bin_info[binind];
1917 npages = bin_info->run_size >> LG_PAGE;
1918 }
1919 } else {
1920 /* Skip unallocated run. */
1921 npages = arena_mapbits_unallocated_size_get(chunk,
1922 pageind) >> LG_PAGE;
1923 }
1924 assert(pageind + npages <= chunk_npages);
1925 }
1926}
1927
1928void
1929arena_reset(tsd_t *tsd, arena_t *arena)
1930{
1931 unsigned i;
1932 extent_node_t *node;
1933
1934 /*
1935 * Locking in this function is unintuitive. The caller guarantees that
1936 * no concurrent operations are happening in this arena, but there are
1937 * still reasons that some locking is necessary:
1938 *
1939 * - Some of the functions in the transitive closure of calls assume
1940 * appropriate locks are held, and in some cases these locks are
1941 * temporarily dropped to avoid lock order reversal or deadlock due to
1942 * reentry.
1943 * - mallctl("epoch", ...) may concurrently refresh stats. While
1944 * strictly speaking this is a "concurrent operation", disallowing
1945 * stats refreshes would impose an inconvenient burden.
1946 */
1947
1948 /* Remove large allocations from prof sample set. */
1949 if (config_prof && opt_prof) {
1950 ql_foreach(node, &arena->achunks, ql_link) {
1951 arena_achunk_prof_reset(tsd, arena,
1952 extent_node_addr_get(node));
1953 }
1954 }
1955
1956 /* Reset curruns for large size classes. */
1957 if (config_stats) {
1958 for (i = 0; i < nlclasses; i++)
1959 arena->stats.lstats[i].curruns = 0;
1960 }
1961
1962 /* Huge allocations. */
1963 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1964 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1965 ql_last(&arena->huge, ql_link)) {
1966 void *ptr = extent_node_addr_get(node);
1967 size_t usize;
1968
1969 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1970 if (config_stats || (config_prof && opt_prof))
1971 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1972 /* Remove huge allocation from prof sample set. */
1973 if (config_prof && opt_prof)
1974 prof_free(tsd, ptr, usize);
1975 huge_dalloc(tsd_tsdn(tsd), ptr);
1976 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1977 /* Cancel out unwanted effects on stats. */
1978 if (config_stats)
1979 arena_huge_reset_stats_cancel(arena, usize);
1980 }
1981 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1982
1983 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
1984
1985 /* Bins. */
1986 for (i = 0; i < NBINS; i++) {
1987 arena_bin_t *bin = &arena->bins[i];
1988 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1989 bin->runcur = NULL;
1990 arena_run_heap_new(&bin->runs);
1991 if (config_stats) {
1992 bin->stats.curregs = 0;
1993 bin->stats.curruns = 0;
1994 }
1995 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1996 }
1997
1998 /*
1999 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
2000 * chains directly correspond.
2001 */
2002 qr_new(&arena->runs_dirty, rd_link);
2003 for (node = qr_next(&arena->chunks_cache, cc_link);
2004 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
2005 qr_new(&node->rd, rd_link);
2006 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
2007 }
2008
2009 /* Arena chunks. */
2010 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
2011 ql_last(&arena->achunks, ql_link)) {
2012 ql_remove(&arena->achunks, node, ql_link);
2013 arena_chunk_discard(tsd_tsdn(tsd), arena,
2014 extent_node_addr_get(node));
2015 }
2016
2017 /* Spare. */
2018 if (arena->spare != NULL) {
2019 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
2020 arena->spare = NULL;
2021 }
2022
2023 assert(!arena->purging);
2024 arena->nactive = 0;
2025
2026 for (i = 0; i < NPSIZES; i++)
2027 arena_run_heap_new(&arena->runs_avail[i]);
2028
2029 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
970d7e83
LB
2030}
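/*
 * Editorial summary of arena_reset() above: (1) drop profiled allocations
 * from the sample set, (2) zero the large-class curruns stats, (3) free all
 * huge allocations, (4) clear every bin's runcur and non-full-run heap,
 * (5) rebuild runs_dirty to mirror chunks_cache, (6) discard all arena
 * chunks plus the spare, and (7) reset nactive and the runs_avail heaps.
 */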
2031
2032static void
1a4d82fc 2033arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
54a0048b
SL
2034 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
2035 size_t flag_decommitted)
970d7e83 2036{
1a4d82fc
JJ
2037 size_t size = *p_size;
2038 size_t run_ind = *p_run_ind;
2039 size_t run_pages = *p_run_pages;
970d7e83
LB
2040
2041 /* Try to coalesce forward. */
2042 if (run_ind + run_pages < chunk_npages &&
2043 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
54a0048b
SL
2044 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
2045 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
2046 flag_decommitted) {
970d7e83
LB
2047 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
2048 run_ind+run_pages);
2049 size_t nrun_pages = nrun_size >> LG_PAGE;
2050
2051 /*
2052 * Remove successor from runs_avail; the coalesced run is
2053 * inserted later.
2054 */
2055 assert(arena_mapbits_unallocated_size_get(chunk,
2056 run_ind+run_pages+nrun_pages-1) == nrun_size);
2057 assert(arena_mapbits_dirty_get(chunk,
2058 run_ind+run_pages+nrun_pages-1) == flag_dirty);
54a0048b
SL
2059 assert(arena_mapbits_decommitted_get(chunk,
2060 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
1a4d82fc
JJ
2061 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
2062
54a0048b
SL
2063 /*
2064 * If the successor is dirty, remove it from the set of dirty
2065 * pages.
2066 */
1a4d82fc 2067 if (flag_dirty != 0) {
54a0048b 2068 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
1a4d82fc
JJ
2069 nrun_pages);
2070 }
970d7e83
LB
2071
2072 size += nrun_size;
2073 run_pages += nrun_pages;
2074
2075 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2076 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2077 size);
2078 }
2079
2080 /* Try to coalesce backward. */
1a4d82fc
JJ
2081 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2082 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
54a0048b
SL
2083 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2084 flag_decommitted) {
970d7e83
LB
2085 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2086 run_ind-1);
2087 size_t prun_pages = prun_size >> LG_PAGE;
2088
2089 run_ind -= prun_pages;
2090
2091 /*
2092 * Remove predecessor from runs_avail; the coalesced run is
2093 * inserted later.
2094 */
2095 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2096 prun_size);
2097 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
54a0048b
SL
2098 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2099 flag_decommitted);
1a4d82fc
JJ
2100 arena_avail_remove(arena, chunk, run_ind, prun_pages);
2101
54a0048b
SL
2102 /*
2103 * If the predecessor is dirty, remove it from the set of dirty
2104 * pages.
2105 */
2106 if (flag_dirty != 0) {
2107 arena_run_dirty_remove(arena, chunk, run_ind,
2108 prun_pages);
2109 }
970d7e83
LB
2110
2111 size += prun_size;
2112 run_pages += prun_pages;
2113
2114 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2115 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2116 size);
2117 }
2118
1a4d82fc
JJ
2119 *p_size = size;
2120 *p_run_ind = run_ind;
2121 *p_run_pages = run_pages;
2122}
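/*
 * Illustrative picture of the coalescing above (editorial):
 *
 *	before:  [ pred (free) ][ run being freed ][ succ (free) ]
 *	after:   [            one coalesced free run             ]
 *
 * A neighbor is merged only if it is unallocated and its dirty and
 * decommitted map bits match the freed run's flags.
 */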
2123
54a0048b
SL
2124static size_t
2125arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2126 size_t run_ind)
9cc50fc6 2127{
54a0048b 2128 size_t size;
9cc50fc6 2129
9cc50fc6
SL
2130 assert(run_ind >= map_bias);
2131 assert(run_ind < chunk_npages);
54a0048b 2132
7453a54e
SL
2133 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2134 size = arena_mapbits_large_size_get(chunk, run_ind);
54a0048b 2135 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
7453a54e
SL
2136 run_ind+(size>>LG_PAGE)-1) == 0);
2137 } else {
54a0048b 2138 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
7453a54e
SL
2139 size = bin_info->run_size;
2140 }
54a0048b
SL
2141
2142 return (size);
2143}
2144
2145static void
3b2f2976
XL
2146arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
2147 bool cleaned, bool decommitted)
54a0048b
SL
2148{
2149 arena_chunk_t *chunk;
2150 arena_chunk_map_misc_t *miscelm;
2151 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
2152
2153 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2154 miscelm = arena_run_to_miscelm(run);
2155 run_ind = arena_miscelm_to_pageind(miscelm);
2156 assert(run_ind >= map_bias);
2157 assert(run_ind < chunk_npages);
2158 size = arena_run_size_get(arena, chunk, run, run_ind);
1a4d82fc 2159 run_pages = (size >> LG_PAGE);
54a0048b 2160 arena_nactive_sub(arena, run_pages);
1a4d82fc
JJ
2161
2162 /*
2163 * The run is dirty if the caller claims to have dirtied it, as well as
2164 * if it was already dirty before being allocated and the caller
2165 * doesn't claim to have cleaned it.
2166 */
2167 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2168 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
54a0048b
SL
2169 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2170 != 0)
1a4d82fc
JJ
2171 dirty = true;
2172 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
54a0048b 2173 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
1a4d82fc
JJ
2174
2175 /* Mark pages as unallocated in the chunk map. */
54a0048b
SL
2176 if (dirty || decommitted) {
2177 size_t flags = flag_dirty | flag_decommitted;
2178 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
1a4d82fc 2179 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
54a0048b 2180 flags);
1a4d82fc
JJ
2181 } else {
2182 arena_mapbits_unallocated_set(chunk, run_ind, size,
2183 arena_mapbits_unzeroed_get(chunk, run_ind));
2184 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2185 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2186 }
2187
54a0048b
SL
2188 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2189 flag_dirty, flag_decommitted);
1a4d82fc 2190
970d7e83
LB
2191 /* Insert into runs_avail, now that coalescing is complete. */
2192 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2193 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2194 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2195 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
54a0048b
SL
2196 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2197 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
1a4d82fc
JJ
2198 arena_avail_insert(arena, chunk, run_ind, run_pages);
2199
2200 if (dirty)
54a0048b 2201 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
970d7e83
LB
2202
2203 /* Deallocate chunk if it is now completely unused. */
54a0048b 2204 if (size == arena_maxrun) {
970d7e83 2205 assert(run_ind == map_bias);
54a0048b 2206 assert(run_pages == (arena_maxrun >> LG_PAGE));
3b2f2976 2207 arena_chunk_dalloc(tsdn, arena, chunk);
970d7e83
LB
2208 }
2209
2210 /*
2211 * It is okay to do dirty page processing here even if the chunk was
2212 * deallocated above, since in that case it is the spare. Waiting
2213 * until after possible chunk deallocation to do dirty processing
2214 * allows for an old spare to be fully deallocated, thus decreasing the
2215 * chances of spuriously crossing the dirty page purging threshold.
2216 */
2217 if (dirty)
3b2f2976 2218 arena_maybe_purge(tsdn, arena);
970d7e83
LB
2219}
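/*
 * Editorial summary of the dalloc pipeline above: mark the pages
 * unallocated, coalesce with free neighbors, insert the result into
 * runs_avail (and runs_dirty if dirty), deallocate the chunk if the run now
 * spans arena_maxrun, and finally consider purging.  The purge step is
 * deliberately last so a retired spare chunk is already gone when the dirty
 * threshold is evaluated.
 */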
2220
2221static void
3b2f2976
XL
2222arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2223 arena_run_t *run, size_t oldsize, size_t newsize)
970d7e83 2224{
1a4d82fc
JJ
2225 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2226 size_t pageind = arena_miscelm_to_pageind(miscelm);
970d7e83
LB
2227 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
2228 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
54a0048b
SL
2229 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2230 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2231 CHUNK_MAP_UNZEROED : 0;
970d7e83
LB
2232
2233 assert(oldsize > newsize);
2234
2235 /*
2236 * Update the chunk map so that arena_run_dalloc() can treat the
2237 * leading run as separately allocated. Set the last element of each
2238 * run first, in case of single-page runs.
2239 */
2240 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
54a0048b
SL
2241 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2242 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2243 pageind+head_npages-1)));
2244 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2245 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
970d7e83
LB
2246
2247 if (config_debug) {
2248 UNUSED size_t tail_npages = newsize >> LG_PAGE;
2249 assert(arena_mapbits_large_size_get(chunk,
2250 pageind+head_npages+tail_npages-1) == 0);
2251 assert(arena_mapbits_dirty_get(chunk,
2252 pageind+head_npages+tail_npages-1) == flag_dirty);
2253 }
2254 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
54a0048b
SL
2255 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2256 pageind+head_npages)));
970d7e83 2257
3b2f2976
XL
2258 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
2259 0));
970d7e83
LB
2260}
2261
2262static void
3b2f2976
XL
2263arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2264 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
970d7e83 2265{
1a4d82fc
JJ
2266 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2267 size_t pageind = arena_miscelm_to_pageind(miscelm);
970d7e83
LB
2268 size_t head_npages = newsize >> LG_PAGE;
2269 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
54a0048b
SL
2270 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2271 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2272 CHUNK_MAP_UNZEROED : 0;
1a4d82fc
JJ
2273 arena_chunk_map_misc_t *tail_miscelm;
2274 arena_run_t *tail_run;
970d7e83
LB
2275
2276 assert(oldsize > newsize);
2277
2278 /*
2279 * Update the chunk map so that arena_run_dalloc() can treat the
2280 * trailing run as separately allocated. Set the last element of each
2281 * run first, in case of single-page runs.
2282 */
2283 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
54a0048b
SL
2284 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2285 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2286 pageind+head_npages-1)));
2287 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2288 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
970d7e83
LB
2289
2290 if (config_debug) {
2291 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2292 assert(arena_mapbits_large_size_get(chunk,
2293 pageind+head_npages+tail_npages-1) == 0);
2294 assert(arena_mapbits_dirty_get(chunk,
2295 pageind+head_npages+tail_npages-1) == flag_dirty);
2296 }
2297 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
54a0048b
SL
2298 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2299 pageind+head_npages)));
970d7e83 2300
3b2f2976 2301 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
1a4d82fc 2302 tail_run = &tail_miscelm->run;
3b2f2976
XL
2303 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
2304 != 0));
970d7e83
LB
2305}
2306
2307static void
2308arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2309{
1a4d82fc 2310 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
970d7e83 2311
3b2f2976 2312 arena_run_heap_insert(&bin->runs, miscelm);
970d7e83
LB
2313}
2314
2315static arena_run_t *
2316arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2317{
3b2f2976
XL
2318 arena_chunk_map_misc_t *miscelm;
2319
2320 miscelm = arena_run_heap_remove_first(&bin->runs);
2321 if (miscelm == NULL)
2322 return (NULL);
2323 if (config_stats)
2324 bin->stats.reruns++;
2325
2326 return (&miscelm->run);
970d7e83
LB
2327}
2328
2329static arena_run_t *
3b2f2976 2330arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
970d7e83
LB
2331{
2332 arena_run_t *run;
54a0048b 2333 szind_t binind;
970d7e83
LB
2334 arena_bin_info_t *bin_info;
2335
2336 /* Look for a usable run. */
2337 run = arena_bin_nonfull_run_tryget(bin);
2338 if (run != NULL)
2339 return (run);
2340 /* No existing runs have any space available. */
2341
2342 binind = arena_bin_index(arena, bin);
2343 bin_info = &arena_bin_info[binind];
2344
2345 /* Allocate a new run. */
3b2f2976 2346 malloc_mutex_unlock(tsdn, &bin->lock);
970d7e83 2347 /******************************/
3b2f2976
XL
2348 malloc_mutex_lock(tsdn, &arena->lock);
2349 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
970d7e83 2350 if (run != NULL) {
970d7e83 2351 /* Initialize run internals. */
54a0048b 2352 run->binind = binind;
970d7e83 2353 run->nfree = bin_info->nregs;
1a4d82fc 2354 bitmap_init(run->bitmap, &bin_info->bitmap_info);
970d7e83 2355 }
3b2f2976 2356 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83 2357 /********************************/
3b2f2976 2358 malloc_mutex_lock(tsdn, &bin->lock);
970d7e83
LB
2359 if (run != NULL) {
2360 if (config_stats) {
2361 bin->stats.nruns++;
2362 bin->stats.curruns++;
2363 }
2364 return (run);
2365 }
2366
2367 /*
1a4d82fc 2368 * arena_run_alloc_small() failed, but another thread may have made
970d7e83
LB
2369 * sufficient memory available while this one dropped bin->lock above,
2370 * so search one more time.
2371 */
2372 run = arena_bin_nonfull_run_tryget(bin);
2373 if (run != NULL)
2374 return (run);
2375
2376 return (NULL);
2377}
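/*
 * Editorial note: bin->lock is dropped around arena_run_alloc_small() above,
 * presumably to respect the bin -> arena lock ordering.  A hypothetical
 * interleaving shows why the final tryget() retry exists:
 *
 *	T1: unlock bin->lock; arena_run_alloc_small() fails (OOM)
 *	T2: frees a region, inserting a non-full run into bin->runs
 *	T1: relock bin->lock; the retry can now return T2's run
 */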
2378
2379/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2380static void *
3b2f2976 2381arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
970d7e83 2382{
54a0048b 2383 szind_t binind;
970d7e83
LB
2384 arena_bin_info_t *bin_info;
2385 arena_run_t *run;
2386
2387 binind = arena_bin_index(arena, bin);
2388 bin_info = &arena_bin_info[binind];
2389 bin->runcur = NULL;
3b2f2976 2390 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
970d7e83
LB
2391 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2392 /*
2393 * Another thread updated runcur while this one ran without the
2394 * bin lock in arena_bin_nonfull_run_get().
2395 */
54a0048b 2396 void *ret;
970d7e83
LB
2397 assert(bin->runcur->nfree > 0);
2398 ret = arena_run_reg_alloc(bin->runcur, bin_info);
2399 if (run != NULL) {
2400 arena_chunk_t *chunk;
2401
2402 /*
1a4d82fc
JJ
2403 * arena_run_alloc_small() may have allocated run, or
2404 * it may have pulled run from the bin's run tree.
2405 * Therefore it is unsafe to make any assumptions about
2406 * how run has previously been used, and
2407 * arena_bin_lower_run() must be called, as if a region
2408 * were just deallocated from the run.
970d7e83
LB
2409 */
2410 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
3b2f2976
XL
2411 if (run->nfree == bin_info->nregs) {
2412 arena_dalloc_bin_run(tsdn, arena, chunk, run,
2413 bin);
2414 } else
2415 arena_bin_lower_run(arena, run, bin);
970d7e83
LB
2416 }
2417 return (ret);
2418 }
2419
2420 if (run == NULL)
2421 return (NULL);
2422
2423 bin->runcur = run;
2424
2425 assert(bin->runcur->nfree > 0);
2426
2427 return (arena_run_reg_alloc(bin->runcur, bin_info));
2428}
2429
2430void
3b2f2976 2431arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
54a0048b 2432 szind_t binind, uint64_t prof_accumbytes)
970d7e83
LB
2433{
2434 unsigned i, nfill;
2435 arena_bin_t *bin;
970d7e83
LB
2436
2437 assert(tbin->ncached == 0);
2438
3b2f2976
XL
2439 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2440 prof_idump(tsdn);
970d7e83 2441 bin = &arena->bins[binind];
3b2f2976 2442 malloc_mutex_lock(tsdn, &bin->lock);
970d7e83
LB
2443 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2444 tbin->lg_fill_div); i < nfill; i++) {
54a0048b
SL
2445 arena_run_t *run;
2446 void *ptr;
970d7e83
LB
2447 if ((run = bin->runcur) != NULL && run->nfree > 0)
2448 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2449 else
3b2f2976 2450 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
1a4d82fc
JJ
2451 if (ptr == NULL) {
2452 /*
2453 * OOM. tbin->avail isn't yet filled down to its first
2454 * element, so the successful allocations (if any) must
54a0048b 2455 * be moved just before tbin->avail before bailing out.
1a4d82fc
JJ
2456 */
2457 if (i > 0) {
54a0048b 2458 memmove(tbin->avail - i, tbin->avail - nfill,
1a4d82fc
JJ
2459 i * sizeof(void *));
2460 }
970d7e83 2461 break;
1a4d82fc 2462 }
54a0048b 2463 if (config_fill && unlikely(opt_junk_alloc)) {
970d7e83
LB
2464 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2465 true);
2466 }
2467 /* Insert such that low regions get used first. */
54a0048b 2468 *(tbin->avail - nfill + i) = ptr;
970d7e83
LB
2469 }
2470 if (config_stats) {
970d7e83
LB
2471 bin->stats.nmalloc += i;
2472 bin->stats.nrequests += tbin->tstats.nrequests;
54a0048b 2473 bin->stats.curregs += i;
970d7e83
LB
2474 bin->stats.nfills++;
2475 tbin->tstats.nrequests = 0;
2476 }
3b2f2976 2477 malloc_mutex_unlock(tsdn, &bin->lock);
970d7e83 2478 tbin->ncached = i;
3b2f2976 2479 arena_decay_tick(tsdn, arena);
970d7e83
LB
2480}
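/*
 * Editorial sketch of the fill layout (illustrative values: ncached_max == 8,
 * lg_fill_div == 1, hence nfill == 4).  Pointers are written to
 * tbin->avail[-nfill .. -1], lowest regions first.  If OOM strikes after two
 * successes (i == 2), the memmove above shifts them to end at avail[-1]:
 *
 *	before:  avail[-4]=p0  avail[-3]=p1  avail[-2]=?  avail[-1]=?
 *	after:   avail[-2]=p0  avail[-1]=p1          (ncached == 2)
 */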
2481
2482void
2483arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2484{
2485
3b2f2976
XL
2486 size_t redzone_size = bin_info->redzone_size;
2487
970d7e83 2488 if (zero) {
3b2f2976
XL
2489 memset((void *)((uintptr_t)ptr - redzone_size),
2490 JEMALLOC_ALLOC_JUNK, redzone_size);
2491 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2492 JEMALLOC_ALLOC_JUNK, redzone_size);
970d7e83 2493 } else {
3b2f2976
XL
2494 memset((void *)((uintptr_t)ptr - redzone_size),
2495 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
970d7e83
LB
2496 }
2497}
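/*
 * Editorial diagram of the region layout assumed above (redzones enabled):
 *
 *	ptr - redzone_size       ptr           ptr + reg_size
 *	|----- redzone -----|--- data ---|----- redzone/pad -----|
 *	|<------------------- reg_interval -------------------->|
 *
 * With zero == true only the redzones are junk-filled (the data area must
 * stay zeroed); otherwise the whole interval is junk-filled at once.
 */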
2498
1a4d82fc
JJ
2499#ifdef JEMALLOC_JET
2500#undef arena_redzone_corruption
3b2f2976 2501#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
1a4d82fc
JJ
2502#endif
2503static void
2504arena_redzone_corruption(void *ptr, size_t usize, bool after,
2505 size_t offset, uint8_t byte)
2506{
2507
2508 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2509 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2510 after ? "after" : "before", ptr, usize, byte);
2511}
2512#ifdef JEMALLOC_JET
2513#undef arena_redzone_corruption
2514#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2515arena_redzone_corruption_t *arena_redzone_corruption =
3b2f2976 2516 JEMALLOC_N(n_arena_redzone_corruption);
1a4d82fc
JJ
2517#endif
2518
2519static void
2520arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
970d7e83 2521{
970d7e83
LB
2522 bool error = false;
2523
54a0048b
SL
2524 if (opt_junk_alloc) {
2525 size_t size = bin_info->reg_size;
2526 size_t redzone_size = bin_info->redzone_size;
2527 size_t i;
2528
2529 for (i = 1; i <= redzone_size; i++) {
2530 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
3b2f2976 2531 if (*byte != JEMALLOC_ALLOC_JUNK) {
54a0048b
SL
2532 error = true;
2533 arena_redzone_corruption(ptr, size, false, i,
2534 *byte);
2535 if (reset)
3b2f2976 2536 *byte = JEMALLOC_ALLOC_JUNK;
54a0048b 2537 }
970d7e83 2538 }
54a0048b
SL
2539 for (i = 0; i < redzone_size; i++) {
2540 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
3b2f2976 2541 if (*byte != JEMALLOC_ALLOC_JUNK) {
54a0048b
SL
2542 error = true;
2543 arena_redzone_corruption(ptr, size, true, i,
2544 *byte);
2545 if (reset)
3b2f2976 2546 *byte = JEMALLOC_ALLOC_JUNK;
54a0048b 2547 }
970d7e83
LB
2548 }
2549 }
54a0048b 2550
970d7e83
LB
2551 if (opt_abort && error)
2552 abort();
1a4d82fc 2553}
970d7e83 2554
1a4d82fc
JJ
2555#ifdef JEMALLOC_JET
2556#undef arena_dalloc_junk_small
3b2f2976 2557#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
1a4d82fc
JJ
2558#endif
2559void
2560arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2561{
2562 size_t redzone_size = bin_info->redzone_size;
2563
2564 arena_redzones_validate(ptr, bin_info, false);
3b2f2976 2565 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
970d7e83
LB
2566 bin_info->reg_interval);
2567}
1a4d82fc
JJ
2568#ifdef JEMALLOC_JET
2569#undef arena_dalloc_junk_small
2570#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2571arena_dalloc_junk_small_t *arena_dalloc_junk_small =
3b2f2976 2572 JEMALLOC_N(n_arena_dalloc_junk_small);
1a4d82fc
JJ
2573#endif
2574
2575void
2576arena_quarantine_junk_small(void *ptr, size_t usize)
2577{
54a0048b 2578 szind_t binind;
1a4d82fc
JJ
2579 arena_bin_info_t *bin_info;
2580 cassert(config_fill);
54a0048b 2581 assert(opt_junk_free);
1a4d82fc
JJ
2582 assert(opt_quarantine);
2583 assert(usize <= SMALL_MAXCLASS);
2584
54a0048b 2585 binind = size2index(usize);
1a4d82fc
JJ
2586 bin_info = &arena_bin_info[binind];
2587 arena_redzones_validate(ptr, bin_info, true);
2588}
970d7e83 2589
54a0048b 2590static void *
3b2f2976 2591arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
970d7e83
LB
2592{
2593 void *ret;
2594 arena_bin_t *bin;
54a0048b 2595 size_t usize;
970d7e83 2596 arena_run_t *run;
970d7e83 2597
970d7e83
LB
2598 assert(binind < NBINS);
2599 bin = &arena->bins[binind];
54a0048b 2600 usize = index2size(binind);
970d7e83 2601
3b2f2976 2602 malloc_mutex_lock(tsdn, &bin->lock);
970d7e83
LB
2603 if ((run = bin->runcur) != NULL && run->nfree > 0)
2604 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2605 else
3b2f2976 2606 ret = arena_bin_malloc_hard(tsdn, arena, bin);
970d7e83
LB
2607
2608 if (ret == NULL) {
3b2f2976 2609 malloc_mutex_unlock(tsdn, &bin->lock);
970d7e83
LB
2610 return (NULL);
2611 }
2612
2613 if (config_stats) {
970d7e83
LB
2614 bin->stats.nmalloc++;
2615 bin->stats.nrequests++;
54a0048b 2616 bin->stats.curregs++;
970d7e83 2617 }
3b2f2976
XL
2618 malloc_mutex_unlock(tsdn, &bin->lock);
2619 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2620 prof_idump(tsdn);
970d7e83 2621
1a4d82fc 2622 if (!zero) {
970d7e83 2623 if (config_fill) {
54a0048b 2624 if (unlikely(opt_junk_alloc)) {
970d7e83
LB
2625 arena_alloc_junk_small(ret,
2626 &arena_bin_info[binind], false);
1a4d82fc 2627 } else if (unlikely(opt_zero))
54a0048b 2628 memset(ret, 0, usize);
970d7e83 2629 }
54a0048b 2630 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
970d7e83 2631 } else {
54a0048b 2632 if (config_fill && unlikely(opt_junk_alloc)) {
970d7e83
LB
2633 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2634 true);
2635 }
54a0048b
SL
2636 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2637 memset(ret, 0, usize);
970d7e83 2638 }
970d7e83 2639
3b2f2976 2640 arena_decay_tick(tsdn, arena);
970d7e83
LB
2641 return (ret);
2642}
2643
2644void *
3b2f2976 2645arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
970d7e83
LB
2646{
2647 void *ret;
54a0048b
SL
2648 size_t usize;
2649 uintptr_t random_offset;
1a4d82fc
JJ
2650 arena_run_t *run;
2651 arena_chunk_map_misc_t *miscelm;
54a0048b 2652 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
970d7e83
LB
2653
2654 /* Large allocation. */
54a0048b 2655 usize = index2size(binind);
3b2f2976 2656 malloc_mutex_lock(tsdn, &arena->lock);
54a0048b
SL
2657 if (config_cache_oblivious) {
2658 uint64_t r;
2659
2660 /*
2661 * Compute a uniformly distributed offset within the first page
2662 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
2663 * for 4 KiB pages and 64-byte cachelines.
2664 */
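/*
 * Worked example (editorial): with LG_PAGE == 12 and LG_CACHELINE == 6,
 * prng_lg_range_zu() below yields r in [0 .. 64), so random_offset takes
 * one of the 64 values {0, 64, 128, ..., 4032}.
 */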
3b2f2976
XL
2665 r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
2666 LG_CACHELINE, false);
54a0048b
SL
2667 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2668 } else
2669 random_offset = 0;
3b2f2976 2670 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
1a4d82fc 2671 if (run == NULL) {
3b2f2976 2672 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83
LB
2673 return (NULL);
2674 }
1a4d82fc 2675 miscelm = arena_run_to_miscelm(run);
54a0048b
SL
2676 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2677 random_offset);
970d7e83 2678 if (config_stats) {
54a0048b
SL
2679 szind_t index = binind - NBINS;
2680
970d7e83
LB
2681 arena->stats.nmalloc_large++;
2682 arena->stats.nrequests_large++;
54a0048b
SL
2683 arena->stats.allocated_large += usize;
2684 arena->stats.lstats[index].nmalloc++;
2685 arena->stats.lstats[index].nrequests++;
2686 arena->stats.lstats[index].curruns++;
970d7e83
LB
2687 }
2688 if (config_prof)
54a0048b 2689 idump = arena_prof_accum_locked(arena, usize);
3b2f2976 2690 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83 2691 if (config_prof && idump)
3b2f2976 2692 prof_idump(tsdn);
970d7e83 2693
1a4d82fc 2694 if (!zero) {
970d7e83 2695 if (config_fill) {
54a0048b 2696 if (unlikely(opt_junk_alloc))
3b2f2976 2697 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
1a4d82fc 2698 else if (unlikely(opt_zero))
54a0048b 2699 memset(ret, 0, usize);
970d7e83
LB
2700 }
2701 }
2702
3b2f2976 2703 arena_decay_tick(tsdn, arena);
970d7e83
LB
2704 return (ret);
2705}
2706
7453a54e 2707void *
3b2f2976
XL
2708arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
2709 bool zero)
54a0048b
SL
2710{
2711
3b2f2976
XL
2712 assert(!tsdn_null(tsdn) || arena != NULL);
2713
2714 if (likely(!tsdn_null(tsdn)))
2715 arena = arena_choose(tsdn_tsd(tsdn), arena);
54a0048b
SL
2716 if (unlikely(arena == NULL))
2717 return (NULL);
2718
2719 if (likely(size <= SMALL_MAXCLASS))
3b2f2976 2720 return (arena_malloc_small(tsdn, arena, ind, zero));
54a0048b 2721 if (likely(size <= large_maxclass))
3b2f2976
XL
2722 return (arena_malloc_large(tsdn, arena, ind, zero));
2723 assert(index2size(ind) >= chunksize);
2724 return (huge_malloc(tsdn, arena, index2size(ind), zero));
54a0048b
SL
2725}
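/*
 * Editorial summary: dispatch above is purely size-driven --
 *
 *	size <= SMALL_MAXCLASS  -> arena_malloc_small()  (bin/run regions)
 *	size <= large_maxclass  -> arena_malloc_large()  (page runs)
 *	otherwise               -> huge_malloc()         (dedicated chunks)
 */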
2726
2727/* Only handles large allocations that require more than page alignment. */
2728static void *
3b2f2976 2729arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
54a0048b 2730 bool zero)
970d7e83
LB
2731{
2732 void *ret;
2733 size_t alloc_size, leadsize, trailsize;
2734 arena_run_t *run;
2735 arena_chunk_t *chunk;
1a4d82fc
JJ
2736 arena_chunk_map_misc_t *miscelm;
2737 void *rpages;
970d7e83 2738
3b2f2976 2739 assert(!tsdn_null(tsdn) || arena != NULL);
54a0048b
SL
2740 assert(usize == PAGE_CEILING(usize));
2741
3b2f2976
XL
2742 if (likely(!tsdn_null(tsdn)))
2743 arena = arena_choose(tsdn_tsd(tsdn), arena);
54a0048b
SL
2744 if (unlikely(arena == NULL))
2745 return (NULL);
970d7e83
LB
2746
2747 alignment = PAGE_CEILING(alignment);
54a0048b 2748 alloc_size = usize + large_pad + alignment - PAGE;
970d7e83 2749
3b2f2976
XL
2750 malloc_mutex_lock(tsdn, &arena->lock);
2751 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
970d7e83 2752 if (run == NULL) {
3b2f2976 2753 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83
LB
2754 return (NULL);
2755 }
2756 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1a4d82fc
JJ
2757 miscelm = arena_run_to_miscelm(run);
2758 rpages = arena_miscelm_to_rpages(miscelm);
970d7e83 2759
1a4d82fc
JJ
2760 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2761 (uintptr_t)rpages;
54a0048b
SL
2762 assert(alloc_size >= leadsize + usize);
2763 trailsize = alloc_size - leadsize - usize - large_pad;
970d7e83 2764 if (leadsize != 0) {
1a4d82fc
JJ
2765 arena_chunk_map_misc_t *head_miscelm = miscelm;
2766 arena_run_t *head_run = run;
2767
3b2f2976 2768 miscelm = arena_miscelm_get_mutable(chunk,
1a4d82fc
JJ
2769 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2770 LG_PAGE));
2771 run = &miscelm->run;
2772
3b2f2976 2773 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
1a4d82fc 2774 alloc_size - leadsize);
970d7e83
LB
2775 }
2776 if (trailsize != 0) {
3b2f2976 2777 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
54a0048b
SL
2778 trailsize, usize + large_pad, false);
2779 }
2780 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2781 size_t run_ind =
2782 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
2783 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2784 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2785 run_ind) != 0);
2786
2787 assert(decommitted); /* Cause of OOM. */
3b2f2976
XL
2788 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2789 malloc_mutex_unlock(tsdn, &arena->lock);
54a0048b 2790 return (NULL);
970d7e83 2791 }
1a4d82fc 2792 ret = arena_miscelm_to_rpages(miscelm);
970d7e83
LB
2793
2794 if (config_stats) {
54a0048b
SL
2795 szind_t index = size2index(usize) - NBINS;
2796
970d7e83
LB
2797 arena->stats.nmalloc_large++;
2798 arena->stats.nrequests_large++;
54a0048b
SL
2799 arena->stats.allocated_large += usize;
2800 arena->stats.lstats[index].nmalloc++;
2801 arena->stats.lstats[index].nrequests++;
2802 arena->stats.lstats[index].curruns++;
970d7e83 2803 }
3b2f2976 2804 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83 2805
1a4d82fc 2806 if (config_fill && !zero) {
54a0048b 2807 if (unlikely(opt_junk_alloc))
3b2f2976 2808 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
1a4d82fc 2809 else if (unlikely(opt_zero))
54a0048b
SL
2810 memset(ret, 0, usize);
2811 }
3b2f2976 2812 arena_decay_tick(tsdn, arena);
54a0048b
SL
2813 return (ret);
2814}
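/*
 * Editorial picture of the alignment handling above: a run of
 * usize + large_pad + alignment - PAGE bytes is over-allocated, and the
 * misaligned head plus any unused tail are trimmed back into the arena:
 *
 *	[ leadsize ][ usize + large_pad ][ trailsize ]
 *	  trim_head     returned ret       trim_tail
 */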
2815
2816void *
3b2f2976 2817arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
54a0048b
SL
2818 bool zero, tcache_t *tcache)
2819{
2820 void *ret;
2821
2822 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2823 && (usize & PAGE_MASK) == 0))) {
2824 /* Small; alignment doesn't require special run placement. */
3b2f2976 2825 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
54a0048b
SL
2826 tcache, true);
2827 } else if (usize <= large_maxclass && alignment <= PAGE) {
2828 /*
2829 * Large; alignment doesn't require special run placement.
2830 * However, the cached pointer may be at a random offset from
2831 * the base of the run, so do some bit manipulation to retrieve
2832 * the base.
2833 */
3b2f2976 2834 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
54a0048b
SL
2835 tcache, true);
2836 if (config_cache_oblivious)
2837 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2838 } else {
2839 if (likely(usize <= large_maxclass)) {
3b2f2976 2840 ret = arena_palloc_large(tsdn, arena, usize, alignment,
54a0048b
SL
2841 zero);
2842 } else if (likely(alignment <= chunksize))
3b2f2976 2843 ret = huge_malloc(tsdn, arena, usize, zero);
54a0048b 2844 else {
3b2f2976 2845 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
54a0048b 2846 }
970d7e83
LB
2847 }
2848 return (ret);
2849}
2850
2851void
3b2f2976 2852arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
970d7e83
LB
2853{
2854 arena_chunk_t *chunk;
54a0048b
SL
2855 size_t pageind;
2856 szind_t binind;
970d7e83
LB
2857
2858 cassert(config_prof);
2859 assert(ptr != NULL);
2860 assert(CHUNK_ADDR2BASE(ptr) != ptr);
3b2f2976
XL
2861 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2862 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
970d7e83
LB
2863 assert(size <= SMALL_MAXCLASS);
2864
2865 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2866 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
54a0048b 2867 binind = size2index(size);
970d7e83
LB
2868 assert(binind < NBINS);
2869 arena_mapbits_large_binind_set(chunk, pageind, binind);
2870
3b2f2976
XL
2871 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2872 assert(isalloc(tsdn, ptr, true) == size);
970d7e83
LB
2873}
2874
2875static void
2876arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2877 arena_bin_t *bin)
2878{
2879
2880 /* Dissociate run from bin. */
2881 if (run == bin->runcur)
2882 bin->runcur = NULL;
2883 else {
54a0048b
SL
2884 szind_t binind = arena_bin_index(extent_node_arena_get(
2885 &chunk->node), bin);
970d7e83
LB
2886 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2887
3b2f2976
XL
2888 /*
2889 * The following block's conditional is necessary because if the
2890 * run only contains one region, then it never gets inserted
2891 * into the non-full runs tree.
2892 */
970d7e83 2893 if (bin_info->nregs != 1) {
3b2f2976
XL
2894 arena_chunk_map_misc_t *miscelm =
2895 arena_run_to_miscelm(run);
2896
2897 arena_run_heap_remove(&bin->runs, miscelm);
970d7e83
LB
2898 }
2899 }
2900}
2901
2902static void
3b2f2976
XL
2903arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2904 arena_run_t *run, arena_bin_t *bin)
970d7e83 2905{
970d7e83
LB
2906
2907 assert(run != bin->runcur);
970d7e83 2908
3b2f2976 2909 malloc_mutex_unlock(tsdn, &bin->lock);
970d7e83 2910 /******************************/
3b2f2976
XL
2911 malloc_mutex_lock(tsdn, &arena->lock);
2912 arena_run_dalloc(tsdn, arena, run, true, false, false);
2913 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83 2914 /****************************/
3b2f2976 2915 malloc_mutex_lock(tsdn, &bin->lock);
970d7e83
LB
2916 if (config_stats)
2917 bin->stats.curruns--;
2918}
2919
2920static void
3b2f2976 2921arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
970d7e83
LB
2922{
2923
2924 /*
3b2f2976
XL
2925 * Make sure that if bin->runcur is non-NULL, it refers to the
2926 * oldest/lowest non-full run. It is okay to NULL runcur out rather
2927 * than proactively keeping it pointing at the oldest/lowest non-full
2928 * run.
970d7e83 2929 */
3b2f2976
XL
2930 if (bin->runcur != NULL &&
2931 arena_snad_comp(arena_run_to_miscelm(bin->runcur),
2932 arena_run_to_miscelm(run)) > 0) {
970d7e83
LB
2933 /* Switch runcur. */
2934 if (bin->runcur->nfree > 0)
2935 arena_bin_runs_insert(bin, bin->runcur);
2936 bin->runcur = run;
2937 if (config_stats)
2938 bin->stats.reruns++;
2939 } else
2940 arena_bin_runs_insert(bin, run);
2941}
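/*
 * Editorial note: arena_snad_comp() orders runs by serial number, then by
 * address, so the branch above keeps bin->runcur on the oldest/lowest
 * non-full run whenever that is cheap, biasing reuse toward older memory.
 */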
2942
54a0048b 2943static void
3b2f2976
XL
2944arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2945 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
970d7e83 2946{
1a4d82fc 2947 size_t pageind, rpages_ind;
970d7e83
LB
2948 arena_run_t *run;
2949 arena_bin_t *bin;
2950 arena_bin_info_t *bin_info;
54a0048b 2951 szind_t binind;
970d7e83
LB
2952
2953 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1a4d82fc 2954 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
3b2f2976 2955 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
54a0048b
SL
2956 binind = run->binind;
2957 bin = &arena->bins[binind];
970d7e83 2958 bin_info = &arena_bin_info[binind];
970d7e83 2959
54a0048b 2960 if (!junked && config_fill && unlikely(opt_junk_free))
970d7e83
LB
2961 arena_dalloc_junk_small(ptr, bin_info);
2962
2963 arena_run_reg_dalloc(run, ptr);
2964 if (run->nfree == bin_info->nregs) {
2965 arena_dissociate_bin_run(chunk, run, bin);
3b2f2976 2966 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
970d7e83 2967 } else if (run->nfree == 1 && run != bin->runcur)
3b2f2976 2968 arena_bin_lower_run(arena, run, bin);
970d7e83
LB
2969
2970 if (config_stats) {
970d7e83 2971 bin->stats.ndalloc++;
54a0048b 2972 bin->stats.curregs--;
970d7e83
LB
2973 }
2974}
2975
54a0048b 2976void
3b2f2976
XL
2977arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2978 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
54a0048b
SL
2979{
2980
3b2f2976 2981 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
54a0048b
SL
2982}
2983
970d7e83 2984void
3b2f2976 2985arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
1a4d82fc 2986 size_t pageind, arena_chunk_map_bits_t *bitselm)
970d7e83
LB
2987{
2988 arena_run_t *run;
2989 arena_bin_t *bin;
1a4d82fc 2990 size_t rpages_ind;
970d7e83 2991
1a4d82fc 2992 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
3b2f2976 2993 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
54a0048b 2994 bin = &arena->bins[run->binind];
3b2f2976
XL
2995 malloc_mutex_lock(tsdn, &bin->lock);
2996 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2997 malloc_mutex_unlock(tsdn, &bin->lock);
970d7e83
LB
2998}
2999
3000void
3b2f2976
XL
3001arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3002 void *ptr, size_t pageind)
970d7e83 3003{
1a4d82fc 3004 arena_chunk_map_bits_t *bitselm;
970d7e83
LB
3005
3006 if (config_debug) {
3007 /* arena_ptr_small_binind_get() does extra sanity checking. */
3008 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
3009 pageind)) != BININD_INVALID);
3010 }
3b2f2976
XL
3011 bitselm = arena_bitselm_get_mutable(chunk, pageind);
3012 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
3013 arena_decay_tick(tsdn, arena);
970d7e83
LB
3014}
3015
1a4d82fc
JJ
3016#ifdef JEMALLOC_JET
3017#undef arena_dalloc_junk_large
3b2f2976 3018#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
1a4d82fc 3019#endif
54a0048b 3020void
1a4d82fc
JJ
3021arena_dalloc_junk_large(void *ptr, size_t usize)
3022{
3023
54a0048b 3024 if (config_fill && unlikely(opt_junk_free))
3b2f2976 3025 memset(ptr, JEMALLOC_FREE_JUNK, usize);
1a4d82fc
JJ
3026}
3027#ifdef JEMALLOC_JET
3028#undef arena_dalloc_junk_large
3029#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
3030arena_dalloc_junk_large_t *arena_dalloc_junk_large =
3b2f2976 3031 JEMALLOC_N(n_arena_dalloc_junk_large);
1a4d82fc
JJ
3032#endif
3033
54a0048b 3034static void
3b2f2976
XL
3035arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
3036 arena_chunk_t *chunk, void *ptr, bool junked)
970d7e83 3037{
1a4d82fc 3038 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3b2f2976
XL
3039 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3040 pageind);
1a4d82fc 3041 arena_run_t *run = &miscelm->run;
970d7e83
LB
3042
3043 if (config_fill || config_stats) {
54a0048b
SL
3044 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
3045 large_pad;
970d7e83 3046
54a0048b
SL
3047 if (!junked)
3048 arena_dalloc_junk_large(ptr, usize);
970d7e83 3049 if (config_stats) {
54a0048b
SL
3050 szind_t index = size2index(usize) - NBINS;
3051
970d7e83 3052 arena->stats.ndalloc_large++;
1a4d82fc 3053 arena->stats.allocated_large -= usize;
54a0048b
SL
3054 arena->stats.lstats[index].ndalloc++;
3055 arena->stats.lstats[index].curruns--;
970d7e83
LB
3056 }
3057 }
3058
3b2f2976 3059 arena_run_dalloc(tsdn, arena, run, true, false, false);
54a0048b
SL
3060}
3061
3062void
3b2f2976
XL
3063arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
3064 arena_chunk_t *chunk, void *ptr)
54a0048b
SL
3065{
3066
3b2f2976 3067 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
970d7e83
LB
3068}
3069
3070void
3b2f2976
XL
3071arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3072 void *ptr)
970d7e83
LB
3073{
3074
3b2f2976
XL
3075 malloc_mutex_lock(tsdn, &arena->lock);
3076 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
3077 malloc_mutex_unlock(tsdn, &arena->lock);
3078 arena_decay_tick(tsdn, arena);
970d7e83
LB
3079}
3080
3081static void
3b2f2976
XL
3082arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3083 void *ptr, size_t oldsize, size_t size)
970d7e83 3084{
1a4d82fc 3085 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3b2f2976
XL
3086 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3087 pageind);
1a4d82fc 3088 arena_run_t *run = &miscelm->run;
970d7e83
LB
3089
3090 assert(size < oldsize);
3091
3092 /*
3093 * Shrink the run, and make trailing pages available for other
3094 * allocations.
3095 */
3b2f2976
XL
3096 malloc_mutex_lock(tsdn, &arena->lock);
3097 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
54a0048b 3098 large_pad, true);
970d7e83 3099 if (config_stats) {
54a0048b
SL
3100 szind_t oldindex = size2index(oldsize) - NBINS;
3101 szind_t index = size2index(size) - NBINS;
3102
970d7e83
LB
3103 arena->stats.ndalloc_large++;
3104 arena->stats.allocated_large -= oldsize;
54a0048b
SL
3105 arena->stats.lstats[oldindex].ndalloc++;
3106 arena->stats.lstats[oldindex].curruns--;
970d7e83
LB
3107
3108 arena->stats.nmalloc_large++;
3109 arena->stats.nrequests_large++;
3110 arena->stats.allocated_large += size;
54a0048b
SL
3111 arena->stats.lstats[index].nmalloc++;
3112 arena->stats.lstats[index].nrequests++;
3113 arena->stats.lstats[index].curruns++;
970d7e83 3114 }
3b2f2976 3115 malloc_mutex_unlock(tsdn, &arena->lock);
970d7e83
LB
3116}
3117
3118static bool
3b2f2976
XL
3119arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3120 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
970d7e83
LB
3121{
3122 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
54a0048b 3123 size_t npages = (oldsize + large_pad) >> LG_PAGE;
970d7e83
LB
3124 size_t followsize;
3125
54a0048b
SL
3126 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3127 large_pad);
970d7e83
LB
3128
3129 /* Try to extend the run. */
3b2f2976 3130 malloc_mutex_lock(tsdn, &arena->lock);
54a0048b
SL
3131 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3132 pageind+npages) != 0)
3133 goto label_fail;
3134 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3135 if (oldsize + followsize >= usize_min) {
970d7e83
LB
3136 /*
3137 * The next run is available and sufficiently large. Split the
3138 * following run, then merge the first part with the existing
3139 * allocation.
3140 */
54a0048b
SL
3141 arena_run_t *run;
3142 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
3143
3144 usize = usize_max;
3145 while (oldsize + followsize < usize)
3146 usize = index2size(size2index(usize)-1);
3147 assert(usize >= usize_min);
3148 assert(usize >= oldsize);
3149 splitsize = usize - oldsize;
3150 if (splitsize == 0)
3151 goto label_fail;
3152
3b2f2976 3153 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
54a0048b
SL
3154 if (arena_run_split_large(arena, run, splitsize, zero))
3155 goto label_fail;
3156
3157 if (config_cache_oblivious && zero) {
3158 /*
3159 * Zero the trailing bytes of the original allocation's
3160 * last page, since they are in an indeterminate state.
3161 * There will always be trailing bytes, because ptr's
3162 * offset from the beginning of the run is a multiple of
3163 * CACHELINE in [0 .. PAGE).
3164 */
			void *zbase = (void *)((uintptr_t)ptr + oldsize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}

		size = oldsize + splitsize;
		npages = (size + large_pad) >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
		arena_mapbits_large_set(chunk, pageind, size + large_pad,
		    flag_dirty | (flag_unzeroed_mask &
		    arena_mapbits_unzeroed_get(chunk, pageind)));
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
		    pageind+npages-1)));

		if (config_stats) {
			szind_t oldindex = size2index(oldsize) - NBINS;
			szind_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (false);
	}
label_fail:
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (true);
}

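/*
 * Under JEMALLOC_JET, arena_ralloc_junk_large is exposed through a function
 * pointer so that tests can interpose on junk filling; the #define/#undef
 * pairs below rename the static implementation accordingly.
 */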
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(n_arena_ralloc_junk_large);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  When growing
 * an object, this will always fail if the following run is already in use.
 */
static bool
arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	arena_chunk_t *chunk;
	arena_t *arena;

	if (oldsize == usize_max) {
		/* Current size class is compatible and maximal. */
		return (false);
	}

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = extent_node_arena_get(&chunk->node);

	if (oldsize < usize_max) {
		bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
		    oldsize, usize_min, usize_max, zero);
		if (config_fill && !ret && !zero) {
			if (unlikely(opt_junk_alloc)) {
				memset((void *)((uintptr_t)ptr + oldsize),
				    JEMALLOC_ALLOC_JUNK,
				    isalloc(tsdn, ptr, config_prof) - oldsize);
			} else if (unlikely(opt_zero)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    isalloc(tsdn, ptr, config_prof) - oldsize);
			}
		}
		return (ret);
	}

	assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
	arena_ralloc_junk_large(ptr, oldsize, usize_max);
	arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
	return (false);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		arena_chunk_t *chunk;

		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
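		/*
		 * For example (assuming the usual small size classes), with
		 * oldsize == 48 a request whose usize_max also maps to the
		 * 48-byte class succeeds in place; growing past the class or
		 * shrinking below it returns true (a move is required).
		 */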
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max > SMALL_MAXCLASS ||
			    size2index(usize_max) != size2index(oldsize)) &&
			    (size > oldsize || usize_max < oldsize))
				return (true);
		} else {
			if (usize_max <= SMALL_MAXCLASS)
				return (true);
			if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
			    usize_max, zero))
				return (true);
		}

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
		return (false);
	} else {
		return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero));
	}
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsdn, arena, usize, size2index(usize),
		    zero, tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
		    zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
		    alignment, zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ? usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache, true);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(tsdn, &arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsdn, &arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (false);
}

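/*
 * lg_dirty_mult_default and decay_time_default are declared as ssize_t but
 * accessed through the size_t atomics; the casts below rely on the two types
 * sharing size and representation.
 */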
ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
	return (false);
}

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay.time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->retained += arena->stats.retained;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

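	/*
	 * Bin stats are merged under each bin's own lock after the arena lock
	 * has been dropped, so the resulting snapshot is not atomic across
	 * bins.
	 */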
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}

size_t
arena_extent_sn_next(arena_t *arena)
{

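	/*
	 * atomic_add_z() returns the post-increment value, so subtracting 1
	 * yields the pre-increment serial number; values are therefore unique
	 * and monotonically increasing within an arena.
	 */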
	return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;
	unsigned i;

	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
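	/*
	 * A sketch of the resulting layout when config_stats:
	 *
	 *   [arena_t][pad to CACHELINE][lstats][pad to QUANTUM][hstats]
	 *
	 * The lstats/hstats pointers set up below index into this single
	 * base_alloc() block.
	 */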
	if (config_stats) {
		arena = (arena_t *)base_alloc(tsdn,
		    CACHELINE_CEILING(sizeof(arena_t)) +
		    QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
		    + (nhclasses * sizeof(malloc_huge_stats_t)));
	} else
		arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads[0] = arena->nthreads[1] = 0;
	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (size_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get();

	ql_new(&arena->achunks);

	arena->extent_sn_next = 0;

	arena->spare = NULL;

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	for (i = 0; i < NPSIZES; i++)
		arena_run_heap_new(&arena->runs_avail[i]);

	qr_new(&arena->runs_dirty, rd_link);
	qr_new(&arena->chunks_cache, cc_link);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
	    WITNESS_RANK_ARENA_HUGE))
		return (NULL);

	extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
	extent_tree_ad_new(&arena->chunks_ad_cached);
	extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
	extent_tree_ad_new(&arena->chunks_ad_retained);
	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
	    WITNESS_RANK_ARENA_CHUNKS))
		return (NULL);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
	    WITNESS_RANK_ARENA_NODE_CACHE))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN))
			return (NULL);
		bin->runcur = NULL;
		arena_run_heap_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);
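	/*
	 * Worked example (assuming REDZONE_MINSIZE == 16): for reg_size == 64
	 * with redzones enabled, align_min == 64 (the largest power of two
	 * dividing reg_size), so redzone_size == 32, pad_size == 32, and
	 * reg_interval == 64 + 2*32 == 128.
	 */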

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on run
	 * size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);
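	/*
	 * For instance (assuming 4 KiB pages), reg_size == 192 gives
	 * perfect_run_size == 12288 (three pages) and perfect_nregs == 64:
	 * the smallest page-multiple run that 192-byte regions divide evenly.
	 */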

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region can
	 * fit within the number of pages that would normally be dedicated to a
	 * run for this size class.  Increase the run size until at least one
	 * region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}

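/*
 * SIZE_CLASSES is an X-macro that expands SC(...) once per size class, so the
 * helper macros below initialize arena_bin_info[] only for classes whose "bin"
 * field is "yes", computing each region size as
 * (1 << lg_grp) + (ndelta << lg_delta).
 */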
static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)	\
	bin_info = &arena_bin_info[index];	\
	bin_info->reg_size = size;	\
	bin_info_run_size_calc(bin_info);	\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

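/*
 * Read /sys/kernel/mm/transparent_hugepage/enabled to determine the
 * system-wide THP mode; only the "[always]" state means new mappings start
 * out backed by huge pages, which is what thp_initially_huge records.
 */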
static void
init_thp_initially_huge(void) {
	int fd;
	char buf[sizeof("[always] madvise never\n")];
	ssize_t nread;
	static const char *enabled_states[] = {
		"[always] madvise never\n",
		"always [madvise] never\n",
		"always madvise [never]\n"
	};
	static const bool thp_initially_huge_states[] = {
		true,
		false,
		false
	};
	unsigned i;

	if (config_debug) {
		for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
		    i++) {
			assert(sizeof(buf) > strlen(enabled_states[i]));
		}
	}
	assert(sizeof(enabled_states)/sizeof(const char *) ==
	    sizeof(thp_initially_huge_states)/sizeof(bool));

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	fd = (int)syscall(SYS_open,
	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
	fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
	if (fd == -1) {
		goto label_error;
	}

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
	nread = read(fd, &buf, sizeof(buf));
#endif

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
	syscall(SYS_close, fd);
#else
	close(fd);
#endif

	if (nread < 1) {
		goto label_error;
	}
	for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
	    i++) {
		if (strncmp(buf, enabled_states[i], (size_t)nread) == 0) {
			thp_initially_huge = thp_initially_huge_states[i];
			return;
		}
	}

label_error:
	thp_initially_huge = false;
}

void
arena_boot(void)
{
	unsigned i;

	if (config_thp && opt_thp) {
		init_thp_initially_huge();
	}

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);
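	/*
	 * Hypothetical illustration (assuming 512-page chunks, roughly 100
	 * bytes of map entries per page, and ignoring the fixed header
	 * offset): pass 1 sizes the map for all 512 pages (~50 KiB, 13
	 * pages), pass 2 resizes it for the remaining 499 pages (~49 KiB,
	 * still 13 pages), and pass 3 confirms the fixed point, so
	 * map_bias == 13.
	 */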

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	assert(large_maxclass > 0);
	assert(large_maxclass + large_pad <= arena_maxrun);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
}

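/*
 * The prefork/postfork hooks below keep arenas consistent across fork(): the
 * prefork functions acquire every arena mutex before the fork, and the
 * postfork functions release them in the reverse of the acquisition order.
 */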
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}