/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define	LARGE_MINCLASS		(ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define	LG_RUN_MAXREGS		(LG_PAGE - LG_TINY_MIN)
#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size. Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define	REDZONE_MINSIZE		16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> lg_dirty_mult) >= ndirty
 *
 * So, supposing that lg_dirty_mult is 3, there must be at least 8 times as
 * many active pages as dirty pages.
 */
#define	LG_DIRTY_MULT_DEFAULT	3

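#if 0
/*
 * Illustrative sketch (not part of jemalloc): in ratio mode, purging is
 * needed exactly when the invariant above stops holding. With
 * lg_dirty_mult == LG_DIRTY_MULT_DEFAULT (3) and nactive == 1024, purging
 * starts once ndirty exceeds 1024 >> 3 == 128. A negative lg_dirty_mult
 * disables ratio-based purging.
 */
#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

static bool
arena_ratio_purge_needed_sketch(size_t nactive, size_t ndirty,
    ssize_t lg_dirty_mult)
{

	if (lg_dirty_mult < 0)
		return (false);
	return ((nactive >> lg_dirty_mult) < ndirty);
}
#endif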
typedef enum {
	purge_mode_ratio = 0,
	purge_mode_decay = 1,

	purge_mode_limit = 2
} purge_mode_t;
#define	PURGE_DEFAULT		purge_mode_ratio
/* Default decay time in seconds. */
#define	DECAY_TIME_DEFAULT	10
/* Number of event ticks between time checks. */
#define	DECAY_NTICKS_PER_UPDATE	1000

typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
	/* Index of bin this run is associated with. */
	szind_t		binind;

	/* Number of free regions in run. */
	unsigned	nfree;

	/* Per region allocated/deallocated bitmap. */
	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
};
970d7e83 65
1a4d82fc
JJ
66/* Each element of the chunk map corresponds to one page within the chunk. */
67struct arena_chunk_map_bits_s {
970d7e83
LB
68 /*
69 * Run address (or size) and various flags are stored together. The bit
70 * layout looks like (assuming 32-bit system):
71 *
54a0048b 72 * ???????? ???????? ???nnnnn nnndumla
970d7e83
LB
73 *
74 * ? : Unallocated: Run address for first/last pages, unset for internal
75 * pages.
76 * Small: Run page offset.
54a0048b 77 * Large: Run page count for first page, unset for trailing pages.
970d7e83
LB
78 * n : binind for small size class, BININD_INVALID for large size class.
79 * d : dirty?
80 * u : unzeroed?
54a0048b 81 * m : decommitted?
970d7e83
LB
82 * l : large?
83 * a : allocated?
84 *
85 * Following are example bit patterns for the three types of runs.
86 *
87 * p : run page offset
88 * s : run size
89 * n : binind for size class; large objects set these to BININD_INVALID
970d7e83
LB
90 * x : don't care
91 * - : 0
92 * + : 1
54a0048b
SL
93 * [DUMLA] : bit set
94 * [dumla] : bit unset
970d7e83
LB
95 *
96 * Unallocated (clean):
54a0048b
SL
97 * ssssssss ssssssss sss+++++ +++dum-a
98 * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
99 * ssssssss ssssssss sss+++++ +++dUm-a
970d7e83
LB
100 *
101 * Unallocated (dirty):
54a0048b 102 * ssssssss ssssssss sss+++++ +++D-m-a
970d7e83 103 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
54a0048b 104 * ssssssss ssssssss sss+++++ +++D-m-a
970d7e83
LB
105 *
106 * Small:
54a0048b
SL
107 * pppppppp pppppppp pppnnnnn nnnd---A
108 * pppppppp pppppppp pppnnnnn nnn----A
109 * pppppppp pppppppp pppnnnnn nnnd---A
970d7e83
LB
110 *
111 * Large:
54a0048b 112 * ssssssss ssssssss sss+++++ +++D--LA
970d7e83 113 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
54a0048b 114 * -------- -------- ---+++++ +++D--LA
970d7e83 115 *
54a0048b
SL
116 * Large (sampled, size <= LARGE_MINCLASS):
117 * ssssssss ssssssss sssnnnnn nnnD--LA
118 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
119 * -------- -------- ---+++++ +++D--LA
970d7e83 120 *
54a0048b
SL
121 * Large (not sampled, size == LARGE_MINCLASS):
122 * ssssssss ssssssss sss+++++ +++D--LA
123 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
124 * -------- -------- ---+++++ +++D--LA
970d7e83
LB
125 */
126 size_t bits;
#define	CHUNK_MAP_ALLOCATED	((size_t)0x01U)
#define	CHUNK_MAP_LARGE		((size_t)0x02U)
#define	CHUNK_MAP_STATE_MASK	((size_t)0x3U)

#define	CHUNK_MAP_DECOMMITTED	((size_t)0x04U)
#define	CHUNK_MAP_UNZEROED	((size_t)0x08U)
#define	CHUNK_MAP_DIRTY		((size_t)0x10U)
#define	CHUNK_MAP_FLAGS_MASK	((size_t)0x1cU)

#define	CHUNK_MAP_BININD_SHIFT	5
#define	BININD_INVALID		((size_t)0xffU)
#define	CHUNK_MAP_BININD_MASK	(BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK

#define	CHUNK_MAP_RUNIND_SHIFT	(CHUNK_MAP_BININD_SHIFT + 8)
#define	CHUNK_MAP_SIZE_SHIFT	(CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
#define	CHUNK_MAP_SIZE_MASK						\
    (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};
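#if 0
/*
 * Illustrative sketch (not part of jemalloc): how a small-run mapbits word
 * packs and unpacks with the shifts and masks above. The word built here
 * describes page 2 of a small run with binind 7, matching what
 * arena_mapbits_small_set() writes and what the _get() accessors defined
 * later read back.
 */
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	size_t mapbits = ((size_t)2 << CHUNK_MAP_RUNIND_SHIFT) |
	    ((size_t)7 << CHUNK_MAP_BININD_SHIFT) | CHUNK_MAP_ALLOCATED;

	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	assert((mapbits & CHUNK_MAP_LARGE) == 0);	/* Small, not large. */
	assert(((mapbits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT) == 7);		/* binind */
	assert((mapbits >> CHUNK_MAP_RUNIND_SHIFT) == 2); /* run page offset */
	return (0);
}
#endif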

struct arena_runs_dirty_link_s {
	qr(arena_runs_dirty_link_t)	rd_link;
};

/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t. Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
	/*
	 * Linkage for run heaps. There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail heaps.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	phn(arena_chunk_map_misc_t)	ph_link;

	union {
		/* Linkage for list of dirty runs. */
		arena_runs_dirty_link_t	rd;

		/* Profile counters, used for large object runs. */
		union {
			void		*prof_tctx_pun;
			prof_tctx_t	*prof_tctx;
		};

		/* Small region run metadata. */
		arena_run_t		run;
	};
};
typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */

#ifdef JEMALLOC_ARENA_STRUCTS_B
/* Arena chunk header. */
struct arena_chunk_s {
	/*
	 * A pointer to the arena that owns the chunk is stored within the node.
	 * This field as a whole is used by chunks_rtree to support both
	 * ivsalloc() and core-based debugging.
	 */
	extent_node_t		node;

	/*
	 * True if memory could be backed by transparent huge pages. This is
	 * only directly relevant to Linux, since it is the only supported
	 * platform on which jemalloc interacts with explicit transparent huge
	 * page controls.
	 */
	bool			hugepage;

	/*
	 * Map of pages within chunk that keeps track of free/large/small. The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map. This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_bits_t	map_bits[1]; /* Dynamically sized. */
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | pad?               |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on. Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;

	/* Redzone size. */
	size_t		redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t		reg_interval;

	/* Total size of a run for this bin's size class. */
	size_t		run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t	bitmap_info;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;
};
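#if 0
/*
 * Illustrative sketch (not part of jemalloc): given the layout above, the
 * address of region regind within a run whose usable pages start at rpages
 * follows directly from reg0_offset and reg_interval; arena.c's region
 * allocator computes the equivalent. reg0_offset already accounts for any
 * leading pad and region 0's redzone.
 */
#include <stdint.h>	/* uintptr_t */

static void *
arena_region_addr_sketch(void *rpages, const arena_bin_info_t *bin_info,
    size_t regind)
{

	return ((void *)((uintptr_t)rpages +
	    (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind)));
}
#endif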

struct arena_decay_s {
	/*
	 * Approximate time in seconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	ssize_t			time;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started. We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs. In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch. This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of dirty pages at beginning of current epoch. During epoch
	 * advancement we use the delta between arena->decay.ndirty and
	 * arena->ndirty to determine how many dirty pages, if any, were
	 * generated.
	 */
	size_t			ndirty;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch. Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];
};
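#if 0
/*
 * Illustrative sketch (not part of jemalloc): one way the backlog above can
 * be maintained when nadvances epochs elapse at once; the real logic lives
 * in arena.c. ndirty_new is assumed to be the count of dirty pages
 * generated since the most recently recorded epoch.
 */
#include <string.h>

static void
decay_backlog_advance_sketch(arena_decay_t *decay, size_t nadvances,
    size_t ndirty_new)
{

	if (nadvances >= SMOOTHSTEP_NSTEPS) {
		/* All previously recorded epochs have fully decayed. */
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS - 1) *
		    sizeof(size_t));
	} else {
		/* Shift out epochs that have aged past the decay window. */
		memmove(decay->backlog, &decay->backlog[nadvances],
		    (SMOOTHSTEP_NSTEPS - nadvances) * sizeof(size_t));
		if (nadvances > 1) {
			/* Skipped epochs generated no dirty pages. */
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS - nadvances],
			    0, (nadvances - 1) * sizeof(size_t));
		}
	}
	/* The most recent epoch's contribution occupies the last slot. */
	decay->backlog[SMOOTHSTEP_NSTEPS - 1] = ndirty_new;
}
#endif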

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked. Run allocation/deallocation are protected by the arena lock,
	 * which may be acquired while holding one or more bin locks, but not
	 * vice versa.
	 */
	malloc_mutex_t		lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t		*runcur;

	/*
	 * Heap of non-full runs. This heap is used when looking for an
	 * existing run when runcur is no longer usable. We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_heap_t	runs;

	/* Bin statistics. */
	malloc_bin_stats_t	stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned		ind;

	/*
	 * Number of threads currently assigned to this arena, synchronized via
	 * atomic operations. Each thread has two distinct assignments, one for
	 * application-serving allocation, and the other for internal metadata
	 * allocation. Internal metadata must not be allocated from arenas
	 * created via the arenas.extend mallctl, because the arena.<i>.reset
	 * mallctl indiscriminately discards all allocations for the affected
	 * arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 */
	unsigned		nthreads[2];

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;

	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 */
	ql_head(tcache_t)	tcache_ql;

	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 */
	size_t			offset_state;

	dss_prec_t		dss_prec;

	/* Extant arena chunks. */
	ql_head(extent_node_t)	achunks;

	/* Extent serial number generator state. */
	size_t			extent_sn_next;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk. The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t		*spare;

	/* Minimum ratio (log base 2) of nactive:ndirty. */
	ssize_t			lg_dirty_mult;

	/* True if a thread is currently executing arena_purge_to_limit(). */
	bool			purging;

	/* Number of pages in active runs and huge regions. */
	size_t			nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t			ndirty;

	/*
	 * Unused dirty memory this arena manages. Dirty memory is conceptually
	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
	 * chunks, but the list linkage is actually semi-duplicated in order to
	 * avoid extra arena_chunk_map_misc_t space overhead.
	 *
	 *   LRU-----------------------------------------------------------MRU
	 *
	 *        /-- arena ---\
	 *        |            |
	 *        |            |
	 *        |------------|                             /- chunk -\
	 *   ...->|chunks_cache|<--------------------------->|  /----\ |<--...
	 *        |------------|                             |  |node| |
	 *        |            |                             |  |    | |
	 *        |            |    /- run -\    /- run -\   |  |    | |
	 *        |            |    |       |    |       |   |  |    | |
	 *        |            |    |       |    |       |   |  |    | |
	 *        |------------|    |-------|    |-------|   |  |----| |
	 *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<---->|rd  |<----...
	 *        |------------|    |-------|    |-------|   |  |----| |
	 *        |            |    |       |    |       |   |  |    | |
	 *        |            |    |       |    |       |   |  \----/ |
	 *        |            |    \-------/    \-------/   |         |
	 *        |            |                             |         |
	 *        |            |                             |         |
	 *        \------------/                             \---------/
	 */
	arena_runs_dirty_link_t	runs_dirty;
	extent_node_t		chunks_cache;

	/* Decay-based purging state. */
	arena_decay_t		decay;

	/* Extant huge allocations. */
	ql_head(extent_node_t)	huge;
	/* Synchronizes all huge allocation/update/deallocation. */
	malloc_mutex_t		huge_mtx;

	/*
	 * Trees of chunks that were previously allocated (trees differ only in
	 * node ordering). These are used when allocating chunks, in an attempt
	 * to re-use address space. Depending on function, different tree
	 * orderings are needed, which is why there are two trees with the same
	 * contents.
	 */
	extent_tree_t		chunks_szsnad_cached;
	extent_tree_t		chunks_ad_cached;
	extent_tree_t		chunks_szsnad_retained;
	extent_tree_t		chunks_ad_retained;

	malloc_mutex_t		chunks_mtx;
	/* Cache of nodes that were allocated via base_alloc(). */
	ql_head(extent_node_t)	node_cache;
	malloc_mutex_t		node_cache_mtx;

	/* User-configurable chunk hook functions. */
	chunk_hooks_t		chunk_hooks;

	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];

	/*
	 * Size-segregated address-ordered heaps of this arena's available runs,
	 * used for first-best-fit run allocation. Runs are quantized, i.e.
	 * they reside in the last heap which corresponds to a size class less
	 * than or equal to the run size.
	 */
	arena_run_heap_t	runs_avail[NPSIZES];
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t	decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

static const size_t	large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

extern bool		opt_thp;
extern purge_mode_t	opt_purge;
extern const char	*purge_mode_names[];
extern ssize_t		opt_lg_dirty_mult;
extern ssize_t		opt_decay_time;

extern arena_bin_info_t	arena_bin_info[NBINS];

extern size_t		map_bias; /* Number of arena chunk header pages. */
extern size_t		map_misc_offset;
extern size_t		arena_maxrun; /* Max run size for arenas. */
extern size_t		large_maxclass; /* Max large size class. */
extern unsigned		nlclasses; /* Number of large size classes. */
extern unsigned		nhclasses; /* Number of huge size classes. */

#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t	*run_quantize_floor;
extern run_quantize_t	*run_quantize_ceil;
#endif
void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
    bool cache);
void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
    bool cache);
extent_node_t	*arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, size_t *sn, bool *zero);
void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t usize, size_t sn);
void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize);
void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize, size_t sn);
bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize, bool *zero);
ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool	arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t lg_dirty_mult);
ssize_t	arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
bool	arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void	arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
void	arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
void	arena_reset(tsd_t *tsd, arena_t *arena);
void	arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t	*arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t	*arena_dalloc_junk_small;
#else
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void	arena_quarantine_junk_small(void *ptr, size_t usize);
void	*arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
    bool zero);
void	*arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void	*arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void	arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
void	arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
void	arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
void	arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t	*arena_dalloc_junk_large;
#else
void	arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void	arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr);
void	arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t	*arena_ralloc_junk_large;
#endif
bool	arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t size, size_t extra, bool zero);
void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t	arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
bool	arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t	arena_lg_dirty_mult_default_get(void);
bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t	arena_decay_time_default_get(void);
bool	arena_decay_time_default_set(ssize_t decay_time);
void	arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats);
unsigned	arena_nthreads_get(arena_t *arena, bool internal);
void	arena_nthreads_inc(arena_t *arena, bool internal);
void	arena_nthreads_dec(arena_t *arena, bool internal);
size_t	arena_extent_sn_next(arena_t *arena);
arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
void	arena_boot(void);
void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void	arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void	arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void	arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void	arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void	arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t	*arena_bitselm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_bits_t	*arena_bitselm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
arena_chunk_map_misc_t	*arena_miscelm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_misc_t	*arena_miscelm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
void	*arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
size_t	*arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
const size_t	*arena_mapbitsp_get_const(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbitsp_read(const size_t *mapbitsp);
size_t	arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_size_decode(size_t mapbits);
size_t	arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
    size_t pageind);
szind_t	arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t	arena_mapbits_size_encode(size_t size);
void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void	arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
    size_t flags);
void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind);
void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, szind_t binind, size_t flags);
void	arena_metadata_allocated_add(arena_t *arena, size_t size);
void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t	arena_metadata_allocated_get(arena_t *arena);
bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t	*arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void	arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void	arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
void	arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void	arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void	*arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path);
arena_t	*arena_aalloc(const void *ptr);
size_t	arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
void	arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
void	arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
1a4d82fc 703JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
3b2f2976 704arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
705{
706
707 assert(pageind >= map_bias);
708 assert(pageind < chunk_npages);
709
1a4d82fc
JJ
710 return (&chunk->map_bits[pageind-map_bias]);
711}
712
3b2f2976
XL
713JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
714arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
715{
716
717 return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
718}
719
1a4d82fc 720JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
3b2f2976 721arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
1a4d82fc
JJ
722{
723
724 assert(pageind >= map_bias);
725 assert(pageind < chunk_npages);
726
727 return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
728 (uintptr_t)map_misc_offset) + pageind-map_bias);
729}
730
3b2f2976
XL
731JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
732arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
733{
734
735 return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
736}
737
1a4d82fc 738JEMALLOC_ALWAYS_INLINE size_t
54a0048b 739arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
1a4d82fc
JJ
740{
741 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
742 size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
743 map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
744
745 assert(pageind >= map_bias);
746 assert(pageind < chunk_npages);
747
748 return (pageind);
749}
750
751JEMALLOC_ALWAYS_INLINE void *
3b2f2976 752arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
1a4d82fc
JJ
753{
754 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
755 size_t pageind = arena_miscelm_to_pageind(miscelm);
756
757 return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
758}
759
54a0048b
SL
760JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
761arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
762{
763 arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
764 *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
765
766 assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
767 assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
768
769 return (miscelm);
770}
771
1a4d82fc
JJ
772JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
773arena_run_to_miscelm(arena_run_t *run)
774{
775 arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
776 *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
777
778 assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
779 assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
780
781 return (miscelm);
970d7e83
LB
782}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

	return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE const size_t *
arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(const size_t *mapbitsp)
{

	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
	size_t size;

#if CHUNK_MAP_SIZE_SHIFT > 0
	size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
	size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
	size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif

	return (size);
}

970d7e83 828JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 829arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
830{
831 size_t mapbits;
832
833 mapbits = arena_mapbits_get(chunk, pageind);
834 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
54a0048b 835 return (arena_mapbits_size_decode(mapbits));
970d7e83
LB
836}
837
838JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 839arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
840{
841 size_t mapbits;
842
843 mapbits = arena_mapbits_get(chunk, pageind);
844 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
845 (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
54a0048b 846 return (arena_mapbits_size_decode(mapbits));
970d7e83
LB
847}
848
849JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 850arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
851{
852 size_t mapbits;
853
854 mapbits = arena_mapbits_get(chunk, pageind);
855 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
856 CHUNK_MAP_ALLOCATED);
54a0048b 857 return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
970d7e83
LB
858}
859
54a0048b 860JEMALLOC_ALWAYS_INLINE szind_t
3b2f2976 861arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
862{
863 size_t mapbits;
54a0048b 864 szind_t binind;
970d7e83
LB
865
866 mapbits = arena_mapbits_get(chunk, pageind);
867 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
868 assert(binind < NBINS || binind == BININD_INVALID);
869 return (binind);
870}
871
872JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 873arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
874{
875 size_t mapbits;
876
877 mapbits = arena_mapbits_get(chunk, pageind);
54a0048b
SL
878 assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
879 (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
970d7e83
LB
880 return (mapbits & CHUNK_MAP_DIRTY);
881}
882
883JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 884arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
885{
886 size_t mapbits;
887
888 mapbits = arena_mapbits_get(chunk, pageind);
54a0048b
SL
889 assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
890 (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
970d7e83
LB
891 return (mapbits & CHUNK_MAP_UNZEROED);
892}
893
54a0048b 894JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 895arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
54a0048b
SL
896{
897 size_t mapbits;
898
899 mapbits = arena_mapbits_get(chunk, pageind);
900 assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
901 (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
902 return (mapbits & CHUNK_MAP_DECOMMITTED);
903}
904
970d7e83 905JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 906arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
907{
908 size_t mapbits;
909
910 mapbits = arena_mapbits_get(chunk, pageind);
911 return (mapbits & CHUNK_MAP_LARGE);
912}
913
914JEMALLOC_ALWAYS_INLINE size_t
3b2f2976 915arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
970d7e83
LB
916{
917 size_t mapbits;
918
919 mapbits = arena_mapbits_get(chunk, pageind);
920 return (mapbits & CHUNK_MAP_ALLOCATED);
921}
922
1a4d82fc
JJ
923JEMALLOC_ALWAYS_INLINE void
924arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
925{
926
927 *mapbitsp = mapbits;
928}
929
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
	size_t mapbits;

#if CHUNK_MAP_SIZE_SHIFT > 0
	mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
	mapbits = size;
#else
	mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif

	assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
	return (mapbits);
}
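#if 0
/*
 * Illustrative sketch (not part of jemalloc): arena_mapbits_size_encode()
 * and arena_mapbits_size_decode() round-trip any page-aligned size that
 * fits in the size field, e.g. a three-page run.
 */
#include <assert.h>

int
main(void)
{
	size_t size = (size_t)3 << LG_PAGE;

	assert(arena_mapbits_size_decode(arena_mapbits_size_encode(size)) ==
	    size);
	return (0);
}
#endif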

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    (mapbits & ~CHUNK_MAP_SIZE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
	assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
	    (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
	arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
	    CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
	    CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
	    large_pad);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    szind_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_UNZEROED) == flags);
	arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
	    (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{

	atomic_add_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{

	atomic_sub_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{

	return (atomic_read_z(&arena->stats.metadata_allocated));
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);
	return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (likely(prof_interval == 0))
		return (false);

	{
		bool ret;

		malloc_mutex_lock(tsdn, &arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	szind_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		size_t rpages_ind;
		const arena_run_t *run;
		arena_bin_t *bin;
		szind_t run_binind, actual_binind;
		arena_bin_info_t *bin_info;
		const arena_chunk_map_misc_t *miscelm;
		const void *rpages;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = extent_node_arena_get(&chunk->node);
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
		    pageind);
		miscelm = arena_miscelm_get_const(chunk, rpages_ind);
		run = &miscelm->run;
		run_binind = run->binind;
		bin = &arena->bins[run_binind];
		actual_binind = (szind_t)(bin - arena->bins);
		assert(run_binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		rpages = arena_miscelm_to_rpages(miscelm);
		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	szind_t binind = (szind_t)(bin - arena->bins);
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE size_t
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	size_t diff, interval, shift, regind;
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	void *rpages = arena_miscelm_to_rpages(miscelm);

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)rpages +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible. Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = ffs_zu(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^SIZE_INV_SHIFT / D) and then right shift by
		 * SIZE_INV_SHIFT positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
		static const size_t interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
		    + 2))) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
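#if 0
/*
 * Illustrative sketch (not part of jemalloc): the reciprocal trick above,
 * specialized to a hypothetical bin with reg_interval == 48. Since
 * 48 == 3 << 4, shift == 4 and the divisor reduces to 3. The constant
 * assumes the 64-bit case, where SIZE_INV_SHIFT == 64 - LG_RUN_MAXREGS
 * == 55 (with LG_PAGE == 12 and LG_TINY_MIN == 3).
 */
#include <assert.h>
#include <stddef.h>

static size_t
regind_div48_sketch(size_t diff)
{
	const unsigned shift = 55;
	const size_t inv3 = (((size_t)1 << shift) / 3) + 1;

	/* (diff >> 4) / 3, via multiply-and-shift instead of division. */
	return (((diff >> 4) * inv3) >> shift);
}

int
main(void)
{
	size_t regind;

	for (regind = 0; regind < 512; regind++)
		assert(regind_div48_sketch(regind * 48) == regind);
	return (0);
}
#endif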

JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *ret;
	arena_chunk_t *chunk;

	cassert(config_prof);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t mapbits = arena_mapbits_get(chunk, pageind);
		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
			ret = (prof_tctx_t *)(uintptr_t)1U;
		else {
			arena_chunk_map_misc_t *elm =
			    arena_miscelm_get_mutable(chunk, pageind);
			ret = atomic_read_p(&elm->prof_tctx_pun);
		}
	} else
		ret = huge_prof_tctx_get(tsdn, ptr);

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx)
{
	arena_chunk_t *chunk;

	cassert(config_prof);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;

		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

		if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
		    (uintptr_t)1U)) {
			arena_chunk_map_misc_t *elm;

			assert(arena_mapbits_large_get(chunk, pageind) != 0);

			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun, tctx);
		} else {
			/*
			 * tctx must always be initialized for large runs.
			 * Assert that the surrounding conditional logic is
			 * equivalent to checking whether ptr refers to a large
			 * run.
			 */
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
		}
	} else
		huge_prof_tctx_set(tsdn, ptr, tctx);
}

JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx)
{

	cassert(config_prof);
	assert(ptr != NULL);

	if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
	    (uintptr_t)old_tctx > (uintptr_t)1U))) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		if (likely(chunk != ptr)) {
			size_t pageind;
			arena_chunk_map_misc_t *elm;

			pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
			assert(arena_mapbits_allocated_get(chunk, pageind) !=
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) != 0);

			elm = arena_miscelm_get_mutable(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun,
			    (prof_tctx_t *)(uintptr_t)1U);
		} else
			huge_prof_tctx_reset(tsdn, ptr);
	}
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn)))
		return;
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena->ind);
	if (unlikely(decay_ticker == NULL))
		return;
	if (unlikely(ticker_ticks(decay_ticker, nticks)))
		arena_purge(tsdn, arena, false);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{

	arena_decay_ticks(tsdn, arena, 1);
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path)
{

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);

	if (likely(tcache != NULL)) {
		if (likely(size <= SMALL_MAXCLASS)) {
			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path));
		}
		if (likely(size <= tcache_maxclass)) {
			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path));
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(const void *ptr)
{
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr))
		return (extent_node_arena_get(&chunk->node));
	else
		return (huge_aalloc(ptr));
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		binind = arena_mapbits_binind_get(chunk, pageind);
		if (unlikely(binind == BININD_INVALID || (config_prof && !demote
		    && arena_mapbits_large_get(chunk, pageind) != 0))) {
			/*
			 * Large allocation. In the common case (demote), and
			 * as this is an inline function, most callers will only
			 * end up looking at binind to determine that ptr is a
			 * small allocation.
			 */
			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);
			ret = arena_mapbits_large_size_get(chunk, pageind) -
			    large_pad;
			assert(ret != 0);
			assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
			    chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk,
			    pageind+((ret+large_pad)>>LG_PAGE)-1));
		} else {
			/*
			 * Small allocation (possibly promoted to a large
			 * object).
			 */
			assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
			    arena_ptr_small_binind_get(ptr,
			    arena_mapbits_get(chunk, pageind)) == binind);
			ret = index2size(binind);
		}
	} else
		ret = huge_salloc(tsdn, ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		mapbits = arena_mapbits_get(chunk, pageind);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
			/* Small allocation. */
			if (likely(tcache != NULL)) {
				szind_t binind = arena_ptr_small_binind_get(ptr,
				    mapbits);
				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
				    binind, slow_path);
			} else {
				arena_dalloc_small(tsdn,
				    extent_node_arena_get(&chunk->node), chunk,
				    ptr, pageind);
			}
		} else {
			size_t size = arena_mapbits_large_size_get(chunk,
			    pageind);

			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);

			if (likely(tcache != NULL) && size - large_pad <=
			    tcache_maxclass) {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
				    size - large_pad, slow_path);
			} else {
				arena_dalloc_large(tsdn,
				    extent_node_arena_get(&chunk->node), chunk,
				    ptr);
			}
		}
	} else
		huge_dalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path)
{
	arena_chunk_t *chunk;

	assert(!tsdn_null(tsdn) || tcache == NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		if (config_prof && opt_prof) {
			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
			assert(arena_mapbits_allocated_get(chunk, pageind) !=
			    0);
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				/*
				 * Make sure to use promoted size, not request
				 * size.
				 */
				size = arena_mapbits_large_size_get(chunk,
				    pageind) - large_pad;
			}
		}
		assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));

		if (likely(size <= SMALL_MAXCLASS)) {
			/* Small allocation. */
			if (likely(tcache != NULL)) {
				szind_t binind = size2index(size);
				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
				    binind, slow_path);
			} else {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_dalloc_small(tsdn,
				    extent_node_arena_get(&chunk->node), chunk,
				    ptr, pageind);
			}
		} else {
			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);

			if (likely(tcache != NULL) && size <= tcache_maxclass) {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
				    size, slow_path);
			} else {
				arena_dalloc_large(tsdn,
				    extent_node_arena_get(&chunk->node), chunk,
				    ptr);
			}
		}
	} else
		huge_dalloc(tsdn, ptr);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/