#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
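/*
 * Illustrative sketch (not part of the real headers): an internal header
 * following this convention guards each of its sections, so that this file
 * can safely include it once per pass, e.g.:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;    <- hypothetical pseudo-opaque type
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { ... };          <- hypothetical structure definition
 *   #endif
 */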
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

/* Size class index type. */
typedef unsigned szind_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define MALLOCX_ARENA_MASK ((int)~0xfffff)
#define MALLOCX_ARENA_MAX 0xffe
#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
    ((bool)(flags & MALLOCX_ZERO))

#define MALLOCX_TCACHE_GET(flags) \
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags) \
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
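/*
 * Worked example (illustrative): for flags == MALLOCX_LG_ALIGN(4),
 * MALLOCX_ALIGN_GET_SPECIFIED(flags) == (ZU(1) << 4) == 16, i.e. 16-byte
 * alignment; the tcache and arena indices are likewise recovered by masking
 * and shifting out the fields shown in the diagram above.
 */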

/* Smallest size class to support. */
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
#  define LG_QUANTUM 4
# endif
# ifdef __ia64__
#  define LG_QUANTUM 4
# endif
# ifdef __alpha__
#  define LG_QUANTUM 4
# endif
# if (defined(__sparc64__) || defined(__sparcv9))
#  define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#  define LG_QUANTUM 4
# endif
# ifdef __arm__
#  define LG_QUANTUM 3
# endif
# ifdef __aarch64__
#  define LG_QUANTUM 4
# endif
# ifdef __hppa__
#  define LG_QUANTUM 4
# endif
# ifdef __mips__
#  define LG_QUANTUM 3
# endif
# ifdef __or1k__
#  define LG_QUANTUM 3
# endif
# ifdef __powerpc__
#  define LG_QUANTUM 4
# endif
# ifdef __s390__
#  define LG_QUANTUM 4
# endif
# ifdef __SH4__
#  define LG_QUANTUM 4
# endif
# ifdef __tile__
#  define LG_QUANTUM 4
# endif
# ifdef __le32__
#  define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
#  error "Unknown minimum alignment for architecture; specify via "
	 "--with-lg-quantum"
# endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
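/*
 * Example (assuming LG_QUANTUM == 4, so QUANTUM == 16):
 * QUANTUM_CEILING(1) == 16 and QUANTUM_CEILING(17) == 32.
 */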

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~PAGE_MASK))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)
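/*
 * Example (assuming LG_PAGE == 12, i.e. 4 KiB pages):
 * PAGE_CEILING(1) == 4096 and PAGE_CEILING(4097) == 8192.
 */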

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))
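/*
 * Example (alignment must be a power of two): with alignment == 0x100,
 * ALIGNMENT_ADDR2BASE(0x1234, 0x100) == 0x1200,
 * ALIGNMENT_ADDR2OFFSET(0x1234, 0x100) == 0x34, and
 * ALIGNMENT_CEILING(0x1234, 0x100) == 0x1300.
 */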

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
#  include <malloc.h>
#  define alloca _alloca
# else
#  ifdef JEMALLOC_HAS_ALLOCA_H
#   include <alloca.h>
#  else
#   include <stdlib.h>
#  endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
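/*
 * Usage sketch (illustrative): VARIABLE_ARRAY(bool, dirty, npages) expands
 * to either a C99 VLA named dirty or, pre-C99, a pointer dirty initialized
 * with alloca(), so no explicit free is needed in either case.
 */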

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern unsigned opt_narenas;

extern bool in_valgrind;

/* Number of CPUs. */
extern unsigned ncpus;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;

/*
 * index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by index2size_compute().
 */
extern size_t const index2size_tab[NSIZES+1];
/*
 * size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via size2index().
 */
extern uint8_t const size2index_tab[];

void *a0malloc(size_t size);
void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
unsigned narenas_total_get(void);
arena_t *arena_init(unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t *arena_choose_hard(tsd_t *tsd);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
void arenas_tdata_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
    bool refresh_if_missing);
arena_t *arena_get(unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{

#if (NTBINS != 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	}
#endif
	{
		szind_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		szind_t grp = shift << LG_SIZE_CLASS_GROUP;

		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

		szind_t index = NTBINS + grp + mod;
		return (index);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{

	assert(size <= LOOKUP_MAXCLASS);
	{
		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
		assert(ret == size2index_compute(size));
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (size2index_lookup(size));
	return (size2index_compute(size));
}
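/*
 * Example (illustrative, default configuration): the smallest size class
 * maps to index 0, e.g. size2index(8) == 0 when the smallest class is 8
 * bytes, and index2size(size2index(s)) == s2u(s) for any s in
 * (0, LOOKUP_MAXCLASS].
 */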

JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{

#if (NTBINS > 0)
	if (index < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
	{
		size_t reduced_index = index - NTBINS;
		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);

		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;

		size_t usize = grp_size + mod_size;
		return (usize);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
	size_t ret = (size_t)index2size_tab[index];
	assert(ret == index2size_compute(index));
	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{

	assert(index < NSIZES);
	return (index2size_lookup(index));
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

#if (NTBINS > 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	}
#endif
	{
		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
	size_t ret = index2size_lookup(size2index_lookup(size));

	assert(ret == s2u_compute(size));
	return (ret);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (s2u_lookup(size));
	return (s2u_compute(size));
}
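/*
 * Worked example (illustrative, assuming LG_QUANTUM == 4 and the default
 * size classes): s2u(100) == 112, because 112 is the smallest size class
 * that is >= 100.
 */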

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/* Try for a small size class. */
	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
		/*
		 * Round size up to the nearest multiple of alignment.
		 *
		 * This done, we can take advantage of the fact that for each
		 * small size class, every object is aligned at the smallest
		 * power of two that is non-zero in the base two representation
		 * of the size.  For example:
		 *
		 *   Size |   Base 2 | Minimum alignment
		 *   -----+----------+------------------
		 *     96 |  1100000 |                32
		 *    160 | 10100000 |                32
		 *    192 | 11000000 |                64
		 */
		usize = s2u(ALIGNMENT_CEILING(size, alignment));
		if (usize < LARGE_MINCLASS)
			return (usize);
	}

	/* Try for a large size class. */
	if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * to the minimum that can actually be supported.
		 */
		alignment = PAGE_CEILING(alignment);

		/* Make sure result is a large size class. */
		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
			return (usize);
	}

	/* Huge size class.  Beware of overflow. */

	if (unlikely(alignment > HUGE_MAXCLASS))
		return (0);

	/*
	 * We can't achieve subchunk alignment, so round up alignment to the
	 * minimum that can actually be supported.
	 */
	alignment = CHUNK_CEILING(alignment);

	/* Make sure result is a huge size class. */
	if (size <= chunksize)
		usize = chunksize;
	else {
		usize = s2u(size);
		if (usize < size) {
			/* size_t overflow. */
			return (0);
		}
	}

	/*
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
	return (usize);
}
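/*
 * Example (illustrative, 4 KiB pages): sa2u(1, PAGE) cannot use a small size
 * class (the alignment is not < PAGE), so it falls through to the large path
 * and returns LARGE_MINCLASS; a return value of 0 always signals overflow.
 */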

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
		ret = arena_choose_hard(tsd);

	return (ret);
}

JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return (arena_tdata_get_hard(tsd, ind));
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, cache is old (too small), or tdata to be
		 * initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing)
		return (tdata);
	return (arena_tdata_get_hard(tsd, ind));
}

JEMALLOC_INLINE arena_t *
arena_get(unsigned ind, bool init_if_missing)
{
	arena_t *ret;

	assert(ind <= MALLOCX_ARENA_MAX);

	ret = arenas[ind];
	if (unlikely(ret == NULL)) {
		ret = atomic_read_p((void *)&arenas[ind]);
		if (init_if_missing && unlikely(ret == NULL))
			ret = arena_init(ind);
	}
	return (ret);
}

JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL))
		return (NULL);
	return (&tdata->decay_ticker);
}
#endif

#include "jemalloc/internal/bitmap.h"
/*
 * Include portions of arena.h interleaved with tcache.h in order to resolve
 * circular dependencies.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
    arena_t *arena);
void *imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
void *icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
    arena_t *arena);
void *icalloc(tsd_t *tsd, size_t size, szind_t ind);
void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena);
void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path);
void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena);
void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero);
bool ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{

	assert(ptr != NULL);

	return (arena_aalloc(ptr));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || !demote);

	return (arena_salloc(ptr, demote));
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena, bool slow_path)
{
	void *ret;

	assert(size != 0);

	ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{

	return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
{

	return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
	    NULL, slow_path));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{

	return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size, szind_t ind)
{

	return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
	    NULL, true));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
	    false, NULL));
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
	extent_node_t *node;

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	node = chunk_lookup(ptr, false);
	if (node == NULL)
		return (0);
	/* Only arena chunks should be looked up via interior pointers. */
	assert(extent_node_addr_get(node) == ptr ||
	    extent_node_achunk_get(node));

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		szind_t binind = size2index(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path)
{

	assert(ptr != NULL);
	if (config_stats && is_metadata) {
		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
		    config_prof));
	}

	arena_dalloc(tsd, ptr, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{

	idalloctm(tsd, ptr, tcache, false, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

	idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{

	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		idalloctm(tsd, ptr, tcache, false, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{

	arena_sdalloc(tsd, ptr, size, tcache);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{

	if (config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		isdalloct(tsd, ptr, size, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			return (NULL);
		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
		    zero, tcache, arena));
	}

	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
	    tcache));
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero)
{

	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd, true), NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */