#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;
#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
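/*
 * Illustrative sketch (assumed layout, not a verbatim excerpt from any one
 * header): each component header is organized around these passes, roughly:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct arena_s arena_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct arena_s { ... };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   extern data and prototypes
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE definitions
 *   #endif
 *
 * which is why this file re-includes the same headers once per pass below.
 */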
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

/* Size class index type. */
typedef unsigned szind_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define MALLOCX_ARENA_MASK	((int)~0xfffff)
#define MALLOCX_ARENA_MAX	0xffe
#define MALLOCX_TCACHE_MASK	((int)~0xfff000ffU)
#define MALLOCX_TCACHE_MAX	0xffd
#define MALLOCX_LG_ALIGN_MASK	((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags)					\
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags)						\
    ((bool)(flags & MALLOCX_ZERO))

#define MALLOCX_TCACHE_GET(flags)					\
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define MALLOCX_ARENA_GET(flags)					\
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
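/*
 * Worked example (illustrative): given flags built from the public macros as
 * MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO | MALLOCX_TCACHE(tc) | MALLOCX_ARENA(a)
 * (the tcache and arena fields are biased by +2 and +1 so that zero means
 * "unspecified"), the accessors above decode to:
 *
 *   MALLOCX_ALIGN_GET(flags)  == 16   (1 << 4; 0 if no alignment was encoded)
 *   MALLOCX_ZERO_GET(flags)   == true
 *   MALLOCX_TCACHE_GET(flags) == tc
 *   MALLOCX_ARENA_GET(flags)  == a
 */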
/* Smallest size class to support. */
#define TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__sparc64__) || defined(__sparcv9))
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __riscv__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "Unknown minimum alignment for architecture; specify via --with-lg-quantum"
#  endif
#endif
#define QUANTUM			((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a)						\
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
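/*
 * Illustrative: with LG_QUANTUM == 4 (QUANTUM == 16), QUANTUM_CEILING(17) ==
 * 32 and QUANTUM_CEILING(16) == 16.  The mask trick requires QUANTUM to be a
 * power of two; the LONG/PTR/CACHELINE/PAGE ceilings below use the same idiom.
 */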
#define LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a)							\
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a)							\
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE		6
#define CACHELINE		64
#define CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s)						\
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size.  LG_PAGE is determined by the configure script. */
#define PAGE		((size_t)(1U << LG_PAGE))
#define PAGE_MASK	((size_t)(PAGE - 1))

/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a)						\
    ((void *)((uintptr_t)(a) & ~PAGE_MASK))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s)							\
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment)				\
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment)					\
    (((s) + (alignment - 1)) & (-(alignment)))
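/*
 * Illustrative: ALIGNMENT_CEILING(13, 8) == 16, ALIGNMENT_ADDR2BASE(0x1234,
 * 0x10) == 0x1230, and ALIGNMENT_ADDR2OFFSET(0x1234, 0x10) == 4.  As with the
 * ceiling macros above, alignment must be a power of two.
 */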
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
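/*
 * Hypothetical usage sketch (names invented for illustration):
 *
 *   VARIABLE_ARRAY(bool, seen, nbins);
 *
 * expands to a C99 VLA where available, and to an alloca()-backed pointer
 * otherwise, so the array must not outlive the enclosing function either way.
 */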
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern const char	*opt_junk;
extern bool	opt_junk_alloc;
extern bool	opt_junk_free;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern unsigned	opt_narenas;

extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned	ncpus;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t	**arenas;

/*
 * index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by index2size_compute().
 */
extern size_t const	index2size_tab[NSIZES+1];
/*
 * size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via size2index().
 */
extern uint8_t const	size2index_tab[];

void	*a0malloc(size_t size);
void	a0dalloc(void *ptr);
void	*bootstrap_malloc(size_t size);
void	*bootstrap_calloc(size_t num, size_t size);
void	bootstrap_free(void *ptr);
arena_t	*arenas_extend(unsigned ind);
unsigned	narenas_total_get(void);
arena_t	*arena_init(unsigned ind);
arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t	*arena_choose_hard(tsd_t *tsd);
void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void	thread_allocated_cleanup(tsd_t *tsd);
void	thread_deallocated_cleanup(tsd_t *tsd);
void	arena_cleanup(tsd_t *tsd);
void	arenas_tdata_cleanup(tsd_t *tsd);
void	narenas_tdata_cleanup(tsd_t *tsd);
void	arenas_tdata_bypass_cleanup(tsd_t *tsd);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
szind_t	size2index_compute(size_t size);
szind_t	size2index_lookup(size_t size);
szind_t	size2index(size_t size);
size_t	index2size_compute(szind_t index);
size_t	index2size_lookup(szind_t index);
size_t	index2size(szind_t index);
size_t	s2u_compute(size_t size);
size_t	s2u_lookup(size_t size);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
    bool refresh_if_missing);
arena_t	*arena_get(unsigned ind, bool init_if_missing);
ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{

#if (NTBINS != 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	}
#endif
	{
		szind_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		szind_t grp = shift << LG_SIZE_CLASS_GROUP;

		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

		szind_t index = NTBINS + grp + mod;
		return (index);
	}
}
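/*
 * Worked example (illustrative; assumes NTBINS == 0, LG_QUANTUM == 4 and
 * LG_SIZE_CLASS_GROUP == 2): for size == 96, x == lg_floor(191) == 7, so
 * shift == 1, grp == 4, lg_delta == 4, mod == 1, and the function returns
 * index 5, i.e. the 96-byte class in the sequence 16, 32, 48, 64, 80, 96, ...
 */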
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{

	assert(size <= LOOKUP_MAXCLASS);
	{
		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
		assert(ret == size2index_compute(size));
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (size2index_lookup(size));
	return (size2index_compute(size));
}
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{

#if (NTBINS > 0)
	if (index < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
	{
		size_t reduced_index = index - NTBINS;
		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);

		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;

		size_t usize = grp_size + mod_size;
		return (usize);
	}
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
	size_t ret = (size_t)index2size_tab[index];

	assert(ret == index2size_compute(index));
	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{

	assert(index < NSIZES);
	return (index2size_lookup(index));
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

#if (NTBINS > 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	}
#endif
	{
		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
	size_t ret = index2size_lookup(size2index_lookup(size));

	assert(ret == s2u_compute(size));
	return (ret);
}
/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (s2u_lookup(size));
	return (s2u_compute(size));
}
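/*
 * Illustrative (assuming quantum-spaced small classes with LG_QUANTUM == 4,
 * LG_SIZE_CLASS_GROUP == 2, and no tiny classes): s2u(17) == 32, s2u(100) ==
 * 112, and s2u(129) == 160, since class spacing doubles once each size-class
 * group is exhausted.
 */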
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/* Try for a small size class. */
	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
		/*
		 * Round size up to the nearest multiple of alignment.
		 *
		 * This done, we can take advantage of the fact that for each
		 * small size class, every object is aligned at the smallest
		 * power of two that is non-zero in the base two representation
		 * of the size.  For example:
		 *
		 *   Size |   Base 2 | Minimum alignment
		 *   -----+----------+------------------
		 *     96 |  1100000 |                32
		 *    144 | 10010000 |                16
		 *    192 | 11000000 |                64
		 */
		usize = s2u(ALIGNMENT_CEILING(size, alignment));
		if (usize < LARGE_MINCLASS)
			return (usize);
	}

	/* Try for a large size class. */
	if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * to the minimum that can actually be supported.
		 */
		alignment = PAGE_CEILING(alignment);

		/* Make sure result is a large size class. */
		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
			return (usize);
	}

	/* Huge size class.  Beware of overflow. */

	if (unlikely(alignment > HUGE_MAXCLASS))
		return (0);

	/*
	 * We can't achieve subchunk alignment, so round up alignment to the
	 * minimum that can actually be supported.
	 */
	alignment = CHUNK_CEILING(alignment);

	/* Make sure result is a huge size class. */
	if (size <= chunksize)
		usize = chunksize;
	else {
		usize = s2u(size);
		if (usize < size) {
			/* size_t overflow. */
			return (0);
		}
	}

	/*
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
	return (usize);
}
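/*
 * Illustrative (assuming the usual quantum-spaced small classes): sa2u(100,
 * 64) rounds the request up to ALIGNMENT_CEILING(100, 64) == 128 and returns
 * s2u(128) == 128; every object in the 128-byte class is at least 128-byte
 * aligned, so the 64-byte alignment request is satisfied without padding.
 */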
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
		ret = arena_choose_hard(tsd);

	return (ret);
}
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return (arena_tdata_get_hard(tsd, ind));
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, cache is old (too small), or tdata to be
		 * initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing)
		return (tdata);
	return (arena_tdata_get_hard(tsd, ind));
}
JEMALLOC_INLINE arena_t *
arena_get(unsigned ind, bool init_if_missing)
{
	arena_t *ret;

	assert(ind <= MALLOCX_ARENA_MAX);

	ret = arenas[ind];
	if (unlikely(ret == NULL)) {
		ret = atomic_read_p((void *)&arenas[ind]);
		if (init_if_missing && unlikely(ret == NULL))
			ret = arena_init(ind);
	}
	return (ret);
}
JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL))
		return (NULL);
	return (&tdata->decay_ticker);
}
#endif
#include "jemalloc/internal/bitmap.h"
/*
 * Include portions of arena.h interleaved with tcache.h in order to resolve
 * circular dependencies.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
arena_t	*iaalloc(const void *ptr);
size_t	isalloc(const void *ptr, bool demote);
void	*iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void	*imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
    arena_t *arena);
void	*imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
void	*icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
    arena_t *arena);
void	*icalloc(tsd_t *tsd, size_t size, szind_t ind);
void	*ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena);
void	*ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena);
void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path);
void	idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void	idalloc(tsd_t *tsd, void *ptr);
void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void	isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena);
void	*iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void	*iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero);
bool	ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{

	assert(ptr != NULL);

	return (arena_aalloc(ptr));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || !demote);

	return (arena_salloc(ptr, demote));
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena, bool slow_path)
{
	void *ret;

	assert(size != 0);

	ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{

	return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
{

	return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
	    NULL, slow_path));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{

	return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size, szind_t ind)
{

	return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
	    NULL, true));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
	    false, NULL));
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
	extent_node_t *node;

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	node = chunk_lookup(ptr, false);
	if (node == NULL)
		return (0);
	/* Only arena chunks should be looked up via interior pointers. */
	assert(extent_node_addr_get(node) == ptr ||
	    extent_node_achunk_get(node));

	return (isalloc(ptr, demote));
}
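/*
 * Illustrative contrast with isalloc(): ivsalloc() consults the chunk lookup
 * structure first, so it returns 0 for a pointer jemalloc does not own,
 * whereas isalloc() may only be called with a pointer to a live jemalloc
 * allocation.
 */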
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		szind_t binind = size2index(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path)
{

	assert(ptr != NULL);
	if (config_stats && is_metadata) {
		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
		    config_prof));
	}

	arena_dalloc(tsd, ptr, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{

	idalloctm(tsd, ptr, tcache, false, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

	idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{

	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		idalloctm(tsd, ptr, tcache, false, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{

	arena_sdalloc(tsd, ptr, size, tcache);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{

	if (config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		isdalloct(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			return (NULL);
		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
		    zero, tcache, arena));
	}

	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
	    tcache));
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero)
{

	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd, true), NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
}
#endif
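/*
 * Usage note on the reallocation entry points above: iralloc() may move the
 * object and returns the (possibly new) pointer, while ixalloc() only attempts
 * an in-place resize; as used here, a true return value indicates that the
 * in-place resize could not be performed and the object is left untouched.
 */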
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */