#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(weak);
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
unsigned	opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */
bool	in_valgrind;

/* Protects arenas initialization. */
static malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
arena_t			**arenas;
static unsigned		narenas_total; /* Use narenas_total_*(). */
static arena_t		*a0; /* arenas[0]; read-only after initialization. */
static unsigned		narenas_auto; /* Read-only after initialization. */
typedef enum {
    malloc_init_uninitialized	= 3,
    malloc_init_a0_initialized	= 2,
    malloc_init_recursible	= 1,
    malloc_init_initialized	= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
/* 0 should be the common case.  Set to true to trigger initialization. */
static bool	malloc_slow = true;

/* When malloc_slow != 0, set the corresponding bits for sanity check. */
enum {
    flag_opt_junk_alloc	= (1U),
    flag_opt_junk_free	= (1U << 1),
    flag_opt_quarantine	= (1U << 2),
    flag_opt_zero	= (1U << 3),
    flag_opt_utrace	= (1U << 4),
    flag_in_valgrind	= (1U << 5),
    flag_opt_xmalloc	= (1U << 6)
};
static uint8_t	malloc_slow_flags;
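/*
 * Illustrative note (added for clarity; not part of the original source):
 * malloc_slow_flags is composed by malloc_slow_flag_init() below.  For
 * example, if only opt_zero and opt_utrace are enabled, malloc_slow_flags ==
 * (flag_opt_zero | flag_opt_utrace) == 0x18, so malloc_slow becomes true and
 * the allocation fast path is bypassed.
 */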
/* Last entry for overflow detection only. */
JEMALLOC_ALIGNED(CACHELINE)
const size_t	index2size_tab[NSIZES+1] = {
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
    SIZE_CLASSES
#undef SC
    ZU(0)
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#elif LG_TINY_MIN == 3
#elif LG_TINY_MIN == 4
#elif LG_TINY_MIN == 5
#elif LG_TINY_MIN == 6
#elif LG_TINY_MIN == 7
#elif LG_TINY_MIN == 8
#elif LG_TINY_MIN == 9
#elif LG_TINY_MIN == 10
#elif LG_TINY_MIN == 11
#else
#error "Unsupported LG_TINY_MIN"
#endif
#define	S2B_1(i)	S2B_0(i) S2B_0(i)
#define	S2B_2(i)	S2B_1(i) S2B_1(i)
#define	S2B_3(i)	S2B_2(i) S2B_2(i)
#define	S2B_4(i)	S2B_3(i) S2B_3(i)
#define	S2B_5(i)	S2B_4(i) S2B_4(i)
#define	S2B_6(i)	S2B_5(i) S2B_5(i)
#define	S2B_7(i)	S2B_6(i) S2B_6(i)
#define	S2B_8(i)	S2B_7(i) S2B_7(i)
#define	S2B_9(i)	S2B_8(i) S2B_8(i)
#define	S2B_10(i)	S2B_9(i) S2B_9(i)
#define	S2B_11(i)	S2B_10(i) S2B_10(i)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
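/*
 * Worked example (added for clarity; not part of the original source): each
 * size class contributes S2B_<lg_delta_lookup>(index) entries to
 * size2index_tab, i.e. 2^(lg_delta_lookup - LG_TINY_MIN) copies of its index.
 * With LG_TINY_MIN == 3, a class whose lg_delta_lookup is 4 expands to
 * S2B_4(index) == S2B_3(index) S2B_3(index) == two "index," entries, covering
 * a 16-byte span of request sizes at 8-byte lookup granularity.
 */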
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif
/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    /* If another constructor in the same binary is using mallctl to
     * e.g. setup chunk hooks, it may end up running before this one,
     * and malloc_init_hard will crash trying to lock the uninitialized
     * lock.  So we force an initialization of the lock in
     * malloc_init_hard as well.  We don't try to care about atomicity
     * of the accesses to the init_lock_initialized boolean, since it
     * really only matters early in the process creation, before any
     * separate thread normally starts doing anything. */
    if (!init_lock_initialized)
        malloc_mutex_init(&init_lock);
    init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
typedef struct {
    void	*p;	/* Input pointer (as in realloc(p, s)). */
    size_t	s;	/* Request size. */
    void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do { \
    if (unlikely(opt_utrace)) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
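/*
 * Usage note (added; not part of the original source): callers pass the old
 * pointer, request size, and result, e.g. UTRACE(0, size, ret) in je_malloc()
 * and UTRACE(ptr, size, ret) in je_realloc(), so a utrace(2) consumer can
 * reconstruct the allocation stream.
 */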
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

    return (malloc_init_state == malloc_init_initialized);
}
JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption.  The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

    if (unlikely(malloc_init_state == malloc_init_uninitialized))
        return (malloc_init_hard_a0());
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized()) && malloc_init_hard())
        return (true);
    malloc_thread_init();
    return (false);
}
/*
 * The a0*() functions are used instead of i[mcd]alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

    if (unlikely(malloc_init_a0()))
        return (NULL);

    return (iallocztm(NULL, size, size2index(size), zero, false,
        is_metadata, arena_get(0, false), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

    idalloctm(NULL, ptr, false, is_metadata, true);
}

void *
a0malloc(size_t size)
{

    return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

    a0idalloc(ptr, true);
}
/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

    if (unlikely(size == 0))
        size = 1;

    return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
    size_t num_size;

    num_size = num * size;
    if (unlikely(num_size == 0)) {
        assert(num == 0 || size == 0);
        num_size = 1;
    }

    return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

    if (unlikely(ptr == NULL))
        return;

    a0idalloc(ptr, false);
}
static void
arena_set(unsigned ind, arena_t *arena)
{

    atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

    atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

    atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

    return (atomic_read_u(&narenas_total));
}
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(unsigned ind)
{
    arena_t *arena;

    assert(ind <= narenas_total_get());
    if (ind > MALLOCX_ARENA_MAX)
        return (NULL);
    if (ind == narenas_total_get())
        narenas_total_inc();

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arena_get(ind, false);
    if (arena != NULL) {
        assert(ind < narenas_auto);
        return (arena);
    }

    /* Actually initialize the arena. */
    arena = arena_new(ind);
    arena_set(ind, arena);
    return (arena);
}

arena_t *
arena_init(unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arena_init_locked(ind);
    malloc_mutex_unlock(&arenas_lock);
    return (arena);
}
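/*
 * Illustrative application-side sketch (added; not part of jemalloc itself):
 * an explicitly created arena lands in arenas[narenas_auto..narenas_total)
 * and is only used when requested, e.g.:
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	if (je_mallctl("arenas.extend", &ind, &sz, NULL, 0) == 0)
 *		p = je_mallocx(4096, MALLOCX_ARENA(ind));
 */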
static void
arena_bind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_inc(arena);

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
    arena_t *oldarena, *newarena;

    oldarena = arena_get(oldind, false);
    newarena = arena_get(newind, false);
    arena_nthreads_dec(oldarena);
    arena_nthreads_inc(newarena);
    tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_dec(arena);
    tsd_arena_set(tsd, NULL);
}
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
    arena_tdata_t *tdata, *arenas_tdata_old;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
    unsigned narenas_tdata_old, i;
    unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /*
     * Dissociate old tdata array (and set up for deallocation upon return)
     * if it's too small.
     */
    if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
        arenas_tdata_old = arenas_tdata;
        narenas_tdata_old = narenas_tdata;
        arenas_tdata = NULL;
        narenas_tdata = 0;
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    } else {
        arenas_tdata_old = NULL;
        narenas_tdata_old = 0;
    }

    /* Allocate tdata array if it's missing. */
    if (arenas_tdata == NULL) {
        bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
        narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
            *arenas_tdata_bypassp = true;
            arenas_tdata = (arena_tdata_t *)a0malloc(
                sizeof(arena_tdata_t) * narenas_tdata);
            *arenas_tdata_bypassp = false;
        }
        if (arenas_tdata == NULL) {
            tdata = NULL;
            goto label_return;
        }
        assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    }

    /*
     * Copy to tdata array.  It's possible that the actual number of arenas
     * has increased since narenas_total_get() was called above, but that
     * causes no correctness issues unless two threads concurrently execute
     * the arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */

    /* Copy/initialize tickers. */
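    /*
     * Example (added for clarity; not in the original source): if
     * arenas.extend raised the arena count from 4 to 5 since this thread
     * last refreshed its tdata, tickers 0..3 are copied from the old array
     * below and ticker 4 is initialized fresh with DECAY_NTICKS_PER_UPDATE.
     */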
    for (i = 0; i < narenas_actual; i++) {
        if (i < narenas_tdata_old) {
            ticker_copy(&arenas_tdata[i].decay_ticker,
                &arenas_tdata_old[i].decay_ticker);
        } else {
            ticker_init(&arenas_tdata[i].decay_ticker,
                DECAY_NTICKS_PER_UPDATE);
        }
    }
    if (narenas_tdata > narenas_actual) {
        memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
            * (narenas_tdata - narenas_actual));
    }

    /* Read the refreshed tdata array. */
    tdata = &arenas_tdata[ind];

label_return:
    if (arenas_tdata_old != NULL)
        a0dalloc(arenas_tdata_old);
    return (tdata);
}
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arena_get(0, false) != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arena_get(i, false) != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arena_nthreads_get(arena_get(i, false)) <
                    arena_nthreads_get(arena_get(choose,
                    false)))
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arena_nthreads_get(arena_get(choose, false)) == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arena_get(choose, false);
        } else {
            /* Initialize a new arena. */
            choose = first_null;
            ret = arena_init_locked(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        arena_bind(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arena_get(0, false);
        arena_bind(tsd, 0);
    }

    return (ret);
}
void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
    arena_tdata_t *arenas_tdata;

    /* Prevent tsd->arenas_tdata from being (re)created. */
    *tsd_arenas_tdata_bypassp_get(tsd) = true;

    arenas_tdata = tsd_arenas_tdata_get(tsd);
    if (arenas_tdata != NULL) {
        tsd_arenas_tdata_set(tsd, NULL);
        a0dalloc(arenas_tdata);
    }
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}
static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arena_get(i, false);
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}
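/*
 * Usage note (added; not in the original source): this handler is registered
 * via atexit() by malloc_init_hard_a0_locked() only when opt_stats_print is
 * enabled, e.g. by running with MALLOC_CONF=stats_print:true.
 */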
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
    if (issetugid() != 0)
        return (NULL);
# endif
    return (getenv(name));
}
#endif

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }
    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}
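/*
 * Example (added; not part of the original source): malloc_conf_next() splits
 * an options string such as "lg_chunk:22,narenas:4" into the key/value pairs
 * ("lg_chunk", "22") and ("narenas", "4"), which the option-matching code in
 * malloc_conf_init() then consumes one pair per loop iteration.
 */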
static void
malloc_slow_flag_init(void)
{
    /*
     * Combine the runtime options into malloc_slow for fast path.  Called
     * after processing all the options.
     */
    malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
        | (opt_junk_free ? flag_opt_junk_free : 0)
        | (opt_quarantine ? flag_opt_quarantine : 0)
        | (opt_zero ? flag_opt_zero : 0)
        | (opt_utrace ? flag_opt_utrace : 0)
        | (opt_xmalloc ? flag_opt_xmalloc : 0);

    if (config_valgrind)
        malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

    malloc_slow = (malloc_slow_flags != 0);
}
static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = "false";
            opt_junk_alloc = false;
            opt_junk_free = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }
882 for (i
= 0; i
< 4; i
++) {
883 /* Get runtime configuration. */
886 opts
= config_malloc_conf
;
889 if (je_malloc_conf
!= NULL
) {
891 * Use options that were compiled into the
894 opts
= je_malloc_conf
;
896 /* No configuration specified. */
904 int saved_errno
= errno
;
905 const char *linkname
=
906 # ifdef JEMALLOC_PREFIX
907 "/etc/"JEMALLOC_PREFIX
"malloc.conf"
914 * Try to use the contents of the "/etc/malloc.conf"
915 * symbolic link's name.
917 linklen
= readlink(linkname
, buf
, sizeof(buf
) - 1);
919 /* No configuration specified. */
922 set_errno(saved_errno
);
929 const char *envname
=
930 #ifdef JEMALLOC_PREFIX
931 JEMALLOC_CPREFIX
"MALLOC_CONF"
937 if ((opts
= secure_getenv(envname
)) != NULL
) {
939 * Do nothing; opts is already initialized to
940 * the value of the MALLOC_CONF environment
944 /* No configuration specified. */
955 while (*opts
!= '\0' && !malloc_conf_next(&opts
, &k
, &klen
, &v
,
957 #define CONF_MATCH(n) \
958 (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
959 #define CONF_MATCH_VALUE(n) \
960 (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
961 #define CONF_HANDLE_BOOL(o, n, cont) \
962 if (CONF_MATCH(n)) { \
963 if (CONF_MATCH_VALUE("true")) \
965 else if (CONF_MATCH_VALUE("false")) \
969 "Invalid conf value", \
975 #define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
976 if (CONF_MATCH(n)) { \
981 um = malloc_strtoumax(v, &end, 0); \
982 if (get_errno() != 0 || (uintptr_t)end -\
983 (uintptr_t)v != vlen) { \
985 "Invalid conf value", \
988 if ((min) != 0 && um < (min)) \
990 else if (um > (max)) \
995 if (((min) != 0 && um < (min)) \
1000 k, klen, v, vlen); \
1006 #define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
1007 CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
1008 #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
1009 CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
1010 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1011 if (CONF_MATCH(n)) { \
1016 l = strtol(v, &end, 0); \
1017 if (get_errno() != 0 || (uintptr_t)end -\
1018 (uintptr_t)v != vlen) { \
1019 malloc_conf_error( \
1020 "Invalid conf value", \
1021 k, klen, v, vlen); \
1022 } else if (l < (ssize_t)(min) || l > \
1024 malloc_conf_error( \
1025 "Out-of-range conf value", \
1026 k, klen, v, vlen); \
1031 #define CONF_HANDLE_CHAR_P(o, n, d) \
1032 if (CONF_MATCH(n)) { \
1033 size_t cpylen = (vlen <= \
1034 sizeof(o)-1) ? vlen : \
1036 strncpy(o, v, cpylen); \
            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones.  In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
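            /*
             * Worked example (added for clarity): with 4 KiB pages
             * (LG_PAGE == 12), LG_SIZE_CLASS_GROUP == 2, and
             * config_fill enabled, the smallest accepted lg_chunk is
             * 12 + 2 + 2 == 16, i.e. a 64 KiB minimum chunk size.
             */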
1053 if (strncmp("dss", k
, klen
) == 0) {
1056 for (i
= 0; i
< dss_prec_limit
; i
++) {
1057 if (strncmp(dss_prec_names
[i
], v
, vlen
)
1059 if (chunk_dss_prec_set(i
)) {
1061 "Error setting dss",
1072 malloc_conf_error("Invalid conf value",
1077 CONF_HANDLE_UNSIGNED(opt_narenas
, "narenas", 1,
1079 if (strncmp("purge", k
, klen
) == 0) {
1082 for (i
= 0; i
< purge_mode_limit
; i
++) {
1083 if (strncmp(purge_mode_names
[i
], v
,
1085 opt_purge
= (purge_mode_t
)i
;
1091 malloc_conf_error("Invalid conf value",
1096 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult
, "lg_dirty_mult",
1097 -1, (sizeof(size_t) << 3) - 1)
1098 CONF_HANDLE_SSIZE_T(opt_decay_time
, "decay_time", -1,
1100 CONF_HANDLE_BOOL(opt_stats_print
, "stats_print", true)
1102 if (CONF_MATCH("junk")) {
1103 if (CONF_MATCH_VALUE("true")) {
1105 opt_junk_alloc
= opt_junk_free
=
1107 } else if (CONF_MATCH_VALUE("false")) {
1109 opt_junk_alloc
= opt_junk_free
=
1111 } else if (CONF_MATCH_VALUE("alloc")) {
1113 opt_junk_alloc
= true;
1114 opt_junk_free
= false;
1115 } else if (CONF_MATCH_VALUE("free")) {
1117 opt_junk_alloc
= false;
1118 opt_junk_free
= true;
1121 "Invalid conf value", k
,
1126 CONF_HANDLE_SIZE_T(opt_quarantine
, "quarantine",
1127 0, SIZE_T_MAX
, false)
1128 CONF_HANDLE_BOOL(opt_redzone
, "redzone", true)
1129 CONF_HANDLE_BOOL(opt_zero
, "zero", true)
1131 if (config_utrace
) {
1132 CONF_HANDLE_BOOL(opt_utrace
, "utrace", true)
1134 if (config_xmalloc
) {
1135 CONF_HANDLE_BOOL(opt_xmalloc
, "xmalloc", true)
1137 if (config_tcache
) {
1138 CONF_HANDLE_BOOL(opt_tcache
, "tcache",
1139 !config_valgrind
|| !in_valgrind
)
1140 if (CONF_MATCH("tcache")) {
1141 assert(config_valgrind
&& in_valgrind
);
1145 "tcache cannot be enabled "
1146 "while running inside Valgrind",
1151 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max
,
1152 "lg_tcache_max", -1,
1153 (sizeof(size_t) << 3) - 1)
1156 CONF_HANDLE_BOOL(opt_prof
, "prof", true)
1157 CONF_HANDLE_CHAR_P(opt_prof_prefix
,
1158 "prof_prefix", "jeprof")
1159 CONF_HANDLE_BOOL(opt_prof_active
, "prof_active",
1161 CONF_HANDLE_BOOL(opt_prof_thread_active_init
,
1162 "prof_thread_active_init", true)
1163 CONF_HANDLE_SIZE_T(opt_lg_prof_sample
,
1164 "lg_prof_sample", 0,
1165 (sizeof(uint64_t) << 3) - 1, true)
1166 CONF_HANDLE_BOOL(opt_prof_accum
, "prof_accum",
1168 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval
,
1169 "lg_prof_interval", -1,
1170 (sizeof(uint64_t) << 3) - 1)
1171 CONF_HANDLE_BOOL(opt_prof_gdump
, "prof_gdump",
1173 CONF_HANDLE_BOOL(opt_prof_final
, "prof_final",
1175 CONF_HANDLE_BOOL(opt_prof_leak
, "prof_leak",
1178 malloc_conf_error("Invalid conf pair", k
, klen
, v
,
1181 #undef CONF_HANDLE_BOOL
1182 #undef CONF_HANDLE_SIZE_T
1183 #undef CONF_HANDLE_SSIZE_T
1184 #undef CONF_HANDLE_CHAR_P
/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{

    if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
        malloc_init_recursible)) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (!malloc_initialized());
        return (false);
    }
#endif
    return (true);
}
/* init_lock must be held. */
static bool
malloc_init_hard_a0_locked(void)
{

    malloc_initializer = INITIALIZER;

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }
    if (config_tcache && tcache_boot())
        return (true);
    if (malloc_mutex_init(&arenas_lock))
        return (true);
    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_auto = 1;
    narenas_total_set(narenas_auto);
    arenas = &a0;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
    /*
     * Initialize one arena here.  The rest are lazily created in
     * arena_choose_hard().
     */
    if (arena_init(0) == NULL)
        return (true);
    malloc_init_state = malloc_init_a0_initialized;
    return (false);
}
static bool
malloc_init_hard_a0(void)
{
    bool ret;

    malloc_mutex_lock(&init_lock);
    ret = malloc_init_hard_a0_locked();
    malloc_mutex_unlock(&init_lock);
    return (ret);
}
/*
 * Initialize data structures which may trigger recursive allocation.
 *
 * init_lock must be held.
 */
static bool
malloc_init_hard_recursible(void)
{
    bool ret = false;

    malloc_init_state = malloc_init_recursible;
    malloc_mutex_unlock(&init_lock);

    /* LinuxThreads' pthread_setspecific() allocates. */
    if (malloc_tsd_boot0()) {
        ret = true;
        goto label_return;
    }

    ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
    /* LinuxThreads' pthread_atfork() allocates. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        ret = true;
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

label_return:
    malloc_mutex_lock(&init_lock);
    return (ret);
}
/* init_lock must be held. */
static bool
malloc_init_hard_finish(void)
{

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
     */
    if (narenas_auto > MALLOCX_ARENA_MAX) {
        narenas_auto = MALLOCX_ARENA_MAX;
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas_auto);
    }
    narenas_total_set(narenas_auto);

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
        (MALLOCX_ARENA_MAX+1));
    if (arenas == NULL)
        return (true);
    /* Copy the pointer to the one arena that was already initialized. */
    arena_set(0, a0);

    malloc_init_state = malloc_init_initialized;
    malloc_slow_flag_init();

    return (false);
}
static bool
malloc_init_hard(void)
{

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
    _init_init_lock();
#endif
    malloc_mutex_lock(&init_lock);
    if (!malloc_init_hard_needed()) {
        malloc_mutex_unlock(&init_lock);
        return (false);
    }

    if (malloc_init_state != malloc_init_a0_initialized &&
        malloc_init_hard_a0_locked()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_init_hard_recursible()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_init_hard_finish()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    malloc_mutex_unlock(&init_lock);
    malloc_tsd_boot1();
    return (false);
}
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
1407 imalloc_prof_sample(tsd_t
*tsd
, size_t usize
, szind_t ind
,
1408 prof_tctx_t
*tctx
, bool slow_path
)
1414 if (usize
<= SMALL_MAXCLASS
) {
1415 szind_t ind_large
= size2index(LARGE_MINCLASS
);
1416 p
= imalloc(tsd
, LARGE_MINCLASS
, ind_large
, slow_path
);
1419 arena_prof_promoted(p
, usize
);
1421 p
= imalloc(tsd
, usize
, ind
, slow_path
);
1426 JEMALLOC_ALWAYS_INLINE_C
void *
1427 imalloc_prof(tsd_t
*tsd
, size_t usize
, szind_t ind
, bool slow_path
)
1432 tctx
= prof_alloc_prep(tsd
, usize
, prof_active_get_unlocked(), true);
1433 if (unlikely((uintptr_t)tctx
!= (uintptr_t)1U))
1434 p
= imalloc_prof_sample(tsd
, usize
, ind
, tctx
, slow_path
);
1436 p
= imalloc(tsd
, usize
, ind
, slow_path
);
1437 if (unlikely(p
== NULL
)) {
1438 prof_alloc_rollback(tsd
, tctx
, true);
1441 prof_malloc(p
, usize
, tctx
);
1446 JEMALLOC_ALWAYS_INLINE_C
void *
1447 imalloc_body(size_t size
, tsd_t
**tsd
, size_t *usize
, bool slow_path
)
1451 if (slow_path
&& unlikely(malloc_init()))
1454 ind
= size2index(size
);
1455 if (unlikely(ind
>= NSIZES
))
1458 if (config_stats
|| (config_prof
&& opt_prof
) || (slow_path
&&
1459 config_valgrind
&& unlikely(in_valgrind
))) {
1460 *usize
= index2size(ind
);
1461 assert(*usize
> 0 && *usize
<= HUGE_MAXCLASS
);
1464 if (config_prof
&& opt_prof
)
1465 return (imalloc_prof(*tsd
, *usize
, ind
, slow_path
));
1467 return (imalloc(*tsd
, size
, ind
, slow_path
));
1470 JEMALLOC_ALWAYS_INLINE_C
void
1471 imalloc_post_check(void *ret
, tsd_t
*tsd
, size_t usize
, bool slow_path
)
1473 if (unlikely(ret
== NULL
)) {
1474 if (slow_path
&& config_xmalloc
&& unlikely(opt_xmalloc
)) {
1475 malloc_write("<jemalloc>: Error in malloc(): "
1481 if (config_stats
&& likely(ret
!= NULL
)) {
1482 assert(usize
== isalloc(ret
, config_prof
));
1483 *tsd_thread_allocatedp_get(tsd
) += usize
;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (size == 0)
        size = 1;

    if (likely(!malloc_slow)) {
        /*
         * imalloc_body() is inlined so that fast and slow paths are
         * generated separately with statically known slow_path.
         */
        ret = imalloc_body(size, &tsd, &usize, false);
        imalloc_post_check(ret, tsd, usize, false);
    } else {
        ret = imalloc_body(size, &tsd, &usize, true);
        imalloc_post_check(ret, tsd, usize, true);
        UTRACE(0, size, ret);
        JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    }

    return (ret);
}
1517 imemalign_prof_sample(tsd_t
*tsd
, size_t alignment
, size_t usize
,
1524 if (usize
<= SMALL_MAXCLASS
) {
1525 assert(sa2u(LARGE_MINCLASS
, alignment
) == LARGE_MINCLASS
);
1526 p
= ipalloc(tsd
, LARGE_MINCLASS
, alignment
, false);
1529 arena_prof_promoted(p
, usize
);
1531 p
= ipalloc(tsd
, usize
, alignment
, false);
1536 JEMALLOC_ALWAYS_INLINE_C
void *
1537 imemalign_prof(tsd_t
*tsd
, size_t alignment
, size_t usize
)
1542 tctx
= prof_alloc_prep(tsd
, usize
, prof_active_get_unlocked(), true);
1543 if (unlikely((uintptr_t)tctx
!= (uintptr_t)1U))
1544 p
= imemalign_prof_sample(tsd
, alignment
, usize
, tctx
);
1546 p
= ipalloc(tsd
, usize
, alignment
, false);
1547 if (unlikely(p
== NULL
)) {
1548 prof_alloc_rollback(tsd
, tctx
, true);
1551 prof_malloc(p
, usize
, tctx
);
1556 JEMALLOC_ATTR(nonnull(1))
1558 imemalign(void **memptr
, size_t alignment
, size_t size
, size_t min_alignment
)
1565 assert(min_alignment
!= 0);
1567 if (unlikely(malloc_init())) {
1575 /* Make sure that alignment is a large enough power of 2. */
1576 if (unlikely(((alignment
- 1) & alignment
) != 0
1577 || (alignment
< min_alignment
))) {
1578 if (config_xmalloc
&& unlikely(opt_xmalloc
)) {
1579 malloc_write("<jemalloc>: Error allocating "
1580 "aligned memory: invalid alignment\n");
1588 usize
= sa2u(size
, alignment
);
1589 if (unlikely(usize
== 0 || usize
> HUGE_MAXCLASS
)) {
1594 if (config_prof
&& opt_prof
)
1595 result
= imemalign_prof(tsd
, alignment
, usize
);
1597 result
= ipalloc(tsd
, usize
, alignment
, false);
1598 if (unlikely(result
== NULL
))
1600 assert(((uintptr_t)result
& (alignment
- 1)) == ZU(0));
1605 if (config_stats
&& likely(result
!= NULL
)) {
1606 assert(usize
== isalloc(result
, config_prof
));
1607 *tsd_thread_allocatedp_get(tsd
) += usize
;
1609 UTRACE(0, size
, result
);
1612 assert(result
== NULL
);
1613 if (config_xmalloc
&& unlikely(opt_xmalloc
)) {
1614 malloc_write("<jemalloc>: Error allocating aligned memory: "
1622 JEMALLOC_EXPORT
int JEMALLOC_NOTHROW
1623 JEMALLOC_ATTR(nonnull(1))
1624 je_posix_memalign(void **memptr
, size_t alignment
, size_t size
)
1626 int ret
= imemalign(memptr
, alignment
, size
, sizeof(void *));
1627 JEMALLOC_VALGRIND_MALLOC(ret
== 0, *memptr
, isalloc(*memptr
,
1628 config_prof
), false);
1632 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1633 void JEMALLOC_NOTHROW
*
1634 JEMALLOC_ATTR(malloc
) JEMALLOC_ALLOC_SIZE(2)
1635 je_aligned_alloc(size_t alignment
, size_t size
)
1640 if (unlikely((err
= imemalign(&ret
, alignment
, size
, 1)) != 0)) {
1644 JEMALLOC_VALGRIND_MALLOC(err
== 0, ret
, isalloc(ret
, config_prof
),
1650 icalloc_prof_sample(tsd_t
*tsd
, size_t usize
, szind_t ind
, prof_tctx_t
*tctx
)
1656 if (usize
<= SMALL_MAXCLASS
) {
1657 szind_t ind_large
= size2index(LARGE_MINCLASS
);
1658 p
= icalloc(tsd
, LARGE_MINCLASS
, ind_large
);
1661 arena_prof_promoted(p
, usize
);
1663 p
= icalloc(tsd
, usize
, ind
);
1668 JEMALLOC_ALWAYS_INLINE_C
void *
1669 icalloc_prof(tsd_t
*tsd
, size_t usize
, szind_t ind
)
1674 tctx
= prof_alloc_prep(tsd
, usize
, prof_active_get_unlocked(), true);
1675 if (unlikely((uintptr_t)tctx
!= (uintptr_t)1U))
1676 p
= icalloc_prof_sample(tsd
, usize
, ind
, tctx
);
1678 p
= icalloc(tsd
, usize
, ind
);
1679 if (unlikely(p
== NULL
)) {
1680 prof_alloc_rollback(tsd
, tctx
, true);
1683 prof_malloc(p
, usize
, tctx
);
1688 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1689 void JEMALLOC_NOTHROW
*
1690 JEMALLOC_ATTR(malloc
) JEMALLOC_ALLOC_SIZE2(1, 2)
1691 je_calloc(size_t num
, size_t size
)
1697 size_t usize
JEMALLOC_CC_SILENCE_INIT(0);
1699 if (unlikely(malloc_init())) {
1706 num_size
= num
* size
;
1707 if (unlikely(num_size
== 0)) {
1708 if (num
== 0 || size
== 0)
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
        2))) && (num_size / size != num))) {
        /* size_t overflow. */
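        /*
         * Worked example (added for clarity): on a 64-bit system,
         * sizeof(size_t) << 2 == 32, so the mask selects the upper 32
         * bits of each operand.  If neither num nor size has any of
         * those bits set, the 64-bit product cannot overflow and the
         * division check is skipped entirely.
         */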
1726 ind
= size2index(num_size
);
1727 if (unlikely(ind
>= NSIZES
)) {
1731 if (config_prof
&& opt_prof
) {
1732 usize
= index2size(ind
);
1733 ret
= icalloc_prof(tsd
, usize
, ind
);
1735 if (config_stats
|| (config_valgrind
&& unlikely(in_valgrind
)))
1736 usize
= index2size(ind
);
1737 ret
= icalloc(tsd
, num_size
, ind
);
1741 if (unlikely(ret
== NULL
)) {
1742 if (config_xmalloc
&& unlikely(opt_xmalloc
)) {
1743 malloc_write("<jemalloc>: Error in calloc(): out of "
1749 if (config_stats
&& likely(ret
!= NULL
)) {
1750 assert(usize
== isalloc(ret
, config_prof
));
1751 *tsd_thread_allocatedp_get(tsd
) += usize
;
1753 UTRACE(0, num_size
, ret
);
1754 JEMALLOC_VALGRIND_MALLOC(ret
!= NULL
, ret
, usize
, true);
1759 irealloc_prof_sample(tsd_t
*tsd
, void *old_ptr
, size_t old_usize
, size_t usize
,
1766 if (usize
<= SMALL_MAXCLASS
) {
1767 p
= iralloc(tsd
, old_ptr
, old_usize
, LARGE_MINCLASS
, 0, false);
1770 arena_prof_promoted(p
, usize
);
1772 p
= iralloc(tsd
, old_ptr
, old_usize
, usize
, 0, false);
1777 JEMALLOC_ALWAYS_INLINE_C
void *
1778 irealloc_prof(tsd_t
*tsd
, void *old_ptr
, size_t old_usize
, size_t usize
)
1782 prof_tctx_t
*old_tctx
, *tctx
;
1784 prof_active
= prof_active_get_unlocked();
1785 old_tctx
= prof_tctx_get(old_ptr
);
1786 tctx
= prof_alloc_prep(tsd
, usize
, prof_active
, true);
1787 if (unlikely((uintptr_t)tctx
!= (uintptr_t)1U))
1788 p
= irealloc_prof_sample(tsd
, old_ptr
, old_usize
, usize
, tctx
);
1790 p
= iralloc(tsd
, old_ptr
, old_usize
, usize
, 0, false);
1791 if (unlikely(p
== NULL
)) {
1792 prof_alloc_rollback(tsd
, tctx
, true);
1795 prof_realloc(tsd
, p
, usize
, tctx
, prof_active
, true, old_ptr
, old_usize
,
1801 JEMALLOC_INLINE_C
void
1802 ifree(tsd_t
*tsd
, void *ptr
, tcache_t
*tcache
, bool slow_path
)
1805 UNUSED
size_t rzsize
JEMALLOC_CC_SILENCE_INIT(0);
1807 assert(ptr
!= NULL
);
1808 assert(malloc_initialized() || IS_INITIALIZER
);
1810 if (config_prof
&& opt_prof
) {
1811 usize
= isalloc(ptr
, config_prof
);
1812 prof_free(tsd
, ptr
, usize
);
1813 } else if (config_stats
|| config_valgrind
)
1814 usize
= isalloc(ptr
, config_prof
);
1816 *tsd_thread_deallocatedp_get(tsd
) += usize
;
1818 if (likely(!slow_path
))
1819 iqalloc(tsd
, ptr
, tcache
, false);
1821 if (config_valgrind
&& unlikely(in_valgrind
))
1823 iqalloc(tsd
, ptr
, tcache
, true);
1824 JEMALLOC_VALGRIND_FREE(ptr
, rzsize
);
1828 JEMALLOC_INLINE_C
void
1829 isfree(tsd_t
*tsd
, void *ptr
, size_t usize
, tcache_t
*tcache
)
1831 UNUSED
size_t rzsize
JEMALLOC_CC_SILENCE_INIT(0);
1833 assert(ptr
!= NULL
);
1834 assert(malloc_initialized() || IS_INITIALIZER
);
1836 if (config_prof
&& opt_prof
)
1837 prof_free(tsd
, ptr
, usize
);
1839 *tsd_thread_deallocatedp_get(tsd
) += usize
;
1840 if (config_valgrind
&& unlikely(in_valgrind
))
1842 isqalloc(tsd
, ptr
, usize
, tcache
);
1843 JEMALLOC_VALGRIND_FREE(ptr
, rzsize
);
1846 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1847 void JEMALLOC_NOTHROW
*
1848 JEMALLOC_ALLOC_SIZE(2)
1849 je_realloc(void *ptr
, size_t size
)
1852 tsd_t
*tsd
JEMALLOC_CC_SILENCE_INIT(NULL
);
1853 size_t usize
JEMALLOC_CC_SILENCE_INIT(0);
1854 size_t old_usize
= 0;
1855 UNUSED
size_t old_rzsize
JEMALLOC_CC_SILENCE_INIT(0);
1857 if (unlikely(size
== 0)) {
1859 /* realloc(ptr, 0) is equivalent to free(ptr). */
1862 ifree(tsd
, ptr
, tcache_get(tsd
, false), true);
1868 if (likely(ptr
!= NULL
)) {
1869 assert(malloc_initialized() || IS_INITIALIZER
);
1870 malloc_thread_init();
1873 old_usize
= isalloc(ptr
, config_prof
);
1874 if (config_valgrind
&& unlikely(in_valgrind
))
1875 old_rzsize
= config_prof
? p2rz(ptr
) : u2rz(old_usize
);
1877 if (config_prof
&& opt_prof
) {
1879 ret
= unlikely(usize
== 0 || usize
> HUGE_MAXCLASS
) ?
1880 NULL
: irealloc_prof(tsd
, ptr
, old_usize
, usize
);
1882 if (config_stats
|| (config_valgrind
&&
1883 unlikely(in_valgrind
)))
1885 ret
= iralloc(tsd
, ptr
, old_usize
, size
, 0, false);
1888 /* realloc(NULL, size) is equivalent to malloc(size). */
1889 if (likely(!malloc_slow
))
1890 ret
= imalloc_body(size
, &tsd
, &usize
, false);
1892 ret
= imalloc_body(size
, &tsd
, &usize
, true);
1895 if (unlikely(ret
== NULL
)) {
1896 if (config_xmalloc
&& unlikely(opt_xmalloc
)) {
1897 malloc_write("<jemalloc>: Error in realloc(): "
1903 if (config_stats
&& likely(ret
!= NULL
)) {
1904 assert(usize
== isalloc(ret
, config_prof
));
1905 *tsd_thread_allocatedp_get(tsd
) += usize
;
1906 *tsd_thread_deallocatedp_get(tsd
) += old_usize
;
1908 UTRACE(ptr
, size
, ret
);
1909 JEMALLOC_VALGRIND_REALLOC(true, ret
, usize
, true, ptr
, old_usize
,
1910 old_rzsize
, true, false);
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (likely(ptr != NULL)) {
        tsd_t *tsd = tsd_fetch();
        if (likely(!malloc_slow))
            ifree(tsd, ptr, tcache_get(tsd, false), false);
        else
            ifree(tsd, ptr, tcache_get(tsd, false), true);
    }
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1937 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1938 void JEMALLOC_NOTHROW
*
1939 JEMALLOC_ATTR(malloc
)
1940 je_memalign(size_t alignment
, size_t size
)
1942 void *ret
JEMALLOC_CC_SILENCE_INIT(NULL
);
1943 if (unlikely(imemalign(&ret
, alignment
, size
, 1) != 0))
1945 JEMALLOC_VALGRIND_MALLOC(ret
!= NULL
, ret
, size
, false);
1950 #ifdef JEMALLOC_OVERRIDE_VALLOC
1951 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1952 void JEMALLOC_NOTHROW
*
1953 JEMALLOC_ATTR(malloc
)
1954 je_valloc(size_t size
)
1956 void *ret
JEMALLOC_CC_SILENCE_INIT(NULL
);
1957 if (unlikely(imemalign(&ret
, PAGE
, size
, 1) != 0))
1959 JEMALLOC_VALGRIND_MALLOC(ret
!= NULL
, ret
, size
, false);
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif
#endif
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
1999 JEMALLOC_ALWAYS_INLINE_C
bool
2000 imallocx_flags_decode_hard(tsd_t
*tsd
, size_t size
, int flags
, size_t *usize
,
2001 size_t *alignment
, bool *zero
, tcache_t
**tcache
, arena_t
**arena
)
2004 if ((flags
& MALLOCX_LG_ALIGN_MASK
) == 0) {
2008 *alignment
= MALLOCX_ALIGN_GET_SPECIFIED(flags
);
2009 *usize
= sa2u(size
, *alignment
);
2011 if (unlikely(*usize
== 0 || *usize
> HUGE_MAXCLASS
))
2013 *zero
= MALLOCX_ZERO_GET(flags
);
2014 if ((flags
& MALLOCX_TCACHE_MASK
) != 0) {
2015 if ((flags
& MALLOCX_TCACHE_MASK
) == MALLOCX_TCACHE_NONE
)
2018 *tcache
= tcaches_get(tsd
, MALLOCX_TCACHE_GET(flags
));
2020 *tcache
= tcache_get(tsd
, true);
2021 if ((flags
& MALLOCX_ARENA_MASK
) != 0) {
2022 unsigned arena_ind
= MALLOCX_ARENA_GET(flags
);
2023 *arena
= arena_get(arena_ind
, true);
2024 if (unlikely(*arena
== NULL
))
2031 JEMALLOC_ALWAYS_INLINE_C
bool
2032 imallocx_flags_decode(tsd_t
*tsd
, size_t size
, int flags
, size_t *usize
,
2033 size_t *alignment
, bool *zero
, tcache_t
**tcache
, arena_t
**arena
)
2036 if (likely(flags
== 0)) {
2038 if (unlikely(*usize
== 0 || *usize
> HUGE_MAXCLASS
))
2042 *tcache
= tcache_get(tsd
, true);
2046 return (imallocx_flags_decode_hard(tsd
, size
, flags
, usize
,
2047 alignment
, zero
, tcache
, arena
));
2051 JEMALLOC_ALWAYS_INLINE_C
void *
2052 imallocx_flags(tsd_t
*tsd
, size_t usize
, size_t alignment
, bool zero
,
2053 tcache_t
*tcache
, arena_t
*arena
)
2057 if (unlikely(alignment
!= 0))
2058 return (ipalloct(tsd
, usize
, alignment
, zero
, tcache
, arena
));
2059 ind
= size2index(usize
);
2060 assert(ind
< NSIZES
);
2062 return (icalloct(tsd
, usize
, ind
, tcache
, arena
));
2063 return (imalloct(tsd
, usize
, ind
, tcache
, arena
));
2067 imallocx_prof_sample(tsd_t
*tsd
, size_t usize
, size_t alignment
, bool zero
,
2068 tcache_t
*tcache
, arena_t
*arena
)
2072 if (usize
<= SMALL_MAXCLASS
) {
2073 assert(((alignment
== 0) ? s2u(LARGE_MINCLASS
) :
2074 sa2u(LARGE_MINCLASS
, alignment
)) == LARGE_MINCLASS
);
2075 p
= imallocx_flags(tsd
, LARGE_MINCLASS
, alignment
, zero
, tcache
,
2079 arena_prof_promoted(p
, usize
);
2081 p
= imallocx_flags(tsd
, usize
, alignment
, zero
, tcache
, arena
);
2086 JEMALLOC_ALWAYS_INLINE_C
void *
2087 imallocx_prof(tsd_t
*tsd
, size_t size
, int flags
, size_t *usize
)
2096 if (unlikely(imallocx_flags_decode(tsd
, size
, flags
, usize
, &alignment
,
2097 &zero
, &tcache
, &arena
)))
2099 tctx
= prof_alloc_prep(tsd
, *usize
, prof_active_get_unlocked(), true);
2100 if (likely((uintptr_t)tctx
== (uintptr_t)1U))
2101 p
= imallocx_flags(tsd
, *usize
, alignment
, zero
, tcache
, arena
);
2102 else if ((uintptr_t)tctx
> (uintptr_t)1U) {
2103 p
= imallocx_prof_sample(tsd
, *usize
, alignment
, zero
, tcache
,
2107 if (unlikely(p
== NULL
)) {
2108 prof_alloc_rollback(tsd
, tctx
, true);
2111 prof_malloc(p
, *usize
, tctx
);
2113 assert(alignment
== 0 || ((uintptr_t)p
& (alignment
- 1)) == ZU(0));
2117 JEMALLOC_ALWAYS_INLINE_C
void *
2118 imallocx_no_prof(tsd_t
*tsd
, size_t size
, int flags
, size_t *usize
)
2126 if (likely(flags
== 0)) {
2127 szind_t ind
= size2index(size
);
2128 if (unlikely(ind
>= NSIZES
))
2130 if (config_stats
|| (config_valgrind
&&
2131 unlikely(in_valgrind
))) {
2132 *usize
= index2size(ind
);
2133 assert(*usize
> 0 && *usize
<= HUGE_MAXCLASS
);
2135 return (imalloc(tsd
, size
, ind
, true));
2138 if (unlikely(imallocx_flags_decode_hard(tsd
, size
, flags
, usize
,
2139 &alignment
, &zero
, &tcache
, &arena
)))
2141 p
= imallocx_flags(tsd
, *usize
, alignment
, zero
, tcache
, arena
);
2142 assert(alignment
== 0 || ((uintptr_t)p
& (alignment
- 1)) == ZU(0));
2146 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2147 void JEMALLOC_NOTHROW
*
2148 JEMALLOC_ATTR(malloc
) JEMALLOC_ALLOC_SIZE(1)
2149 je_mallocx(size_t size
, int flags
)
2157 if (unlikely(malloc_init()))
2161 if (config_prof
&& opt_prof
)
2162 p
= imallocx_prof(tsd
, size
, flags
, &usize
);
2164 p
= imallocx_no_prof(tsd
, size
, flags
, &usize
);
2165 if (unlikely(p
== NULL
))
2169 assert(usize
== isalloc(p
, config_prof
));
2170 *tsd_thread_allocatedp_get(tsd
) += usize
;
2173 JEMALLOC_VALGRIND_MALLOC(true, p
, usize
, MALLOCX_ZERO_GET(flags
));
2176 if (config_xmalloc
&& unlikely(opt_xmalloc
)) {
2177 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2185 irallocx_prof_sample(tsd_t
*tsd
, void *old_ptr
, size_t old_usize
,
2186 size_t usize
, size_t alignment
, bool zero
, tcache_t
*tcache
, arena_t
*arena
,
2193 if (usize
<= SMALL_MAXCLASS
) {
2194 p
= iralloct(tsd
, old_ptr
, old_usize
, LARGE_MINCLASS
, alignment
,
2195 zero
, tcache
, arena
);
2198 arena_prof_promoted(p
, usize
);
2200 p
= iralloct(tsd
, old_ptr
, old_usize
, usize
, alignment
, zero
,
2207 JEMALLOC_ALWAYS_INLINE_C
void *
2208 irallocx_prof(tsd_t
*tsd
, void *old_ptr
, size_t old_usize
, size_t size
,
2209 size_t alignment
, size_t *usize
, bool zero
, tcache_t
*tcache
,
2214 prof_tctx_t
*old_tctx
, *tctx
;
2216 prof_active
= prof_active_get_unlocked();
2217 old_tctx
= prof_tctx_get(old_ptr
);
2218 tctx
= prof_alloc_prep(tsd
, *usize
, prof_active
, true);
2219 if (unlikely((uintptr_t)tctx
!= (uintptr_t)1U)) {
2220 p
= irallocx_prof_sample(tsd
, old_ptr
, old_usize
, *usize
,
2221 alignment
, zero
, tcache
, arena
, tctx
);
2223 p
= iralloct(tsd
, old_ptr
, old_usize
, size
, alignment
, zero
,
2226 if (unlikely(p
== NULL
)) {
2227 prof_alloc_rollback(tsd
, tctx
, true);
2231 if (p
== old_ptr
&& alignment
!= 0) {
2233 * The allocation did not move, so it is possible that the size
2234 * class is smaller than would guarantee the requested
2235 * alignment, and that the alignment constraint was
2236 * serendipitously satisfied. Additionally, old_usize may not
2237 * be the same as the current usize because of in-place large
2238 * reallocation. Therefore, query the actual value of usize.
2240 *usize
= isalloc(p
, config_prof
);
2242 prof_realloc(tsd
, p
, *usize
, tctx
, prof_active
, true, old_ptr
,
2243 old_usize
, old_tctx
);
2248 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2249 void JEMALLOC_NOTHROW
*
2250 JEMALLOC_ALLOC_SIZE(2)
2251 je_rallocx(void *ptr
, size_t size
, int flags
)
2257 UNUSED
size_t old_rzsize
JEMALLOC_CC_SILENCE_INIT(0);
2258 size_t alignment
= MALLOCX_ALIGN_GET(flags
);
2259 bool zero
= flags
& MALLOCX_ZERO
;
2263 assert(ptr
!= NULL
);
2265 assert(malloc_initialized() || IS_INITIALIZER
);
2266 malloc_thread_init();
2269 if (unlikely((flags
& MALLOCX_ARENA_MASK
) != 0)) {
2270 unsigned arena_ind
= MALLOCX_ARENA_GET(flags
);
2271 arena
= arena_get(arena_ind
, true);
2272 if (unlikely(arena
== NULL
))
2277 if (unlikely((flags
& MALLOCX_TCACHE_MASK
) != 0)) {
2278 if ((flags
& MALLOCX_TCACHE_MASK
) == MALLOCX_TCACHE_NONE
)
2281 tcache
= tcaches_get(tsd
, MALLOCX_TCACHE_GET(flags
));
2283 tcache
= tcache_get(tsd
, true);
2285 old_usize
= isalloc(ptr
, config_prof
);
2286 if (config_valgrind
&& unlikely(in_valgrind
))
2287 old_rzsize
= u2rz(old_usize
);
2289 if (config_prof
&& opt_prof
) {
2290 usize
= (alignment
== 0) ? s2u(size
) : sa2u(size
, alignment
);
2291 if (unlikely(usize
== 0 || usize
> HUGE_MAXCLASS
))
2293 p
= irallocx_prof(tsd
, ptr
, old_usize
, size
, alignment
, &usize
,
2294 zero
, tcache
, arena
);
2295 if (unlikely(p
== NULL
))
2298 p
= iralloct(tsd
, ptr
, old_usize
, size
, alignment
, zero
,
2300 if (unlikely(p
== NULL
))
2302 if (config_stats
|| (config_valgrind
&& unlikely(in_valgrind
)))
2303 usize
= isalloc(p
, config_prof
);
2305 assert(alignment
== 0 || ((uintptr_t)p
& (alignment
- 1)) == ZU(0));
2308 *tsd_thread_allocatedp_get(tsd
) += usize
;
2309 *tsd_thread_deallocatedp_get(tsd
) += old_usize
;
2311 UTRACE(ptr
, size
, p
);
2312 JEMALLOC_VALGRIND_REALLOC(true, p
, usize
, false, ptr
, old_usize
,
2313 old_rzsize
, false, zero
);
2316 if (config_xmalloc
&& unlikely(opt_xmalloc
)) {
2317 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2320 UTRACE(ptr
, size
, 0);
2324 JEMALLOC_ALWAYS_INLINE_C
size_t
2325 ixallocx_helper(tsd_t
*tsd
, void *ptr
, size_t old_usize
, size_t size
,
2326 size_t extra
, size_t alignment
, bool zero
)
2330 if (ixalloc(tsd
, ptr
, old_usize
, size
, extra
, alignment
, zero
))
2332 usize
= isalloc(ptr
, config_prof
);
2338 ixallocx_prof_sample(tsd_t
*tsd
, void *ptr
, size_t old_usize
, size_t size
,
2339 size_t extra
, size_t alignment
, bool zero
, prof_tctx_t
*tctx
)
2345 usize
= ixallocx_helper(tsd
, ptr
, old_usize
, size
, extra
, alignment
,
2351 JEMALLOC_ALWAYS_INLINE_C
size_t
2352 ixallocx_prof(tsd_t
*tsd
, void *ptr
, size_t old_usize
, size_t size
,
2353 size_t extra
, size_t alignment
, bool zero
)
2355 size_t usize_max
, usize
;
2357 prof_tctx_t
*old_tctx
, *tctx
;
2359 prof_active
= prof_active_get_unlocked();
2360 old_tctx
= prof_tctx_get(ptr
);
2362 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2363 * Therefore, compute its maximum possible value and use that in
2364 * prof_alloc_prep() to decide whether to capture a backtrace.
2365 * prof_realloc() will use the actual usize to decide whether to sample.
2367 if (alignment
== 0) {
2368 usize_max
= s2u(size
+extra
);
2369 assert(usize_max
> 0 && usize_max
<= HUGE_MAXCLASS
);
2371 usize_max
= sa2u(size
+extra
, alignment
);
2372 if (unlikely(usize_max
== 0 || usize_max
> HUGE_MAXCLASS
)) {
2374 * usize_max is out of range, and chances are that
2375 * allocation will fail, but use the maximum possible
2376 * value and carry on with prof_alloc_prep(), just in
2377 * case allocation succeeds.
2379 usize_max
= HUGE_MAXCLASS
;
2382 tctx
= prof_alloc_prep(tsd
, usize_max
, prof_active
, false);
2384 if (unlikely((uintptr_t)tctx
!= (uintptr_t)1U)) {
2385 usize
= ixallocx_prof_sample(tsd
, ptr
, old_usize
, size
, extra
,
2386 alignment
, zero
, tctx
);
2388 usize
= ixallocx_helper(tsd
, ptr
, old_usize
, size
, extra
,
2391 if (usize
== old_usize
) {
2392 prof_alloc_rollback(tsd
, tctx
, false);
2395 prof_realloc(tsd
, ptr
, usize
, tctx
, prof_active
, false, ptr
, old_usize
,
2401 JEMALLOC_EXPORT
size_t JEMALLOC_NOTHROW
2402 je_xallocx(void *ptr
, size_t size
, size_t extra
, int flags
)
2405 size_t usize
, old_usize
;
2406 UNUSED
size_t old_rzsize
JEMALLOC_CC_SILENCE_INIT(0);
2407 size_t alignment
= MALLOCX_ALIGN_GET(flags
);
2408 bool zero
= flags
& MALLOCX_ZERO
;
2410 assert(ptr
!= NULL
);
2412 assert(SIZE_T_MAX
- size
>= extra
);
2413 assert(malloc_initialized() || IS_INITIALIZER
);
2414 malloc_thread_init();
    old_usize = isalloc(ptr, config_prof);

    /*
     * The API explicitly absolves itself of protecting against (size +
     * extra) numerical overflow, but we may need to clamp extra to avoid
     * exceeding HUGE_MAXCLASS.
     *
     * Ordinarily, size limit checking is handled deeper down, but here we
     * have to check as part of (size + extra) clamping, since we need the
     * clamped value in the above helper functions.
     */
    if (unlikely(size > HUGE_MAXCLASS)) {
        usize = old_usize;
        goto label_not_resized;
    }
    if (unlikely(HUGE_MAXCLASS - size < extra))
        extra = HUGE_MAXCLASS - size;
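    /*
     * Worked example (added for clarity): if size == HUGE_MAXCLASS - PAGE
     * and the caller passes extra == SIZE_T_MAX, extra is clamped to PAGE
     * here, so the size + extra handed to ixallocx_prof()/ixallocx_helper()
     * never exceeds HUGE_MAXCLASS.
     */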
2435 if (config_valgrind
&& unlikely(in_valgrind
))
2436 old_rzsize
= u2rz(old_usize
);
2438 if (config_prof
&& opt_prof
) {
2439 usize
= ixallocx_prof(tsd
, ptr
, old_usize
, size
, extra
,
2442 usize
= ixallocx_helper(tsd
, ptr
, old_usize
, size
, extra
,
2445 if (unlikely(usize
== old_usize
))
2446 goto label_not_resized
;
2449 *tsd_thread_allocatedp_get(tsd
) += usize
;
2450 *tsd_thread_deallocatedp_get(tsd
) += old_usize
;
2452 JEMALLOC_VALGRIND_REALLOC(false, ptr
, usize
, false, ptr
, old_usize
,
2453 old_rzsize
, false, zero
);
2455 UTRACE(ptr
, size
, ptr
);
2459 JEMALLOC_EXPORT
size_t JEMALLOC_NOTHROW
2461 je_sallocx(const void *ptr
, int flags
)
2465 assert(malloc_initialized() || IS_INITIALIZER
);
2466 malloc_thread_init();
2468 if (config_ivsalloc
)
2469 usize
= ivsalloc(ptr
, config_prof
);
2471 usize
= isalloc(ptr
, config_prof
);
2476 JEMALLOC_EXPORT
void JEMALLOC_NOTHROW
2477 je_dallocx(void *ptr
, int flags
)
2482 assert(ptr
!= NULL
);
2483 assert(malloc_initialized() || IS_INITIALIZER
);
2486 if (unlikely((flags
& MALLOCX_TCACHE_MASK
) != 0)) {
2487 if ((flags
& MALLOCX_TCACHE_MASK
) == MALLOCX_TCACHE_NONE
)
2490 tcache
= tcaches_get(tsd
, MALLOCX_TCACHE_GET(flags
));
2492 tcache
= tcache_get(tsd
, false);
2495 ifree(tsd_fetch(), ptr
, tcache
, true);
2498 JEMALLOC_ALWAYS_INLINE_C
size_t
2499 inallocx(size_t size
, int flags
)
2503 if (likely((flags
& MALLOCX_LG_ALIGN_MASK
) == 0))
2506 usize
= sa2u(size
, MALLOCX_ALIGN_GET_SPECIFIED(flags
));
2510 JEMALLOC_EXPORT
void JEMALLOC_NOTHROW
2511 je_sdallocx(void *ptr
, size_t size
, int flags
)
2517 assert(ptr
!= NULL
);
2518 assert(malloc_initialized() || IS_INITIALIZER
);
2519 usize
= inallocx(size
, flags
);
2520 assert(usize
== isalloc(ptr
, config_prof
));
2523 if (unlikely((flags
& MALLOCX_TCACHE_MASK
) != 0)) {
2524 if ((flags
& MALLOCX_TCACHE_MASK
) == MALLOCX_TCACHE_NONE
)
2527 tcache
= tcaches_get(tsd
, MALLOCX_TCACHE_GET(flags
));
2529 tcache
= tcache_get(tsd
, false);
2532 isfree(tsd
, ptr
, usize
, tcache
);
2535 JEMALLOC_EXPORT
size_t JEMALLOC_NOTHROW
2537 je_nallocx(size_t size
, int flags
)
2543 if (unlikely(malloc_init()))
2546 usize
= inallocx(size
, flags
);
2547 if (unlikely(usize
> HUGE_MAXCLASS
))
2553 JEMALLOC_EXPORT
int JEMALLOC_NOTHROW
2554 je_mallctl(const char *name
, void *oldp
, size_t *oldlenp
, void *newp
,
2558 if (unlikely(malloc_init()))
2561 return (ctl_byname(name
, oldp
, oldlenp
, newp
, newlen
));
2564 JEMALLOC_EXPORT
int JEMALLOC_NOTHROW
2565 je_mallctlnametomib(const char *name
, size_t *mibp
, size_t *miblenp
)
2568 if (unlikely(malloc_init()))
2571 return (ctl_nametomib(name
, mibp
, miblenp
));
2574 JEMALLOC_EXPORT
int JEMALLOC_NOTHROW
2575 je_mallctlbymib(const size_t *mib
, size_t miblen
, void *oldp
, size_t *oldlenp
,
2576 void *newp
, size_t newlen
)
2579 if (unlikely(malloc_init()))
2582 return (ctl_bymib(mib
, miblen
, oldp
, oldlenp
, newp
, newlen
));
2585 JEMALLOC_EXPORT
void JEMALLOC_NOTHROW
2586 je_malloc_stats_print(void (*write_cb
)(void *, const char *), void *cbopaque
,
2590 stats_print(write_cb
, cbopaque
, opts
);
2593 JEMALLOC_EXPORT
size_t JEMALLOC_NOTHROW
2594 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST
void *ptr
)
2598 assert(malloc_initialized() || IS_INITIALIZER
);
2599 malloc_thread_init();
2601 if (config_ivsalloc
)
2602 ret
= ivsalloc(ptr
, config_prof
);
2604 ret
= (ptr
== NULL
) ? 0 : isalloc(ptr
, config_prof
);
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
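/*
 * Concrete scenario (added for clarity; not in the original source): thread T
 * is created and begins its first malloc(), taking e.g. init_lock; the main
 * thread calls fork() before T finishes; the child inherits a locked mutex
 * with no thread left to release it, so the child's first allocation
 * deadlocks.  Initializing from this constructor narrows that window.
 */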
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

    malloc_init();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB
    if (!malloc_initialized())
        return;
#endif
    assert(malloc_initialized());

    /* Acquire all mutexes in a safe order. */
    malloc_mutex_prefork(&arenas_lock);
    for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
        arena_t *arena;

        if ((arena = arena_get(i, false)) != NULL)
            arena_prefork(arena);
    }
2669 #ifndef JEMALLOC_MUTEX_INIT_CB
2671 jemalloc_postfork_parent(void)
2673 JEMALLOC_EXPORT
void
2674 _malloc_postfork(void)
2677 unsigned i
, narenas
;
2679 #ifdef JEMALLOC_MUTEX_INIT_CB
2680 if (!malloc_initialized())
2683 assert(malloc_initialized());
2685 /* Release all mutexes, now that fork() has completed. */
2686 base_postfork_parent();
2687 chunk_postfork_parent();
2688 for (i
= 0, narenas
= narenas_total_get(); i
< narenas
; i
++) {
2691 if ((arena
= arena_get(i
, false)) != NULL
)
2692 arena_postfork_parent(arena
);
2694 malloc_mutex_postfork_parent(&arenas_lock
);
2695 prof_postfork_parent();
2696 ctl_postfork_parent();
2700 jemalloc_postfork_child(void)
2702 unsigned i
, narenas
;
2704 assert(malloc_initialized());
2706 /* Release all mutexes, now that fork() has completed. */
2707 base_postfork_child();
2708 chunk_postfork_child();
2709 for (i
= 0, narenas
= narenas_total_get(); i
< narenas
; i
++) {
2712 if ((arena
= arena_get(i
, false)) != NULL
)
2713 arena_postfork_child(arena
);
2715 malloc_mutex_postfork_child(&arenas_lock
);
2716 prof_postfork_child();
2717 ctl_postfork_child();
2720 /******************************************************************************/