#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests. Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */

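/*
 * Bootstrapping proceeds through these states in descending numeric order.
 * malloc_init_initialized is deliberately zero so that the fast-path check
 * "malloc_init_state != malloc_init_initialized" compiles to a single
 * compare against zero in the common, fully initialized case.
 */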
typedef enum {
    malloc_init_uninitialized = 3,
    malloc_init_a0_initialized = 2,
    malloc_init_recursible = 1,
    malloc_init_initialized = 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;

/* 0 should be the common case. Set to true to trigger initialization. */
static bool malloc_slow = true;

/* When malloc_slow != 0, set the corresponding bits for sanity check. */
enum {
    flag_opt_junk_alloc = (1U),
    flag_opt_junk_free = (1U << 1),
    flag_opt_quarantine = (1U << 2),
    flag_opt_zero = (1U << 3),
    flag_opt_utrace = (1U << 4),
    flag_in_valgrind = (1U << 5),
    flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;

/* Last entry for overflow detection only. */
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES+1] = {
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
    SIZE_CLASSES
#undef SC
    ZU(0)
};
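
/*
 * Illustrative arithmetic (actual entries come from the generated
 * SIZE_CLASSES list): an SC(index, 3, 3, 0, ...) tuple contributes the
 * entry (ZU(1) << 3) + (ZU(0) << 3) == 8, i.e. an 8-byte size class.
 */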

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
    SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
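
/*
 * Each S2B_n(i) above expands to 2^(n - LG_TINY_MIN) copies of index i, so
 * size2index_tab ends up with one entry per LG_TINY_MIN-granularity step;
 * the lookup fast path can then map a small request size to its size class
 * index with a shift and a table load.
 */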

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    /*
     * If another constructor in the same binary is using mallctl to e.g.
     * set up chunk hooks, it may end up running before this one, and
     * malloc_init_hard will crash trying to lock the uninitialized lock.
     * So we force an initialization of the lock in malloc_init_hard as
     * well. We don't try to care about atomicity of the accesses to the
     * init_lock_initialized boolean, since it really only matters early
     * in the process creation, before any separate thread normally starts
     * doing anything.
     */
    if (!init_lock_initialized)
        malloc_mutex_init(&init_lock);
    init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
    void *p;  /* Input pointer (as in realloc(p, s)). */
    size_t s; /* Request size. */
    void *r;  /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
    if (unlikely(opt_utrace)) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
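
/*
 * Note that UTRACE saves errno before calling utrace(2) and restores it
 * afterward, so tracing never perturbs the errno value seen by callers of
 * the allocation functions.
 */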

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

    return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption. The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

    if (unlikely(malloc_init_state == malloc_init_uninitialized))
        return (malloc_init_hard_a0());
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized()) && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}

/*
 * The a0*() functions are used instead of i[mcd]alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

    if (unlikely(malloc_init_a0()))
        return (NULL);

    return (iallocztm(NULL, size, size2index(size), zero, false,
        is_metadata, arena_get(0, false), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

    idalloctm(NULL, ptr, false, is_metadata, true);
}

void *
a0malloc(size_t size)
{

    return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

    a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

    if (unlikely(size == 0))
        size = 1;

    return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
    size_t num_size;

    num_size = num * size;
    if (unlikely(num_size == 0)) {
        assert(num == 0 || size == 0);
        num_size = 1;
    }

    return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

    if (unlikely(ptr == NULL))
        return;

    a0idalloc(ptr, false);
}
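
/*
 * The bootstrap_*() wrappers preserve the malloc(3)/calloc(3)/free(3)
 * edge-case semantics (zero-size requests are bumped to 1 byte, freeing
 * NULL is a no-op) while routing through the TLS-free a0 path.
 */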

static void
arena_set(unsigned ind, arena_t *arena)
{

    atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

    atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

    atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

    return (atomic_read_u(&narenas_total));
}
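
/*
 * arenas[] elements and narenas_total are read and written with atomics so
 * that readers never need arenas_lock; the lock only serializes writers
 * that create arenas.
 */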

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(unsigned ind)
{
    arena_t *arena;

    assert(ind <= narenas_total_get());
    if (ind > MALLOCX_ARENA_MAX)
        return (NULL);
    if (ind == narenas_total_get())
        narenas_total_inc();

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arena_get(ind, false);
    if (arena != NULL) {
        assert(ind < narenas_auto);
        return (arena);
    }

    /* Actually initialize the arena. */
    arena = arena_new(ind);
    arena_set(ind, arena);
    return (arena);
}

arena_t *
arena_init(unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arena_init_locked(ind);
    malloc_mutex_unlock(&arenas_lock);
    return (arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_inc(arena);

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
    arena_t *oldarena, *newarena;

    oldarena = arena_get(oldind, false);
    newarena = arena_get(newind, false);
    arena_nthreads_dec(oldarena);
    arena_nthreads_inc(newarena);
    tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_dec(arena);
    tsd_arena_set(tsd, NULL);
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
    arena_tdata_t *tdata, *arenas_tdata_old;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
    unsigned narenas_tdata_old, i;
    unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /*
     * Dissociate old tdata array (and set up for deallocation upon return)
     * if it's too small.
     */
    if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
        arenas_tdata_old = arenas_tdata;
        narenas_tdata_old = narenas_tdata;
        arenas_tdata = NULL;
        narenas_tdata = 0;
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    } else {
        arenas_tdata_old = NULL;
        narenas_tdata_old = 0;
    }

    /* Allocate tdata array if it's missing. */
    if (arenas_tdata == NULL) {
        bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
        narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
            *arenas_tdata_bypassp = true;
            arenas_tdata = (arena_tdata_t *)a0malloc(
                sizeof(arena_tdata_t) * narenas_tdata);
            *arenas_tdata_bypassp = false;
        }
        if (arenas_tdata == NULL) {
            tdata = NULL;
            goto label_return;
        }
        assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    }

    /*
     * Copy to tdata array. It's possible that the actual number of arenas
     * has increased since narenas_total_get() was called above, but that
     * causes no correctness issues unless two threads concurrently execute
     * the arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */

    /* Copy/initialize tickers. */
    for (i = 0; i < narenas_actual; i++) {
        if (i < narenas_tdata_old) {
            ticker_copy(&arenas_tdata[i].decay_ticker,
                &arenas_tdata_old[i].decay_ticker);
        } else {
            ticker_init(&arenas_tdata[i].decay_ticker,
                DECAY_NTICKS_PER_UPDATE);
        }
    }
    if (narenas_tdata > narenas_actual) {
        memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
            * (narenas_tdata - narenas_actual));
    }

    /* Read the refreshed tdata array. */
    tdata = &arenas_tdata[ind];
label_return:
    if (arenas_tdata_old != NULL)
        a0dalloc(arenas_tdata_old);
    return (tdata);
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arena_get(0, false) != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arena_get(i, false) != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arena_nthreads_get(arena_get(i, false)) <
                    arena_nthreads_get(arena_get(choose,
                    false)))
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arena_nthreads_get(arena_get(choose, false)) == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arena_get(choose, false);
        } else {
            /* Initialize a new arena. */
            choose = first_null;
            ret = arena_init_locked(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        arena_bind(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arena_get(0, false);
        arena_bind(tsd, 0);
    }

    return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
    arena_tdata_t *arenas_tdata;

    /* Prevent tsd->arenas_tdata from being (re)created. */
    *tsd_arenas_tdata_bypassp_get(tsd) = true;

    arenas_tdata = tsd_arenas_tdata_get(tsd);
    if (arenas_tdata != NULL) {
        tsd_arenas_tdata_set(tsd, NULL);
        a0dalloc(arenas_tdata);
    }
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads. This is racy, since
         * individual threads do not lock when recording tcache stats
         * events. As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arena_get(i, false);
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
    if (issetugid() != 0)
        return (NULL);
# endif
    return (getenv(name));
}
#endif

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
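
/*
 * For example, parsing the conf string "abort:true,narenas:4" yields the
 * key/value pair ("abort", "true") on the first call and ("narenas", "4")
 * on the second; the caller's loop stops when the string is exhausted or
 * when malloc_conf_next() reports a malformed string by returning true.
 */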

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
    /*
     * Combine the runtime options into malloc_slow for fast path. Called
     * after processing all the options.
     */
    malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
        | (opt_junk_free ? flag_opt_junk_free : 0)
        | (opt_quarantine ? flag_opt_quarantine : 0)
        | (opt_zero ? flag_opt_zero : 0)
        | (opt_utrace ? flag_opt_utrace : 0)
        | (opt_xmalloc ? flag_opt_xmalloc : 0);

    if (config_valgrind)
        malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

    malloc_slow = (malloc_slow_flags != 0);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options. The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = "false";
            opt_junk_alloc = false;
            opt_junk_free = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }

    for (i = 0; i < 4; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            opts = config_malloc_conf;
            break;
        case 1:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 2: {
            ssize_t linklen = 0;
#ifndef _WIN32
            int saved_errno = errno;
            const char *linkname =
# ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
                "/etc/malloc.conf"
# endif
                ;

            /*
             * Try to use the contents of the "/etc/malloc.conf"
             * symbolic link's name.
             */
            linklen = readlink(linkname, buf, sizeof(buf) - 1);
            if (linklen == -1) {
                /* No configuration specified. */
                linklen = 0;
                /* Restore errno. */
                set_errno(saved_errno);
            }
#endif
            buf[linklen] = '\0';
            opts = buf;
            break;
        } case 3: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = secure_getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
            &vlen)) {
#define CONF_MATCH(n) \
    (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
    (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
    if (CONF_MATCH(n)) { \
        if (CONF_MATCH_VALUE("true")) \
            o = true; \
        else if (CONF_MATCH_VALUE("false")) \
            o = false; \
        else { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } \
        if (cont) \
            continue; \
    }
#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
    if (CONF_MATCH(n)) { \
        uintmax_t um; \
        char *end; \
        \
        set_errno(0); \
        um = malloc_strtoumax(v, &end, 0); \
        if (get_errno() != 0 || (uintptr_t)end -\
            (uintptr_t)v != vlen) { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } else if (clip) { \
            if ((min) != 0 && um < (min)) \
                o = (t)(min); \
            else if (um > (max)) \
                o = (t)(max); \
            else \
                o = (t)um; \
        } else { \
            if (((min) != 0 && um < (min)) \
                || um > (max)) { \
                malloc_conf_error( \
                    "Out-of-range " \
                    "conf value", \
                    k, klen, v, vlen); \
            } else \
                o = (t)um; \
        } \
        continue; \
    }
#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
    CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
    CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
    if (CONF_MATCH(n)) { \
        long l; \
        char *end; \
        \
        set_errno(0); \
        l = strtol(v, &end, 0); \
        if (get_errno() != 0 || (uintptr_t)end -\
            (uintptr_t)v != vlen) { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } else if (l < (ssize_t)(min) || l > \
            (ssize_t)(max)) { \
            malloc_conf_error( \
                "Out-of-range conf value", \
                k, klen, v, vlen); \
        } else \
            o = l; \
        continue; \
    }
#define CONF_HANDLE_CHAR_P(o, n, d) \
    if (CONF_MATCH(n)) { \
        size_t cpylen = (vlen <= \
            sizeof(o)-1) ? vlen : \
            sizeof(o)-1; \
        strncpy(o, v, cpylen); \
        o[cpylen] = '\0'; \
        continue; \
    }

            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones. In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss =
                                dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
                UINT_MAX, false)
            if (strncmp("purge", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < purge_mode_limit; i++) {
                    if (strncmp(purge_mode_names[i], v,
                        vlen) == 0) {
                        opt_purge = (purge_mode_t)i;
                        match = true;
                        break;
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
                NSTIME_SEC_MAX);
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
            if (config_fill) {
                if (CONF_MATCH("junk")) {
                    if (CONF_MATCH_VALUE("true")) {
                        opt_junk = "true";
                        opt_junk_alloc = opt_junk_free =
                            true;
                    } else if (CONF_MATCH_VALUE("false")) {
                        opt_junk = "false";
                        opt_junk_alloc = opt_junk_free =
                            false;
                    } else if (CONF_MATCH_VALUE("alloc")) {
                        opt_junk = "alloc";
                        opt_junk_alloc = true;
                        opt_junk_free = false;
                    } else if (CONF_MATCH_VALUE("free")) {
                        opt_junk = "free";
                        opt_junk_alloc = false;
                        opt_junk_free = true;
                    } else {
                        malloc_conf_error(
                            "Invalid conf value", k,
                            klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
                CONF_HANDLE_BOOL(opt_zero, "zero", true)
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache",
                    !config_valgrind || !in_valgrind)
                if (CONF_MATCH("tcache")) {
                    assert(config_valgrind && in_valgrind);
                    if (opt_tcache) {
                        opt_tcache = false;
                        malloc_conf_error(
                            "tcache cannot be enabled "
                            "while running inside Valgrind",
                            k, klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof", true)
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
                    true)
                CONF_HANDLE_BOOL(opt_prof_thread_active_init,
                    "prof_thread_active_init", true)
                CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1, true)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
                    true)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
                    true)
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
                    true)
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
                    true)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
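
/*
 * Note the precedence implied by the loop above: options are read from four
 * sources in order -- the compiled-in config_malloc_conf, the je_malloc_conf
 * symbol, the name of the /etc/malloc.conf symlink, and finally the
 * MALLOC_CONF environment variable -- so later sources override earlier ones
 * for any option that is specified more than once.
 */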

/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{

    if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
        malloc_init_recursible)) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (!malloc_initialized());
        return (false);
    }
#endif
    return (true);
}

/* init_lock must be held. */
static bool
malloc_init_hard_a0_locked(void)
{

    malloc_initializer = INITIALIZER;

    if (config_prof)
        prof_boot0();
    malloc_conf_init();
    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }
    if (base_boot())
        return (true);
    if (chunk_boot())
        return (true);
    if (ctl_boot())
        return (true);
    if (config_prof)
        prof_boot1();
    if (arena_boot())
        return (true);
    if (config_tcache && tcache_boot())
        return (true);
    if (malloc_mutex_init(&arenas_lock))
        return (true);
    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_auto = 1;
    narenas_total_set(narenas_auto);
    arenas = &a0;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
    /*
     * Initialize one arena here. The rest are lazily created in
     * arena_choose_hard().
     */
    if (arena_init(0) == NULL)
        return (true);
    malloc_init_state = malloc_init_a0_initialized;
    return (false);
}

static bool
malloc_init_hard_a0(void)
{
    bool ret;

    malloc_mutex_lock(&init_lock);
    ret = malloc_init_hard_a0_locked();
    malloc_mutex_unlock(&init_lock);
    return (ret);
}

/*
 * Initialize data structures which may trigger recursive allocation.
 *
 * init_lock must be held.
 */
static bool
malloc_init_hard_recursible(void)
{
    bool ret = false;

    malloc_init_state = malloc_init_recursible;
    malloc_mutex_unlock(&init_lock);

    /* LinuxThreads' pthread_setspecific() allocates. */
    if (malloc_tsd_boot0()) {
        ret = true;
        goto label_return;
    }

    ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
    /* LinuxThreads' pthread_atfork() allocates. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        ret = true;
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

label_return:
    malloc_mutex_lock(&init_lock);
    return (ret);
}

/* init_lock must be held. */
static bool
malloc_init_hard_finish(void)
{

    if (mutex_boot())
        return (true);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
     */
    if (narenas_auto > MALLOCX_ARENA_MAX) {
        narenas_auto = MALLOCX_ARENA_MAX;
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas_auto);
    }
    narenas_total_set(narenas_auto);

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
        (MALLOCX_ARENA_MAX+1));
    if (arenas == NULL)
        return (true);
    /* Copy the pointer to the one arena that was already initialized. */
    arena_set(0, a0);

    malloc_init_state = malloc_init_initialized;
    malloc_slow_flag_init();

    return (false);
}

static bool
malloc_init_hard(void)
{

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
    _init_init_lock();
#endif
    malloc_mutex_lock(&init_lock);
    if (!malloc_init_hard_needed()) {
        malloc_mutex_unlock(&init_lock);
        return (false);
    }

    if (malloc_init_state != malloc_init_a0_initialized &&
        malloc_init_hard_a0_locked()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_init_hard_recursible()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_init_hard_finish()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    malloc_mutex_unlock(&init_lock);
    malloc_tsd_boot1();
    return (false);
}
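
/*
 * Initialization is thus phased: a0 bootstrap (option parsing plus a single
 * arena, with no recursive allocation allowed), a recursible phase that may
 * allocate (TSD, ncpus, atfork hooks), profiling bootstrap, and a finish
 * phase that sizes and publishes the full arenas array before flipping
 * malloc_init_state to malloc_init_initialized.
 */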

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
    prof_tctx_t *tctx, bool slow_path)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        szind_t ind_large = size2index(LARGE_MINCLASS);
        p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = imalloc(tsd, usize, ind, slow_path);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
    else
        p = imalloc(tsd, usize, ind, slow_path);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
{
    szind_t ind;

    if (slow_path && unlikely(malloc_init()))
        return (NULL);
    *tsd = tsd_fetch();
    ind = size2index(size);
    if (unlikely(ind >= NSIZES))
        return (NULL);

    if (config_stats || (config_prof && opt_prof) || (slow_path &&
        config_valgrind && unlikely(in_valgrind))) {
        *usize = index2size(ind);
        assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
    }

    if (config_prof && opt_prof)
        return (imalloc_prof(*tsd, *usize, ind, slow_path));

    return (imalloc(*tsd, size, ind, slow_path));
}

JEMALLOC_ALWAYS_INLINE_C void
imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
{
    if (unlikely(ret == NULL)) {
        if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (size == 0)
        size = 1;

    if (likely(!malloc_slow)) {
        /*
         * imalloc_body() is inlined so that fast and slow paths are
         * generated separately with statically known slow_path.
         */
        ret = imalloc_body(size, &tsd, &usize, false);
        imalloc_post_check(ret, tsd, usize, false);
    } else {
        ret = imalloc_body(size, &tsd, &usize, true);
        imalloc_post_check(ret, tsd, usize, true);
        UTRACE(0, size, ret);
        JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    }

    return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
        p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = ipalloc(tsd, usize, alignment, false);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imemalign_prof_sample(tsd, alignment, usize, tctx);
    else
        p = ipalloc(tsd, usize, alignment, false);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
    int ret;
    tsd_t *tsd;
    size_t usize;
    void *result;

    assert(min_alignment != 0);

    if (unlikely(malloc_init())) {
        result = NULL;
        goto label_oom;
    }
    tsd = tsd_fetch();
    if (size == 0)
        size = 1;

    /* Make sure that alignment is a large enough power of 2. */
    if (unlikely(((alignment - 1) & alignment) != 0
        || (alignment < min_alignment))) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error allocating "
                "aligned memory: invalid alignment\n");
            abort();
        }
        result = NULL;
        ret = EINVAL;
        goto label_return;
    }

    usize = sa2u(size, alignment);
    if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
        result = NULL;
        goto label_oom;
    }

    if (config_prof && opt_prof)
        result = imemalign_prof(tsd, alignment, usize);
    else
        result = ipalloc(tsd, usize, alignment, false);
    if (unlikely(result == NULL))
        goto label_oom;
    assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

    *memptr = result;
    ret = 0;
label_return:
    if (config_stats && likely(result != NULL)) {
        assert(usize == isalloc(result, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, result);
    return (ret);
label_oom:
    assert(result == NULL);
    if (config_xmalloc && unlikely(opt_xmalloc)) {
        malloc_write("<jemalloc>: Error allocating aligned memory: "
            "out of memory\n");
        abort();
    }
    ret = ENOMEM;
    goto label_return;
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}

static void *
icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        szind_t ind_large = size2index(LARGE_MINCLASS);
        p = icalloc(tsd, LARGE_MINCLASS, ind_large);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = icalloc(tsd, usize, ind);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = icalloc_prof_sample(tsd, usize, ind, tctx);
    else
        p = icalloc(tsd, usize, ind);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t num_size;
    szind_t ind;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (unlikely(malloc_init())) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }
    tsd = tsd_fetch();

    num_size = num * size;
    if (unlikely(num_size == 0)) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here. We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
        2))) && (num_size / size != num))) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }

    ind = size2index(num_size);
    if (unlikely(ind >= NSIZES)) {
        ret = NULL;
        goto label_return;
    }
    if (config_prof && opt_prof) {
        usize = index2size(ind);
        ret = icalloc_prof(tsd, usize, ind);
    } else {
        if (config_stats || (config_valgrind && unlikely(in_valgrind)))
            usize = index2size(ind);
        ret = icalloc(tsd, num_size, ind);
    }

label_return:
    if (unlikely(ret == NULL)) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}
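
/*
 * The overflow guard in je_calloc() above works as follows: with a 64-bit
 * size_t, (SIZE_T_MAX << (sizeof(size_t) << 2)) masks the upper 32 bits, so
 * when both num and size fit in the lower 32 bits their product cannot
 * overflow and the division-based check is skipped entirely.
 */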

static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
    void *p;
    bool prof_active;
    prof_tctx_t *old_tctx, *tctx;

    prof_active = prof_active_get_unlocked();
    old_tctx = prof_tctx_get(old_ptr);
    tctx = prof_alloc_prep(tsd, usize, prof_active, true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
    else
        p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
        old_tctx);

    return (p);
}

JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
    size_t usize;
    UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized() || IS_INITIALIZER);

    if (config_prof && opt_prof) {
        usize = isalloc(ptr, config_prof);
        prof_free(tsd, ptr, usize);
    } else if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_stats)
        *tsd_thread_deallocatedp_get(tsd) += usize;

    if (likely(!slow_path))
        iqalloc(tsd, ptr, tcache, false);
    else {
        if (config_valgrind && unlikely(in_valgrind))
            rzsize = p2rz(ptr);
        iqalloc(tsd, ptr, tcache, true);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}

JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{
    UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized() || IS_INITIALIZER);

    if (config_prof && opt_prof)
        prof_free(tsd, ptr, usize);
    if (config_stats)
        *tsd_thread_deallocatedp_get(tsd) += usize;
    if (config_valgrind && unlikely(in_valgrind))
        rzsize = p2rz(ptr);
    isqalloc(tsd, ptr, usize, tcache);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
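
/*
 * ifree() computes the usable size from the pointer when stats, profiling,
 * or Valgrind need it; isfree() is the sized variant for callers that
 * already know usize, which avoids recomputing it via isalloc().
 */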

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
    void *ret;
    tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_usize = 0;
    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

    if (unlikely(size == 0)) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            UTRACE(ptr, 0, 0);
            tsd = tsd_fetch();
            ifree(tsd, ptr, tcache_get(tsd, false), true);
            return (NULL);
        }
        size = 1;
    }

    if (likely(ptr != NULL)) {
        assert(malloc_initialized() || IS_INITIALIZER);
        malloc_thread_init();
        tsd = tsd_fetch();

        old_usize = isalloc(ptr, config_prof);
        if (config_valgrind && unlikely(in_valgrind))
            old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

        if (config_prof && opt_prof) {
            usize = s2u(size);
            ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
                NULL : irealloc_prof(tsd, ptr, old_usize, usize);
        } else {
            if (config_stats || (config_valgrind &&
                unlikely(in_valgrind)))
                usize = s2u(size);
            ret = iralloc(tsd, ptr, old_usize, size, 0, false);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (likely(!malloc_slow))
            ret = imalloc_body(size, &tsd, &usize, false);
        else
            ret = imalloc_body(size, &tsd, &usize, true);
    }

    if (unlikely(ret == NULL)) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in realloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
        *tsd_thread_deallocatedp_get(tsd) += old_usize;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
        old_rzsize, true, false);
    return (ret);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (likely(ptr != NULL)) {
        tsd_t *tsd = tsd_fetch();
        if (likely(!malloc_slow))
            ifree(tsd, ptr, tcache_get(tsd, false), false);
        else
            ifree(tsd, ptr, tcache_get(tsd, false), true);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
        ret = NULL;
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
        ret = NULL;
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

1971
1a4d82fc 1972#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
970d7e83
LB
1973/*
1974 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1975 * to inconsistently reference libc's malloc(3)-compatible functions
1976 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1977 *
1978 * These definitions interpose hooks in glibc. The functions are actually
1979 * passed an extra argument for the caller return address, which will be
1980 * ignored.
1981 */
1a4d82fc
JJ
1982JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1983JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1984JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1985# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1986JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
970d7e83 1987 je_memalign;
1a4d82fc 1988# endif
970d7e83
LB
1989#endif
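/*
 * Conceptually (a simplified sketch; the real glibc code differs), glibc
 * consults each hook before its own implementation, so the assignments
 * above route the corresponding libc entry points through jemalloc:
 *
 *	void *
 *	malloc(size_t size)
 *	{
 *		void *(*hook)(size_t, const void *) = __malloc_hook;
 *
 *		if (hook != NULL)
 *			return (hook(size, __builtin_return_address(0)));
 *		... glibc's own allocator ...
 *	}
 *
 * The second (caller address) parameter is the extra argument that the
 * comment above says is ignored.
 */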
1990
1991/*
1992 * End non-standard override functions.
1993 */
1994/******************************************************************************/
1995/*
1996 * Begin non-standard functions.
1997 */
1998
54a0048b
SL
1999JEMALLOC_ALWAYS_INLINE_C bool
2000imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
2001 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
970d7e83 2002{
970d7e83 2003
1a4d82fc
JJ
2004 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2005 *alignment = 0;
2006 *usize = s2u(size);
2007 } else {
2008 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2009 *usize = sa2u(size, *alignment);
2010 }
54a0048b
SL
2011 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2012 return (true);
1a4d82fc 2013 *zero = MALLOCX_ZERO_GET(flags);
54a0048b
SL
2014 if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2015 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2016 *tcache = NULL;
2017 else
2018 *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2019 } else
2020 *tcache = tcache_get(tsd, true);
1a4d82fc
JJ
2021 if ((flags & MALLOCX_ARENA_MASK) != 0) {
2022 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
54a0048b
SL
2023 *arena = arena_get(arena_ind, true);
2024 if (unlikely(*arena == NULL))
2025 return (true);
2026 } else
1a4d82fc 2027 *arena = NULL;
54a0048b 2028 return (false);
1a4d82fc 2029}
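/*
 * A sketch of how callers encode the fields decoded above (MALLOCX_ALIGN(),
 * MALLOCX_ZERO, and MALLOCX_TCACHE_NONE are the public encoders from
 * jemalloc.h; the function name is hypothetical): alignment travels as
 * lg(align) in the low bits, and the zero/tcache/arena requests occupy
 * their own bitfields.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>

static void *
aligned_zeroed_example(void)
{

	/* 64-byte aligned, zero-filled, bypassing the thread cache. */
	return (mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
	    MALLOCX_TCACHE_NONE));
}
#endif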
970d7e83 2030
54a0048b
SL
2031JEMALLOC_ALWAYS_INLINE_C bool
2032imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
2033 size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1a4d82fc 2034{
970d7e83 2035
1a4d82fc
JJ
2036 if (likely(flags == 0)) {
2037 *usize = s2u(size);
54a0048b
SL
2038 if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2039 return (true);
1a4d82fc
JJ
2040 *alignment = 0;
2041 *zero = false;
54a0048b 2042 *tcache = tcache_get(tsd, true);
1a4d82fc 2043 *arena = NULL;
54a0048b 2044 return (false);
1a4d82fc 2045 } else {
54a0048b
SL
2046 return (imallocx_flags_decode_hard(tsd, size, flags, usize,
2047 alignment, zero, tcache, arena));
1a4d82fc 2048 }
970d7e83
LB
2049}
2050
1a4d82fc
JJ
2051JEMALLOC_ALWAYS_INLINE_C void *
2052imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
54a0048b 2053 tcache_t *tcache, arena_t *arena)
970d7e83 2054{
54a0048b 2055 szind_t ind;
970d7e83 2056
54a0048b
SL
2057 if (unlikely(alignment != 0))
2058 return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
2059 ind = size2index(usize);
2060 assert(ind < NSIZES);
2061 if (unlikely(zero))
2062 return (icalloct(tsd, usize, ind, tcache, arena));
2063 return (imalloct(tsd, usize, ind, tcache, arena));
970d7e83
LB
2064}
2065
1a4d82fc 2066static void *
54a0048b
SL
2067imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2068 tcache_t *tcache, arena_t *arena)
970d7e83 2069{
1a4d82fc 2070 void *p;
970d7e83 2071
1a4d82fc
JJ
2072 if (usize <= SMALL_MAXCLASS) {
2073 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2074 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
54a0048b
SL
2075 p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
2076 arena);
1a4d82fc
JJ
2077 if (p == NULL)
2078 return (NULL);
2079 arena_prof_promoted(p, usize);
54a0048b
SL
2080 } else
2081 p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
970d7e83 2082
1a4d82fc 2083 return (p);
970d7e83
LB
2084}
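/*
 * Reading aid for the promotion above: sampled small requests are backed
 * by the smallest large size class so that each carries its own profiling
 * context, and arena_prof_promoted() records the originally requested
 * usize so that statistics still reflect the small size class.
 */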
2085
1a4d82fc
JJ
2086JEMALLOC_ALWAYS_INLINE_C void *
2087imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
970d7e83 2088{
1a4d82fc
JJ
2089 void *p;
2090 size_t alignment;
2091 bool zero;
54a0048b 2092 tcache_t *tcache;
1a4d82fc
JJ
2093 arena_t *arena;
2094 prof_tctx_t *tctx;
2095
54a0048b
SL
2096 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2097 &zero, &tcache, &arena)))
2098 return (NULL);
2099 tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2100 if (likely((uintptr_t)tctx == (uintptr_t)1U))
2101 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2102 else if ((uintptr_t)tctx > (uintptr_t)1U) {
2103 p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2104 arena);
1a4d82fc
JJ
2105 } else
2106 p = NULL;
2107 if (unlikely(p == NULL)) {
2108 prof_alloc_rollback(tsd, tctx, true);
2109 return (NULL);
2110 }
2111 prof_malloc(p, *usize, tctx);
970d7e83 2112
54a0048b 2113 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
1a4d82fc 2114 return (p);
970d7e83
LB
2115}
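/*
 * Reading aid for the tctx comparisons above: prof_alloc_prep() returns
 * (prof_tctx_t *)1U to mean "do not sample", a real context pointer (> 1)
 * to mean "sample this allocation", and NULL on failure, hence the
 * three-way branch on the pointer's integer value.
 */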
2116
1a4d82fc
JJ
2117JEMALLOC_ALWAYS_INLINE_C void *
2118imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
970d7e83 2119{
54a0048b 2120 void *p;
1a4d82fc
JJ
2121 size_t alignment;
2122 bool zero;
54a0048b 2123 tcache_t *tcache;
1a4d82fc 2124 arena_t *arena;
970d7e83 2125
1a4d82fc 2126 if (likely(flags == 0)) {
54a0048b
SL
2127 szind_t ind = size2index(size);
2128 if (unlikely(ind >= NSIZES))
2129 return (NULL);
2130 if (config_stats || (config_valgrind &&
2131 unlikely(in_valgrind))) {
2132 *usize = index2size(ind);
2133 assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2134 }
2135 return (imalloc(tsd, size, ind, true));
1a4d82fc 2136 }
970d7e83 2137
54a0048b
SL
2138 if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2139 &alignment, &zero, &tcache, &arena)))
2140 return (NULL);
2141 p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2142 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2143 return (p);
970d7e83
LB
2144}
2145
54a0048b
SL
2146JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2147void JEMALLOC_NOTHROW *
2148JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1a4d82fc 2149je_mallocx(size_t size, int flags)
970d7e83 2150{
1a4d82fc 2151 tsd_t *tsd;
970d7e83
LB
2152 void *p;
2153 size_t usize;
970d7e83 2154
970d7e83
LB
2155 assert(size != 0);
2156
1a4d82fc 2157 if (unlikely(malloc_init()))
970d7e83 2158 goto label_oom;
1a4d82fc 2159 tsd = tsd_fetch();
970d7e83 2160
1a4d82fc
JJ
2161 if (config_prof && opt_prof)
2162 p = imallocx_prof(tsd, size, flags, &usize);
2163 else
2164 p = imallocx_no_prof(tsd, size, flags, &usize);
2165 if (unlikely(p == NULL))
970d7e83
LB
2166 goto label_oom;
2167
970d7e83
LB
2168 if (config_stats) {
2169 assert(usize == isalloc(p, config_prof));
1a4d82fc 2170 *tsd_thread_allocatedp_get(tsd) += usize;
970d7e83
LB
2171 }
2172 UTRACE(0, size, p);
1a4d82fc
JJ
2173 JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2174 return (p);
970d7e83 2175label_oom:
1a4d82fc
JJ
2176 if (config_xmalloc && unlikely(opt_xmalloc)) {
2177 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
970d7e83
LB
2178 abort();
2179 }
970d7e83 2180 UTRACE(0, size, 0);
1a4d82fc 2181 return (NULL);
970d7e83
LB
2182}
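/*
 * Usage sketch for mallocx() (illustrative; public names assume the
 * default je_ prefix mapping): unlike malloc(), the path above never sets
 * errno, so failure is signaled purely by the NULL return.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
mallocx_example(void)
{
	/* Like malloc(100), but zero-filled. */
	void *p = mallocx(100, MALLOCX_ZERO);

	if (p == NULL) {
		fprintf(stderr, "mallocx: out of memory\n");
		return;
	}
	dallocx(p, 0);
}
#endif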
2183
1a4d82fc 2184static void *
54a0048b
SL
2185irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2186 size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2187 prof_tctx_t *tctx)
970d7e83 2188{
1a4d82fc
JJ
2189 void *p;
2190
2191 if (tctx == NULL)
2192 return (NULL);
2193 if (usize <= SMALL_MAXCLASS) {
54a0048b
SL
2194 p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2195 zero, tcache, arena);
1a4d82fc
JJ
2196 if (p == NULL)
2197 return (NULL);
2198 arena_prof_promoted(p, usize);
2199 } else {
54a0048b
SL
2200 p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2201 tcache, arena);
1a4d82fc
JJ
2202 }
2203
2204 return (p);
2205}
2206
2207JEMALLOC_ALWAYS_INLINE_C void *
54a0048b
SL
2208irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2209 size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2210 arena_t *arena)
1a4d82fc
JJ
2211{
2212 void *p;
54a0048b 2213 bool prof_active;
1a4d82fc
JJ
2214 prof_tctx_t *old_tctx, *tctx;
2215
54a0048b
SL
2216 prof_active = prof_active_get_unlocked();
2217 old_tctx = prof_tctx_get(old_ptr);
2218 tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
1a4d82fc 2219 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
54a0048b
SL
2220 p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2221 alignment, zero, tcache, arena, tctx);
1a4d82fc 2222 } else {
54a0048b
SL
2223 p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2224 tcache, arena);
1a4d82fc
JJ
2225 }
2226 if (unlikely(p == NULL)) {
54a0048b 2227 prof_alloc_rollback(tsd, tctx, true);
1a4d82fc
JJ
2228 return (NULL);
2229 }
2230
54a0048b 2231 if (p == old_ptr && alignment != 0) {
1a4d82fc
JJ
2232 /*
2233 * The allocation did not move, so it is possible that the size
2234 * class is smaller than would guarantee the requested
2235 * alignment, and that the alignment constraint was
2236 * serendipitously satisfied. Additionally, old_usize may not
2237 * be the same as the current usize because of in-place large
2238 * reallocation. Therefore, query the actual value of usize.
2239 */
2240 *usize = isalloc(p, config_prof);
2241 }
54a0048b
SL
2242 prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2243 old_usize, old_tctx);
1a4d82fc
JJ
2244
2245 return (p);
2246}
2247
54a0048b
SL
2248JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2249void JEMALLOC_NOTHROW *
2250JEMALLOC_ALLOC_SIZE(2)
1a4d82fc
JJ
2251je_rallocx(void *ptr, size_t size, int flags)
2252{
2253 void *p;
2254 tsd_t *tsd;
970d7e83 2255 size_t usize;
54a0048b 2256 size_t old_usize;
1a4d82fc
JJ
2257 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2258 size_t alignment = MALLOCX_ALIGN_GET(flags);
2259 bool zero = flags & MALLOCX_ZERO;
970d7e83 2260 arena_t *arena;
54a0048b 2261 tcache_t *tcache;
970d7e83
LB
2262
2263 assert(ptr != NULL);
970d7e83 2264 assert(size != 0);
54a0048b 2265 assert(malloc_initialized() || IS_INITIALIZER);
970d7e83 2266 malloc_thread_init();
1a4d82fc 2267 tsd = tsd_fetch();
970d7e83 2268
1a4d82fc
JJ
2269 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2270 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
54a0048b
SL
2271 arena = arena_get(arena_ind, true);
2272 if (unlikely(arena == NULL))
2273 goto label_oom;
2274 } else
970d7e83 2275 arena = NULL;
970d7e83 2276
54a0048b
SL
2277 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2278 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2279 tcache = NULL;
2280 else
2281 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2282 } else
2283 tcache = tcache_get(tsd, true);
2284
2285 old_usize = isalloc(ptr, config_prof);
1a4d82fc
JJ
2286 if (config_valgrind && unlikely(in_valgrind))
2287 old_rzsize = u2rz(old_usize);
970d7e83 2288
1a4d82fc
JJ
2289 if (config_prof && opt_prof) {
2290 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
54a0048b
SL
2291 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2292 goto label_oom;
1a4d82fc 2293 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
54a0048b 2294 zero, tcache, arena);
1a4d82fc 2295 if (unlikely(p == NULL))
970d7e83 2296 goto label_oom;
970d7e83 2297 } else {
54a0048b
SL
2298 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2299 tcache, arena);
1a4d82fc
JJ
2300 if (unlikely(p == NULL))
2301 goto label_oom;
2302 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2303 usize = isalloc(p, config_prof);
970d7e83 2304 }
54a0048b 2305 assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
970d7e83 2306
970d7e83 2307 if (config_stats) {
1a4d82fc
JJ
2308 *tsd_thread_allocatedp_get(tsd) += usize;
2309 *tsd_thread_deallocatedp_get(tsd) += old_usize;
970d7e83 2310 }
1a4d82fc
JJ
2311 UTRACE(ptr, size, p);
2312 JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2313 old_rzsize, false, zero);
2314 return (p);
970d7e83 2315label_oom:
1a4d82fc
JJ
2316 if (config_xmalloc && unlikely(opt_xmalloc)) {
2317 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
970d7e83
LB
2318 abort();
2319 }
1a4d82fc
JJ
2320 UTRACE(ptr, size, 0);
2321 return (NULL);
970d7e83
LB
2322}
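/*
 * Usage sketch for rallocx() (illustrative): rallocx() may move the
 * allocation, so on success only the returned pointer is valid; on
 * failure the original pointer is untouched.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>

static void *
rallocx_example(void *p)
{
	/* Grow to 4096 bytes, preserving contents; may move p. */
	void *q = rallocx(p, 4096, 0);

	if (q == NULL)
		return (p);	/* p is still valid and still owned. */
	return (q);
}
#endif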
2323
1a4d82fc 2324JEMALLOC_ALWAYS_INLINE_C size_t
54a0048b
SL
2325ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2326 size_t extra, size_t alignment, bool zero)
1a4d82fc
JJ
2327{
2328 size_t usize;
2329
54a0048b 2330 if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
1a4d82fc
JJ
2331 return (old_usize);
2332 usize = isalloc(ptr, config_prof);
2333
2334 return (usize);
2335}
2336
2337static size_t
54a0048b
SL
2338ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2339 size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
1a4d82fc
JJ
2340{
2341 size_t usize;
2342
2343 if (tctx == NULL)
2344 return (old_usize);
54a0048b
SL
2345 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
2346 zero);
1a4d82fc
JJ
2347
2348 return (usize);
2349}
2350
2351JEMALLOC_ALWAYS_INLINE_C size_t
2352ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
54a0048b 2353 size_t extra, size_t alignment, bool zero)
1a4d82fc 2354{
54a0048b
SL
2355 size_t usize_max, usize;
2356 bool prof_active;
1a4d82fc
JJ
2357 prof_tctx_t *old_tctx, *tctx;
2358
54a0048b 2359 prof_active = prof_active_get_unlocked();
1a4d82fc
JJ
2360 old_tctx = prof_tctx_get(ptr);
2361 /*
2362 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2363 * Therefore, compute its maximum possible value and use that in
2364 * prof_alloc_prep() to decide whether to capture a backtrace.
2365 * prof_realloc() will use the actual usize to decide whether to sample.
2366 */
54a0048b
SL
2367 if (alignment == 0) {
2368 usize_max = s2u(size+extra);
2369 assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2370 } else {
2371 usize_max = sa2u(size+extra, alignment);
2372 if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2373 /*
2374 * usize_max is out of range, and chances are that
2375 * allocation will fail, but use the maximum possible
2376 * value and carry on with prof_alloc_prep(), just in
2377 * case allocation succeeds.
2378 */
2379 usize_max = HUGE_MAXCLASS;
2380 }
2381 }
2382 tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2383
1a4d82fc 2384 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
54a0048b
SL
2385 usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
2386 alignment, zero, tctx);
1a4d82fc 2387 } else {
54a0048b
SL
2388 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2389 alignment, zero);
1a4d82fc 2390 }
54a0048b 2391 if (usize == old_usize) {
1a4d82fc
JJ
2392 prof_alloc_rollback(tsd, tctx, false);
2393 return (usize);
2394 }
54a0048b
SL
2395 prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2396 old_tctx);
1a4d82fc
JJ
2397
2398 return (usize);
2399}
2400
54a0048b 2401JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
1a4d82fc 2402je_xallocx(void *ptr, size_t size, size_t extra, int flags)
970d7e83 2403{
1a4d82fc
JJ
2404 tsd_t *tsd;
2405 size_t usize, old_usize;
2406 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2407 size_t alignment = MALLOCX_ALIGN_GET(flags);
2408 bool zero = flags & MALLOCX_ZERO;
1a4d82fc
JJ
2409
2410 assert(ptr != NULL);
2411 assert(size != 0);
2412 assert(SIZE_T_MAX - size >= extra);
54a0048b 2413 assert(malloc_initialized() || IS_INITIALIZER);
1a4d82fc
JJ
2414 malloc_thread_init();
2415 tsd = tsd_fetch();
2416
7453a54e 2417 old_usize = isalloc(ptr, config_prof);
54a0048b
SL
2418
2419 /*
2420 * The API explicitly absolves itself of protecting against (size +
2421 * extra) numerical overflow, but we may need to clamp extra to avoid
2422 * exceeding HUGE_MAXCLASS.
2423 *
2424 * Ordinarily, size limit checking is handled deeper down, but here we
2425 * have to check as part of (size + extra) clamping, since we need the
2426 * clamped value in the above helper functions.
2427 */
2428 if (unlikely(size > HUGE_MAXCLASS)) {
2429 usize = old_usize;
2430 goto label_not_resized;
2431 }
2432 if (unlikely(HUGE_MAXCLASS - size < extra))
2433 extra = HUGE_MAXCLASS - size;
2434
1a4d82fc
JJ
2435 if (config_valgrind && unlikely(in_valgrind))
2436 old_rzsize = u2rz(old_usize);
2437
2438 if (config_prof && opt_prof) {
2439 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
54a0048b 2440 alignment, zero);
1a4d82fc 2441 } else {
54a0048b
SL
2442 usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2443 alignment, zero);
1a4d82fc
JJ
2444 }
2445 if (unlikely(usize == old_usize))
2446 goto label_not_resized;
2447
2448 if (config_stats) {
2449 *tsd_thread_allocatedp_get(tsd) += usize;
2450 *tsd_thread_deallocatedp_get(tsd) += old_usize;
2451 }
2452 JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2453 old_rzsize, false, zero);
2454label_not_resized:
2455 UTRACE(ptr, size, ptr);
2456 return (usize);
2457}
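/*
 * Usage sketch for xallocx() (illustrative): unlike rallocx(), xallocx()
 * never moves the allocation. It returns the resulting usable size, which
 * equals the old usable size whenever no in-place resize was possible.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>
#include <stdbool.h>

static bool
try_grow_in_place(void *p, size_t old_usize)
{
	/* Try to grow in place to >= 4096 bytes, up to 4096 + 512. */
	size_t new_usize = xallocx(p, 4096, 512, 0);

	/* An unchanged usable size means the request had no effect. */
	return (new_usize != old_usize);
}
#endif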
2458
54a0048b
SL
2459JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2460JEMALLOC_ATTR(pure)
1a4d82fc
JJ
2461je_sallocx(const void *ptr, int flags)
2462{
2463 size_t usize;
970d7e83 2464
54a0048b 2465 assert(malloc_initialized() || IS_INITIALIZER);
970d7e83
LB
2466 malloc_thread_init();
2467
2468 if (config_ivsalloc)
1a4d82fc 2469 usize = ivsalloc(ptr, config_prof);
54a0048b 2470 else
1a4d82fc 2471 usize = isalloc(ptr, config_prof);
970d7e83 2472
1a4d82fc 2473 return (usize);
970d7e83
LB
2474}
2475
54a0048b 2476JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1a4d82fc 2477je_dallocx(void *ptr, int flags)
970d7e83 2478{
54a0048b
SL
2479 tsd_t *tsd;
2480 tcache_t *tcache;
970d7e83
LB
2481
2482 assert(ptr != NULL);
54a0048b 2483 assert(malloc_initialized() || IS_INITIALIZER);
970d7e83 2484
54a0048b
SL
2485 tsd = tsd_fetch();
2486 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2487 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2488 tcache = NULL;
2489 else
2490 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
970d7e83 2491 } else
54a0048b 2492 tcache = tcache_get(tsd, false);
970d7e83
LB
2493
2494 UTRACE(ptr, 0, 0);
54a0048b 2495 ifree(tsd, ptr, tcache, true);
1a4d82fc
JJ
2496}
2497
2498JEMALLOC_ALWAYS_INLINE_C size_t
2499inallocx(size_t size, int flags)
2500{
2501 size_t usize;
970d7e83 2502
1a4d82fc
JJ
2503 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2504 usize = s2u(size);
2505 else
2506 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
1a4d82fc 2507 return (usize);
970d7e83
LB
2508}
2509
54a0048b 2510JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1a4d82fc 2511je_sdallocx(void *ptr, size_t size, int flags)
970d7e83 2512{
54a0048b
SL
2513 tsd_t *tsd;
2514 tcache_t *tcache;
970d7e83 2515 size_t usize;
1a4d82fc
JJ
2516
2517 assert(ptr != NULL);
54a0048b 2518 assert(malloc_initialized() || IS_INITIALIZER);
1a4d82fc
JJ
2519 usize = inallocx(size, flags);
2520 assert(usize == isalloc(ptr, config_prof));
2521
54a0048b
SL
2522 tsd = tsd_fetch();
2523 if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2524 if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2525 tcache = NULL;
2526 else
2527 tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
1a4d82fc 2528 } else
54a0048b 2529 tcache = tcache_get(tsd, false);
1a4d82fc
JJ
2530
2531 UTRACE(ptr, 0, 0);
54a0048b 2532 isfree(tsd, ptr, usize, tcache);
1a4d82fc
JJ
2533}
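/*
 * Usage sketch for sdallocx() (illustrative): the caller passes back the
 * original request size and size-affecting flags, which lets inallocx()
 * above recompute usize instead of looking it up from allocator metadata.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>

static void
sized_dealloc_example(void)
{
	void *p = mallocx(100, MALLOCX_ALIGN(64));

	if (p != NULL) {
		/* Size and alignment flags must match the allocation. */
		sdallocx(p, 100, MALLOCX_ALIGN(64));
	}
}
#endif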
2534
54a0048b
SL
2535JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2536JEMALLOC_ATTR(pure)
1a4d82fc
JJ
2537je_nallocx(size_t size, int flags)
2538{
54a0048b 2539 size_t usize;
970d7e83
LB
2540
2541 assert(size != 0);
2542
1a4d82fc
JJ
2543 if (unlikely(malloc_init()))
2544 return (0);
2545
54a0048b
SL
2546 usize = inallocx(size, flags);
2547 if (unlikely(usize > HUGE_MAXCLASS))
2548 return (0);
2549
2550 return (usize);
1a4d82fc 2551}
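/*
 * Usage sketch (illustrative): nallocx() computes, without allocating,
 * the usable size that mallocx() would produce for the same (size, flags)
 * pair, so the prediction below agrees with sallocx() on the live object.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>
#include <assert.h>

static void
nallocx_example(void)
{
	size_t predicted = nallocx(100, MALLOCX_ALIGN(64));
	void *p = mallocx(100, MALLOCX_ALIGN(64));

	if (p != NULL) {
		assert(sallocx(p, 0) == predicted);
		dallocx(p, 0);
	}
}
#endif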
970d7e83 2552
54a0048b 2553JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1a4d82fc
JJ
2554je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2555 size_t newlen)
2556{
970d7e83 2557
1a4d82fc
JJ
2558 if (unlikely(malloc_init()))
2559 return (EAGAIN);
2560
2561 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2562}
2563
54a0048b 2564JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1a4d82fc
JJ
2565je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2566{
2567
2568 if (unlikely(malloc_init()))
2569 return (EAGAIN);
2570
2571 return (ctl_nametomib(name, mibp, miblenp));
2572}
2573
54a0048b 2574JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1a4d82fc
JJ
2575je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2576 void *newp, size_t newlen)
2577{
2578
2579 if (unlikely(malloc_init()))
2580 return (EAGAIN);
2581
2582 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2583}
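/*
 * Usage sketch for the mallctl*() family (illustrative): read a scalar by
 * name once, or translate the name to a MIB and reuse it when the same
 * lookup is repeated on a hot path.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>

static void
mallctl_example(void)
{
	size_t allocated, sz = sizeof(allocated);
	size_t mib[2], miblen = sizeof(mib) / sizeof(mib[0]);

	/* One-shot read: bytes currently allocated by the application. */
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return;

	/* Translate once, then reuse the MIB for repeated reads. */
	if (mallctlnametomib("stats.allocated", mib, &miblen) == 0)
		mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
}
#endif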
2584
54a0048b 2585JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1a4d82fc
JJ
2586je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2587 const char *opts)
2588{
2589
2590 stats_print(write_cb, cbopaque, opts);
2591}
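/*
 * Usage sketch (illustrative): a NULL write callback sends output via
 * malloc_message() (stderr by default), and characters in the opts string
 * suppress sections, e.g. 'a' is believed to omit per-arena detail in
 * this version.
 */
#if 0	/* Example only; not compiled into the library. */
#include <jemalloc/jemalloc.h>

static void
stats_example(void)
{
	malloc_stats_print(NULL, NULL, "a");
}
#endif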
2592
54a0048b 2593JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
1a4d82fc
JJ
2594je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2595{
2596 size_t ret;
2597
54a0048b 2598 assert(malloc_initialized() || IS_INITIALIZER);
1a4d82fc
JJ
2599 malloc_thread_init();
2600
2601 if (config_ivsalloc)
2602 ret = ivsalloc(ptr, config_prof);
2603 else
54a0048b 2604 ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
1a4d82fc
JJ
2605
2606 return (ret);
970d7e83
LB
2607}
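/*
 * Usage sketch (illustrative): the usable size may exceed the requested
 * size because of size-class rounding, and the entire usable region may
 * legitimately be written.
 */
#if 0	/* Example only; not compiled into the library. */
#include <malloc.h>	/* malloc_usable_size() on glibc. */
#include <stdlib.h>
#include <string.h>

static void
usable_size_example(void)
{
	char *p = malloc(100);

	if (p != NULL) {
		/* A 100-byte request typically lands in a larger class. */
		memset(p, 0, malloc_usable_size(p));
		free(p);
	}
}
#endif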
2608
970d7e83 2609/*
1a4d82fc 2610 * End non-standard functions.
970d7e83
LB
2611 */
2612/******************************************************************************/
2613/*
2614 * The following functions are used by threading libraries for protection of
2615 * malloc during fork().
2616 */
2617
2618/*
2619 * If an application creates a thread before doing any allocation in the main
2620 * thread, then calls fork(2) in the main thread followed by memory allocation
2621 * in the child process, a race can occur that results in deadlock within the
2622 * child: the main thread may have forked while the created thread had
2623 * partially initialized the allocator. Ordinarily jemalloc prevents
2624 * fork/malloc races via the following functions it registers during
2625 * initialization using pthread_atfork(), but of course that does no good if
2626 * the allocator isn't fully initialized at fork time. The following library
54a0048b
SL
2627 * constructor is a partial solution to this problem. It may still be possible
2628 * to trigger the deadlock described above, but doing so would involve forking
2629 * via a library constructor that runs before jemalloc's runs.
970d7e83
LB
2630 */
2631JEMALLOC_ATTR(constructor)
2632static void
2633jemalloc_constructor(void)
2634{
2635
2636 malloc_init();
2637}
2638
2639#ifndef JEMALLOC_MUTEX_INIT_CB
2640void
2641jemalloc_prefork(void)
2642#else
2643JEMALLOC_EXPORT void
2644_malloc_prefork(void)
2645#endif
2646{
54a0048b 2647 unsigned i, narenas;
970d7e83
LB
2648
2649#ifdef JEMALLOC_MUTEX_INIT_CB
54a0048b 2650 if (!malloc_initialized())
970d7e83
LB
2651 return;
2652#endif
54a0048b 2653 assert(malloc_initialized());
970d7e83
LB
2654
2655 /* Acquire all mutexes in a safe order. */
2656 ctl_prefork();
2657 prof_prefork();
2658 malloc_mutex_prefork(&arenas_lock);
54a0048b
SL
2659 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2660 arena_t *arena;
2661
2662 if ((arena = arena_get(i, false)) != NULL)
2663 arena_prefork(arena);
970d7e83
LB
2664 }
2665 chunk_prefork();
2666 base_prefork();
970d7e83
LB
2667}
2668
2669#ifndef JEMALLOC_MUTEX_INIT_CB
2670void
2671jemalloc_postfork_parent(void)
2672#else
2673JEMALLOC_EXPORT void
2674_malloc_postfork(void)
2675#endif
2676{
54a0048b 2677 unsigned i, narenas;
970d7e83
LB
2678
2679#ifdef JEMALLOC_MUTEX_INIT_CB
54a0048b 2680 if (!malloc_initialized())
970d7e83
LB
2681 return;
2682#endif
54a0048b 2683 assert(malloc_initialized());
970d7e83
LB
2684
2685 /* Release all mutexes, now that fork() has completed. */
970d7e83
LB
2686 base_postfork_parent();
2687 chunk_postfork_parent();
54a0048b
SL
2688 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2689 arena_t *arena;
2690
2691 if ((arena = arena_get(i, false)) != NULL)
2692 arena_postfork_parent(arena);
970d7e83
LB
2693 }
2694 malloc_mutex_postfork_parent(&arenas_lock);
2695 prof_postfork_parent();
2696 ctl_postfork_parent();
2697}
2698
2699void
2700jemalloc_postfork_child(void)
2701{
54a0048b 2702 unsigned i, narenas;
970d7e83 2703
54a0048b 2704 assert(malloc_initialized());
970d7e83
LB
2705
2706 /* Release all mutexes, now that fork() has completed. */
970d7e83
LB
2707 base_postfork_child();
2708 chunk_postfork_child();
54a0048b
SL
2709 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2710 arena_t *arena;
2711
2712 if ((arena = arena_get(i, false)) != NULL)
2713 arena_postfork_child(arena);
970d7e83
LB
2714 }
2715 malloc_mutex_postfork_child(&arenas_lock);
2716 prof_postfork_child();
2717 ctl_postfork_child();
2718}
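/*
 * A sketch of how the three handlers above are wired up (the actual
 * registration happens during allocator initialization; exact code
 * differs by platform):
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * The prefork handler acquires every allocator mutex so that none is held
 * by a non-forking thread at fork time; the parent handler releases them,
 * and the child handler releases or reinitializes them, depending on what
 * the platform's mutexes require after fork.
 */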
2719
2720/******************************************************************************/