#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
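/*
 * Sketch of the convention (illustrative only; foo_t/foo_s are hypothetical
 * names, not part of jemalloc).  An internal header participating in this
 * scheme is laid out as:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { ... };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   extern foo_t *foo_default;
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ... inline functions ...
 *   #endif
 *
 * Each of the four #include passes below therefore picks up exactly one
 * section of every header.
 */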
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"
#define	MALLOCX_ARENA_MASK	((int)~0xff)
#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define	MALLOCX_ALIGN_GET(flags)					\
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define	MALLOCX_ZERO_GET(flags)						\
    ((bool)(flags & MALLOCX_ZERO))
#define	MALLOCX_ARENA_GET(flags)					\
    (((unsigned)(flags >> 8)) - 1)
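/*
 * Worked example: for flags built via the public API as
 * MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO | MALLOCX_ARENA(2), i.e. 0x344:
 *
 *   MALLOCX_ALIGN_GET_SPECIFIED(flags) == (size_t)1 << 4 == 16
 *   MALLOCX_ZERO_GET(flags)            == true
 *   MALLOCX_ARENA_GET(flags)           == (0x344 >> 8) - 1 == 2
 */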
/* Smallest size class to support. */
#define	LG_TINY_MIN	3
#define	TINY_MIN	(1U << LG_TINY_MIN)
/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM	4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM	4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM	4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM	4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM	4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM	3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM	4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM	4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM	3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM	3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM	4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM	4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM	4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM	4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM	4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif
#define	QUANTUM		((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK	(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
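/*
 * The mask-based ceiling idiom above (also used by LONG_CEILING, PTR_CEILING,
 * CACHELINE_CEILING, and PAGE_CEILING below) requires the alignment to be a
 * power of two.  Worked example, assuming LG_QUANTUM == 4 (QUANTUM == 16,
 * QUANTUM_MASK == 0xf):
 *
 *   QUANTUM_CEILING(1)  == (1  + 0xf) & ~0xf == 16
 *   QUANTUM_CEILING(16) == (16 + 0xf) & ~0xf == 16
 *   QUANTUM_CEILING(17) == (17 + 0xf) & ~0xf == 32
 */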
#define	LONG		((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK	(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
    (((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR	(1U << LG_SIZEOF_PTR)
#define	PTR_MASK	(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
    (((a) + PTR_MASK) & ~PTR_MASK)
/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE	6
#define	CACHELINE	64
#define	CACHELINE_MASK	(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
    (((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
    (((s) + (alignment - 1)) & (-(alignment)))
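/*
 * Worked example for the three alignment macros, assuming alignment == 64
 * and a == (void *)0x1234:
 *
 *   ALIGNMENT_ADDR2BASE(a, 64)    == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET(a, 64)  == 0x34
 *   ALIGNMENT_CEILING(0x1234, 64) == 0x1240
 *
 * (-(alignment)) is the two's complement negation, i.e. ~(alignment - 1), so
 * the bitwise AND clears the low lg(alignment) bits.
 */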
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
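/*
 * Usage sketch (illustrative; "nelms" is a hypothetical runtime value):
 *
 *   VARIABLE_ARRAY(size_t, sizes, nelms);
 *
 * Under C99 this expands to the VLA declaration "size_t sizes[(nelms)]";
 * under pre-C99 compilers it falls back to alloca().  Either way the storage
 * has automatic lifetime and no overflow checking is performed on the
 * element count.
 */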
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS
extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned	ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t	**arenas;
extern unsigned	narenas_total;
extern unsigned	narenas_auto; /* Read-only after initialization. */
arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(tsd_t *tsd);
void	thread_allocated_cleanup(tsd_t *tsd);
void	thread_deallocated_cleanup(tsd_t *tsd);
void	arena_cleanup(tsd_t *tsd);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
/*
 * Include arena.h the first time in order to provide inline functions for this
 * header's inlines.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#ifndef JEMALLOC_ENABLE_INLINE
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(tsd_t *tsd, arena_t *arena);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (small_s2u(size));
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
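/*
 * Illustrative mapping, assuming 4 KiB pages (LG_PAGE == 12) and default size
 * classes: a small request such as s2u(100) maps to its small size class via
 * small_s2u(); a large request such as s2u(20000) maps to
 * PAGE_CEILING(20000) == 20480; a request larger than arena_maxclass maps to
 * a multiple of the chunk size via CHUNK_CEILING().
 */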
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    160 | 10100000 |                32
	 *    192 | 11000000 |                64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (small_s2u(usize));
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(tsd_t *tsd, arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if (unlikely((ret = tsd_arena_get(tsd)) == NULL)) {
		ret = choose_arena_hard(tsd);
		assert(ret != NULL);
	}

	return (ret);
}
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h the second and third times in order to resolve circular
 * dependencies with tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(tsd_t *tsd, size_t size);
void	*icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(tsd_t *tsd, size_t size);
void	*ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    bool try_tcache, arena_t *arena);
void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloct(tsd_t *tsd, void *ptr, bool try_tcache);
void	isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
void	idalloc(tsd_t *tsd, void *ptr);
void	iqalloc(tsd_t *tsd, void *ptr, bool try_tcache);
void	isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc, arena_t *arena);
void	*iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void	*iralloc(tsd_t *tsd, void *ptr, size_t size, size_t alignment,
    bool zero);
bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(tsd, arena, size, false, try_tcache));
	else
		return (huge_malloc(tsd, arena, size, false));
}
JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size)
{

	return (imalloct(tsd, size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(tsd, arena, size, true, try_tcache));
	else
		return (huge_malloc(tsd, arena, size, true));
}
JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size)
{

	return (icalloct(tsd, size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(tsd, arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(tsd, arena, usize, zero);
		else
			ret = huge_palloc(tsd, arena, usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

	return (ipalloct(tsd, usize, alignment, zero, true, NULL));
}
/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || !demote);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
		return (0);

	return (isalloc(ptr, demote));
}
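/*
 * ivsalloc() differs from isalloc() only in that it first validates ptr
 * against the chunk radix tree, so it tolerates pointers that jemalloc does
 * not own; this is what config_ivsalloc-based sanity checking of
 * application-supplied pointers relies on.
 */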
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = small_size2bin(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(tsd, chunk, ptr, try_tcache);
	else
		huge_dalloc(tsd, ptr);
}
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
	else
		huge_dalloc(tsd, ptr);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

	idalloct(tsd, ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, bool try_tcache)
{

	if (config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		idalloct(tsd, ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
{

	if (config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		isdalloct(tsd, ptr, size, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc, arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (usize == 0)
		return (NULL);
	p = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (usize == 0)
			return (NULL);
		p = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
		    arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	iqalloc(tsd, ptr, try_tcache_dalloc);
	return (p);
}
JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
	}

	if (size <= arena_maxclass) {
		return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
		    alignment, zero, try_tcache_alloc, try_tcache_dalloc));
	} else {
		return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
		    alignment, zero, try_tcache_dalloc));
	}
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero)
{

	return (iralloct(tsd, ptr, size, alignment, zero, true, true, NULL));
}
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);
	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	if (size <= arena_maxclass)
		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
	else
		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
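/*
 * Note on the resize entry points above: iralloct()/iralloc() may move the
 * object (allocate-copy-free via iralloct_realign()) when in-place resizing
 * or the requested alignment cannot be satisfied, whereas ixalloc() never
 * moves the object; it returns true to signal that the caller must fall back
 * to a moving reallocation.
 */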
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */