#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
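
/*
 * Illustrative sketch (hypothetical component "foo", not from the upstream
 * sources): a header written for this scheme keys each section off the pass
 * macro, so reading it once per pass emits types, structs, externs, and
 * inlines in that order:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s {
 *           foo_t *next;
 *   };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   extern foo_t *foo_head;
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE foo_t *foo_first(void) { return (foo_head); }
 *   #endif
 */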
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

#define MALLOCX_ARENA_MASK ((int)~0xff)
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define MALLOCX_ALIGN_GET(flags) \
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define MALLOCX_ZERO_GET(flags) \
    ((bool)(flags & MALLOCX_ZERO))
#define MALLOCX_ARENA_GET(flags) \
    (((unsigned)(flags >> 8)) - 1)
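
/*
 * Worked example (illustrative): with the public encodings from jemalloc.h
 * (MALLOCX_LG_ALIGN(la) stores la in the low 6 bits, MALLOCX_ARENA(a) stores
 * a+1 in bits 8 and up), flags == MALLOCX_LG_ALIGN(4) | MALLOCX_ARENA(2) ==
 * 0x304 decodes as:
 *
 *   MALLOCX_ALIGN_GET_SPECIFIED(0x304) == ZU(1) << 4 == 16
 *   MALLOCX_ARENA_GET(0x304)           == (0x304 >> 8) - 1 == 2
 *
 * MALLOCX_ALIGN_GET() additionally masks with (SIZE_T_MAX-1) so that an
 * unspecified alignment (low 6 bits all zero) yields 0 rather than 1.
 */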

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM 4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
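
/*
 * Worked example (illustrative): with LG_QUANTUM == 4, QUANTUM == 16 and
 * QUANTUM_MASK == 0xf, so QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32,
 * while QUANTUM_CEILING(16) stays 16 (already a quantum multiple).
 */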

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))
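
/*
 * Worked example (illustrative; all three macros assume alignment is a power
 * of two): for a == 0x1234 and alignment == 0x100,
 *
 *   ALIGNMENT_ADDR2BASE(a, 0x100)    == 0x1200
 *   ALIGNMENT_ADDR2OFFSET(a, 0x100)  == 0x34
 *   ALIGNMENT_CEILING(0x1234, 0x100) == 0x1300
 */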

/* Declare a variable length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
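
/*
 * Usage sketch (illustrative; copy_keys is hypothetical): both expansions
 * declare function-local storage, either a C99 VLA or an alloca() allocation,
 * so the array must not be returned or stored beyond the enclosing scope:
 *
 *   void
 *   copy_keys(const uint64_t *src, size_t n)
 *   {
 *           VARIABLE_ARRAY(uint64_t, keys, n);
 *
 *           memcpy(keys, src, n * sizeof(uint64_t));
 *   }
 */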

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

extern bool in_valgrind;

/* Number of CPUs. */
extern unsigned ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */

arena_t *arenas_extend(unsigned ind);
arena_t *choose_arena_hard(tsd_t *tsd);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

/*
 * Include arena.h the first time in order to provide inline functions for this
 * header's inlines.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A

#ifndef JEMALLOC_ENABLE_INLINE
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(tsd_t *tsd, arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (small_s2u(size));
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}
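
/*
 * Worked example (illustrative, assuming 4 KiB pages and SMALL_MAXCLASS <
 * 5000 <= arena_maxclass): s2u(5000) == PAGE_CEILING(5000) == 8192, i.e. the
 * usable size is rounded up to a whole page.  Requests at or below
 * SMALL_MAXCLASS round up to a small size class via small_s2u(), and requests
 * above arena_maxclass round up to a chunk multiple via CHUNK_CEILING().
 */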

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    144 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = ALIGNMENT_CEILING(size, alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE) {
        if (usize <= SMALL_MAXCLASS)
            return (small_s2u(usize));
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         * If the run wouldn't fit within a chunk, round up to a huge
         * allocation size.
         */
        run_size = usize + alignment - PAGE;
        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}
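
/*
 * Illustrative cases (assuming 4 KiB pages): sa2u(1, PAGE) takes the first
 * branch and returns PAGE_CEILING(1) == 4096, while sa2u(SIZE_T_MAX, PAGE)
 * trips an overflow guard above and returns 0, which callers must treat as a
 * request that cannot be satisfied.
 */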

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
    unsigned narenas;

    malloc_mutex_lock(&arenas_lock);
    narenas = narenas_total;
    malloc_mutex_unlock(&arenas_lock);

    return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(tsd_t *tsd, arena_t *arena)
{
    arena_t *ret;

    if (arena != NULL)
        return (arena);

    if (unlikely((ret = tsd_arena_get(tsd)) == NULL)) {
        ret = choose_arena_hard(tsd);
        assert(ret != NULL);
    }

    return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h the second and third times in order to resolve circular
 * dependencies with tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
void *imalloc(tsd_t *tsd, size_t size);
void *icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
void *icalloc(tsd_t *tsd, size_t size);
void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    bool try_tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(tsd_t *tsd, void *ptr, bool try_tcache);
void isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, bool try_tcache);
void isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc, arena_t *arena);
void *iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t size, size_t alignment,
    bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(tsd, arena, size, false, try_tcache));
    else
        return (huge_malloc(tsd, arena, size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size)
{

    return (imalloct(tsd, size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
{

    if (size <= arena_maxclass)
        return (arena_malloc(tsd, arena, size, true, try_tcache));
    else
        return (huge_malloc(tsd, arena, size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size)
{

    return (icalloct(tsd, size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));

    if (usize <= arena_maxclass && alignment <= PAGE)
        ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
    else {
        if (usize <= arena_maxclass) {
            ret = arena_palloc(choose_arena(tsd, arena), usize,
                alignment, zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(tsd, arena, usize, zero);
        else
            ret = huge_palloc(tsd, arena, usize, alignment, zero);
    }

    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

    return (ipalloct(tsd, usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || !demote);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        ret = arena_salloc(ptr, demote);
    else
        ret = huge_salloc(ptr);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
        return (0);

    return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
    size_t ret;

    if (usize <= SMALL_MAXCLASS) {
        size_t binind = small_size2bin(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
    size_t usize = isalloc(ptr, false);

    return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(tsd, chunk, ptr, try_tcache);
    else
        huge_dalloc(ptr);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
    else
        huge_dalloc(ptr);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

    idalloct(tsd, ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, bool try_tcache)
{

    if (config_fill && unlikely(opt_quarantine))
        quarantine(tsd, ptr);
    else
        idalloct(tsd, ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
{

    if (config_fill && unlikely(opt_quarantine))
        quarantine(tsd, ptr);
    else
        isdalloct(tsd, ptr, size, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc, arena_t *arena)
{
    void *p;
    size_t usize, copysize;

    usize = sa2u(size + extra, alignment);
    if (usize == 0)
        return (NULL);
    p = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc, arena);
    if (p == NULL) {
        if (extra == 0)
            return (NULL);
        /* Try again, without extra this time. */
        usize = sa2u(size, alignment);
        if (usize == 0)
            return (NULL);
        p = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
            arena);
        if (p == NULL)
            return (NULL);
    }
    /*
     * Copy at most size bytes (not size+extra), since the caller has no
     * expectation that the extra bytes will be reliably preserved.
     */
    copysize = (size < oldsize) ? size : oldsize;
    memcpy(p, ptr, copysize);
    iqalloc(tsd, ptr, try_tcache_dalloc);
    return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
            zero, try_tcache_alloc, try_tcache_dalloc, arena));
    }

    if (size <= arena_maxclass) {
        return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
            alignment, zero, try_tcache_alloc, try_tcache_dalloc));
    } else {
        return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
            alignment, zero, try_tcache_dalloc));
    }
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t size, size_t alignment, bool zero)
{

    return (iralloct(tsd, ptr, size, alignment, zero, true, true, NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);
    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        /* Existing object alignment is inadequate. */
        return (true);
    }

    if (size <= arena_maxclass)
        return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
    else
        return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */