/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
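
/*
 * Illustrative sketch (exposition only, not code from the original header):
 * because the states above are small non-NULL integers cast to tcache_t *, a
 * consumer can tell a live tcache apart from an encoded state (or NULL) with
 * a single comparison:
 *
 *     if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
 *         take_non_cached_path();
 *
 * take_non_cached_path() is a hypothetical stand-in for whatever the caller
 * does when no usable cache is available.
 */
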
/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN 20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the per-size-class default,
 * which is twice the number of regions per run for the size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX 200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
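
/*
 * Worked example (illustrative): with the default of 15, 1U << 15 == 32768,
 * so allocations up to 32 KiB are eligible for thread caching, subject to
 * clamping into the valid size class range when tcache_maxclass is computed
 * at boot.
 */
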
/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP 8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
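
/*
 * Worked example (illustrative; NBINS == 39 is only an assumed size class
 * count, the real value depends on the build configuration): 8192 / 39 == 210,
 * so TCACHE_GC_INCR == 211 and an incremental GC step runs every 211
 * allocation/deallocation events.  Visiting all 39 small bins then takes about
 * 39 * 211 == 8229 events, slightly above the nominal 8192, which is the
 * rounding effect noted above.
 */
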
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
    tcache_enabled_false   = 0, /* Enable cast to/from bool. */
    tcache_enabled_true    = 1,
    tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
    unsigned ncached_max; /* Upper limit on ncached. */
};

struct tcache_bin_s {
    tcache_bin_stats_t tstats;
    int low_water;        /* Min # cached since last GC. */
    unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
    unsigned ncached;     /* # of cached objects. */
    /*
     * To make use of adjacent cacheline prefetch, the items in the avail
     * stack go to higher addresses for newer allocations.  avail points
     * just above the available space, which means that
     * avail[-ncached, ... -1] are available items and the lowest item will
     * be allocated first.
     */
    void **avail; /* Stack of available objects. */
};
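
/*
 * Illustrative sketch (exposition only, not code from the original header):
 * with ncached == 3, avail[-3] through avail[-1] hold cached objects and the
 * fast paths below manipulate the stack as
 *
 *     pop:  ret = *(tbin->avail - tbin->ncached); tbin->ncached--;
 *     push: tbin->ncached++; *(tbin->avail - tbin->ncached) = ptr;
 *
 * so allocation pops from the lowest occupied slot and deallocation pushes one
 * slot below it (LIFO reuse), matching tcache_alloc_easy() and the
 * tcache_dalloc_*() functions.
 */
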
struct tcache_s {
    ql_elm(tcache_t) link;    /* Used for aggregating stats. */
    uint64_t prof_accumbytes; /* Cleared after arena_prof_accum(). */
    ticker_t gc_ticker;       /* Drives incremental GC. */
    szind_t next_gc_bin;      /* Next bin to GC. */
    tcache_bin_t tbins[1];    /* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array.  During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     */
};
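
/*
 * Illustrative sizing sketch (names and layout details are assumptions about
 * tcache_create(), not definitions from this header): the allocation backing
 * a tcache_t is computed roughly as
 *
 *     size_t size = offsetof(tcache_t, tbins) + sizeof(tcache_bin_t) * nhbins;
 *     size += total_stack_slots * sizeof(void *);
 *
 * after which each tbins[i].avail is pointed at its slice of the trailing
 * pointer array; total_stack_slots is a hypothetical name for the sum of all
 * bins' ncached_max values.
 */
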
/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
    union {
        tcache_t *tcache;
        tcaches_t *next;
    };
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern tcache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a
 * sparse array, so it has no physical memory footprint until individual pages
 * are touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory
 * usage.
 */
extern tcaches_t *tcaches;
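
/*
 * Application-level usage sketch (exposition only; this uses the public
 * jemalloc API from <jemalloc/jemalloc.h>, not anything declared in this
 * header):
 *
 *     unsigned tc;
 *     size_t sz = sizeof(tc);
 *     if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
 *         void *p = mallocx(4096, MALLOCX_TCACHE(tc));
 *         dallocx(p, MALLOCX_TCACHE(tc));
 *         mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
 *     }
 */
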
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
    arena_t *newarena);
void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
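
/*
 * Application-level usage sketch (exposition only; these are the public
 * mallctl knobs that correspond to the per-thread enable/flush wrappers
 * declared above, not declarations from this header):
 *
 *     bool off = false;
 *     mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
 *     mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */
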
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))

JEMALLOC_INLINE void
tcache_flush(void)
{
    tsd_t *tsd;

    cassert(config_tcache);

    tsd = tsd_fetch();
    tcache_cleanup(tsd);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tsd_t *tsd;
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tsd = tsd_fetch();
    tcache_enabled = tsd_tcache_enabled_get(tsd);
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tsd_tcache_enabled_set(tsd, tcache_enabled);
    }

    return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tsd_t *tsd;
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tsd = tsd_fetch();

    tcache_enabled = (tcache_enabled_t)enabled;
    tsd_tcache_enabled_set(tsd, tcache_enabled);

    if (!enabled)
        tcache_cleanup(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
    tcache_t *tcache;

    if (!config_tcache)
        return (NULL);

    tcache = tsd_tcache_get(tsd);
    if (!create)
        return (tcache);
    if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
        tcache = tcache_get_hard(tsd);
        tsd_tcache_set(tsd, tcache);
    }

    return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

    if (TCACHE_GC_INCR == 0)
        return;

    if (unlikely(ticker_tick(&tcache->gc_ticker)))
        tcache_event_hard(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
    void *ret;

    if (unlikely(tbin->ncached == 0)) {
        tbin->low_water = -1;
        *tcache_success = false;
        return (NULL);
    }
    /*
     * tcache_success (instead of ret) should be checked upon the return of
     * this function.  We avoid checking (ret == NULL) because there is
     * never a null stored on the avail stack (which is unknown to the
     * compiler), and eagerly checking ret would cause a pipeline stall
     * (waiting for the cacheline).
     */
    *tcache_success = true;
    ret = *(tbin->avail - tbin->ncached);
    tbin->ncached--;

    if (unlikely((int)tbin->ncached < tbin->low_water))
        tbin->low_water = tbin->ncached;

    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    void *ret;
    tcache_bin_t *tbin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL))
            return (NULL);

        ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
            &tcache_hard_success);
        if (tcache_hard_success == false)
            return (NULL);
    }

    /*
     * Only compute usize if required.  The checks in the following if
     * statement are all static.
     */
    if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
        usize = index2size(binind);
        assert(tcache_salloc(ret) == usize);
    }

    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (unlikely(opt_zero))
                memset(ret, 0, usize);
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += usize;
    tcache_event(tsd, tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    void *ret;
    tcache_bin_t *tbin;
    bool tcache_success;

    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL))
            return (NULL);

        ret = arena_malloc_large(tsd, arena, binind, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);

        /* Only compute usize on demand. */
        if (config_prof || (slow_path && config_fill) ||
            unlikely(zero)) {
            usize = index2size(binind);
            assert(usize <= tcache_maxclass);
        }

        if (config_prof && usize == LARGE_MINCLASS) {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
                LG_PAGE);
            arena_mapbits_large_binind_set(chunk, pageind,
                BININD_INVALID);
        }
        if (likely(!zero)) {
            if (slow_path && config_fill) {
                if (unlikely(opt_junk_alloc))
                    memset(ret, 0xa5, usize);
                else if (unlikely(opt_zero))
                    memset(ret, 0, usize);
            }
        } else
            memset(ret, 0, usize);

        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += usize;
    }

    tcache_event(tsd, tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, tbin, binind,
            (tbin_info->ncached_max >> 1));
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
    szind_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(ptr) <= tcache_maxclass);

    binind = size2index(size);

    if (slow_path && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_large(ptr, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, tbin, binind,
            (tbin_info->ncached_max >> 1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
    tcaches_t *elm = &tcaches[ind];

    if (unlikely(elm->tcache == NULL))
        elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
    return (elm->tcache);
}

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/