/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED     ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY    ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX          TCACHE_STATE_PURGATORY

/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN 20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the one otherwise imposed:
 * twice the number of regions per run for this size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX 200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
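/*
 * Worked example (editorial note, not part of the upstream header): with the
 * default value of 15, tcache_maxclass is derived from (1U << 15) == 32768,
 * i.e. objects up to 32 KiB are candidates for thread caching unless
 * opt_lg_tcache_max is overridden at run time.
 */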

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP 8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
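/*
 * Worked example (editorial illustration; NBINS is configuration dependent):
 * if NBINS were 32, TCACHE_GC_INCR would evaluate to (8192 / 32) + 1 == 257,
 * so one bin is garbage collected roughly every 257 events and a full sweep
 * over all 32 bins completes after 257 * 32 == 8224 events, slightly more
 * than TCACHE_GC_SWEEP as the comment above notes.
 */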

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
    tcache_enabled_false   = 0, /* Enable cast to/from bool. */
    tcache_enabled_true    = 1,
    tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
    unsigned ncached_max; /* Upper limit on ncached. */
};

struct tcache_bin_s {
    tcache_bin_stats_t tstats;
    int low_water;        /* Min # cached since last GC. */
    unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
    unsigned ncached;     /* # of cached objects. */
    /*
     * To make use of adjacent cacheline prefetch, the items in the avail
     * stack go to higher addresses for newer allocations.  avail points
     * just above the available space, which means that
     * avail[-ncached, ... -1] are available items and the lowest item will
     * be allocated first.
     */
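    /*
     * Illustrative layout (editorial sketch, not normative): with
     * ncached_max == 4 and ncached == 2, avail points one element past the
     * top of the stack and the cached items sit immediately below it:
     *
     *   avail[-4]  avail[-3]  avail[-2]  avail[-1]   avail
     *   (unused)   (unused)   (cached)   (cached)    ^-- one past the stack
     *
     * tcache_alloc_easy() then returns avail[-ncached] (here avail[-2]), the
     * lowest cached item, so refilled objects are consumed in ascending
     * address order.
     */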
    void **avail;         /* Stack of available objects. */
};

struct tcache_s {
    ql_elm(tcache_t) link;        /* Used for aggregating stats. */
    uint64_t prof_accumbytes;     /* Cleared after arena_prof_accum(). */
    ticker_t gc_ticker;           /* Drives incremental GC. */
    szind_t next_gc_bin;          /* Next bin to GC. */
    tcache_bin_t tbins[1];        /* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array.  During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     */
};
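/*
 * Illustrative allocation layout (editorial sketch; exact sizes and offsets
 * are computed at tcache creation time): a single allocation holds the fixed
 * tcache_t fields, then nhbins tcache_bin_t elements, then the contiguous
 * region of void * slots that the per-bin avail pointers index into.
 *
 *   | tcache_t fields | tbins[0] .. tbins[nhbins-1] | avail stacks ........ |
 */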

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
    union {
        tcache_t *tcache;
        tcaches_t *next;
    };
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern tcache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a
 * sparse array, so it has no physical memory footprint until individual pages
 * are touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory
 * usage.
 */
extern tcaches_t *tcaches;
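/*
 * Example (editorial illustration using the public jemalloc API; not part of
 * the upstream header): an application might drive an explicit tcache roughly
 * as follows.
 *
 *   unsigned tc;
 *   size_t sz = sizeof(tc);
 *   if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
 *       void *p = mallocx(64, MALLOCX_TCACHE(tc));
 *       dallocx(p, MALLOCX_TCACHE(tc));
 *       mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
 *   }
 */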

size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
    arena_t *newarena);
void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
tcache_flush(void)
{
    tsd_t *tsd;

    cassert(config_tcache);

    tsd = tsd_fetch();
    tcache_cleanup(tsd);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tsd_t *tsd;
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tsd = tsd_fetch();
    tcache_enabled = tsd_tcache_enabled_get(tsd);
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tsd_tcache_enabled_set(tsd, tcache_enabled);
    }

    return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tsd_t *tsd;
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tsd = tsd_fetch();

    tcache_enabled = (tcache_enabled_t)enabled;
    tsd_tcache_enabled_set(tsd, tcache_enabled);

    if (!enabled)
        tcache_cleanup(tsd);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
    tcache_t *tcache;

    if (!config_tcache)
        return (NULL);

    tcache = tsd_tcache_get(tsd);
    if (!create)
        return (tcache);
    if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
        tcache = tcache_get_hard(tsd);
        tsd_tcache_set(tsd, tcache);
    }

    return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

    if (TCACHE_GC_INCR == 0)
        return;

    if (unlikely(ticker_tick(&tcache->gc_ticker)))
        tcache_event_hard(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
    void *ret;

    if (unlikely(tbin->ncached == 0)) {
        tbin->low_water = -1;
        *tcache_success = false;
        return (NULL);
    }
    /*
     * tcache_success (instead of ret) should be checked upon the return of
     * this function.  We avoid checking (ret == NULL) because there is
     * never a null stored on the avail stack (which is unknown to the
     * compiler), and eagerly checking ret would cause a pipeline stall
     * (waiting for the cacheline).
     */
    *tcache_success = true;
    ret = *(tbin->avail - tbin->ncached);
    tbin->ncached--;

    if (unlikely((int)tbin->ncached < tbin->low_water))
        tbin->low_water = tbin->ncached;

    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    void *ret;
    tcache_bin_t *tbin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL))
            return (NULL);

        ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
            &tcache_hard_success);
        if (!tcache_hard_success)
            return (NULL);
    }

    assert(ret);
    /*
     * Only compute usize if required.  The checks in the following if
     * statement are all static.
     */
    if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
        usize = index2size(binind);
        assert(tcache_salloc(ret) == usize);
    }

    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (unlikely(opt_zero))
                memset(ret, 0, usize);
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += usize;
    tcache_event(tsd, tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    void *ret;
    tcache_bin_t *tbin;
    bool tcache_success;

    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        arena = arena_choose(tsd, arena);
        if (unlikely(arena == NULL))
            return (NULL);

        ret = arena_malloc_large(tsd, arena, binind, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        size_t usize JEMALLOC_CC_SILENCE_INIT(0);

        /* Only compute usize on demand */
        if (config_prof || (slow_path && config_fill) ||
            unlikely(zero)) {
            usize = index2size(binind);
            assert(usize <= tcache_maxclass);
        }

        if (config_prof && usize == LARGE_MINCLASS) {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
                LG_PAGE);
            arena_mapbits_large_binind_set(chunk, pageind,
                BININD_INVALID);
        }
        if (likely(!zero)) {
            if (slow_path && config_fill) {
                if (unlikely(opt_junk_alloc))
                    memset(ret, 0xa5, usize);
                else if (unlikely(opt_zero))
                    memset(ret, 0, usize);
            }
        } else
            memset(ret, 0, usize);

        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += usize;
    }

    tcache_event(tsd, tcache);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, tbin, binind,
            (tbin_info->ncached_max >> 1));
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
    szind_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(ptr) <= tcache_maxclass);

    binind = size2index(size);

    if (slow_path && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_large(ptr, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, tbin, binind,
            (tbin_info->ncached_max >> 1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;

    tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
    tcaches_t *elm = &tcaches[ind];
    if (unlikely(elm->tcache == NULL))
        elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
    return (elm->tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/