#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
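/*
 * Huge allocations are chunk-aligned and are tracked by extent_node_t
 * structures registered in the chunks radix tree.  These helpers wrap the
 * lookup, registration, and deregistration of a huge allocation's node.
 */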
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}
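/*
 * huge_malloc() rounds the request up to a usable size (s2u() returns 0 on
 * size_t overflow) and defers to huge_palloc() with chunksize alignment.
 */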
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache)
{
	size_t usize;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (NULL);
	}

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
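/*
 * huge_palloc() allocates one or more contiguous chunks for the request,
 * allocates an extent node to track them, registers the node, links it into
 * the owning arena's huge list, and performs zero/junk fill as configured.
 */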
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0))
		return (NULL);
	assert(usize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    size, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, size, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, size);
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, size);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, size);

	return (ret);
}
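/*
 * Junk-fill a huge region that is being deallocated.  Filling is skipped when
 * the backing chunks are about to be unmapped anyway.  The JEMALLOC_JET
 * wrappers let the test harness interpose on this function.
 */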
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
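/*
 * Resize in place when the old and new sizes round to the same chunked
 * footprint: junk or purge the trailing space when shrinking, zero/junk fill
 * it when growing, and update the node's recorded size and zeroed state.
 */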
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}
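/*
 * Shrink in place: split off the excess chunks, junk or purge the freed tail,
 * update the node, and return the excess chunks to the arena.
 */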
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}
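/*
 * Expand in place: ask the arena to extend the existing mapping, then zero or
 * junk fill the newly usable space, taking into account which parts are
 * already known to be zeroed.
 */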
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}
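/*
 * Try to satisfy a huge reallocation without moving the allocation: first
 * attempt in-place expansion, then an in-place resize within the current
 * chunk footprint, then an in-place shrink.  Returns false on success and
 * true if the caller must allocate-copy-free instead.
 */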
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
			return (false);
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && !huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero))
			return (false);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
	return (true);
}
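/* Allocate the new region for a moving reallocation. */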
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
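/*
 * Moving reallocation fallback: allocate a new huge region, copy the
 * overlapping portion of the data, and free the old region.
 */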
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}
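/*
 * Deallocate a huge region: deregister and unlink its extent node, junk fill
 * the region, return its chunks to the arena, and free the node itself.
 */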
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true);
}
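/*
 * The accessors below read and write per-allocation metadata stored in the
 * extent node; fields that can change after allocation (size, prof_tctx) are
 * accessed under the owning arena's huge_mtx.
 */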
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}
size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}
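/*
 * Reset the profiling context to the sentinel value 1U, which the prof code
 * uses to mark an allocation that is not sampled.
 */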
void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}