#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

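/*
 * Bookkeeping helpers for huge allocations.  Each huge region is described by
 * an extent_node_t; these wrappers register, look up, and deregister the node
 * for a given base pointer via chunk_register(), chunk_lookup(), and
 * chunk_deregister().
 */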
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

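/*
 * Allocate a huge region of usize bytes.  The caller is expected to pass a
 * size already rounded by s2u(); the work is delegated to huge_palloc() with
 * the default chunksize alignment.
 */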
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
    tcache_t *tcache)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

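/*
 * Allocate a huge region with the requested alignment: validate the aligned
 * size, allocate an extent node to describe the region, obtain backing chunks
 * from the arena, register the node so the pointer can be mapped back to it,
 * link the node into the arena's huge list, and apply zero/junk fill as
 * configured.
 */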
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t ausize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	arena_decay_tick(tsd, arena);
	return (ret);
}

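/*
 * Junk-fill a huge region that is about to be deallocated.  The JEMALLOC_JET
 * #undef/#define wrappers expose the implementation through a function
 * pointer so that the test suite can interpose on it.
 */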
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

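/*
 * Resize in place when the old and new sizes round to the same number of
 * chunks.  Only the recorded size and the zeroed/junk fill state change, and
 * the arena is notified via arena_chunk_ralloc_huge_similar(); no chunks are
 * mapped or unmapped.
 */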
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

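/*
 * Shrink in place to a smaller number of chunks: split off the excess via the
 * arena's chunk hooks, junk-fill or purge the trailing space, update the
 * extent node under the huge mutex, and return the excess chunks to the
 * arena.
 */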
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}

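/*
 * Grow in place by asking the arena to extend the mapping with additional
 * chunks directly after the existing ones.  Returns true on failure, matching
 * the convention of the other helpers here.
 */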
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

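/*
 * Try to satisfy a resize request without moving the allocation.  Depending
 * on how the old and new sizes round to chunks, this expands, keeps, or
 * shrinks the existing mapping via the helpers above.  Returns true if the
 * caller must fall back to allocate-copy-free.
 */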
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		arena_decay_tick(tsd, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

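/*
 * Allocate the destination for a move: huge_malloc() suffices when chunk
 * alignment is enough, otherwise huge_palloc() honors the stricter alignment.
 */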
static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}

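/*
 * Reallocate a huge region.  An in-place resize is attempted first; if that
 * fails, a new region is allocated, the overlapping bytes are copied, and the
 * old region is freed.
 */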
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class. In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

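/*
 * Free a huge region: deregister and unlink its extent node, junk-fill the
 * region if configured, return the chunks to the arena, and free the node
 * itself.
 */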
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);

	arena_decay_tick(tsd, arena);
}

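/* Return the arena that backs the huge allocation at ptr. */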
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

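/* Return the current size of the huge allocation at ptr. */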
size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

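/*
 * Profiling context accessors.  The prof_tctx_t pointer lives in the extent
 * node and is protected by the owning arena's huge mutex;
 * huge_prof_tctx_reset() stores the sentinel value (prof_tctx_t *)1U.
 */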
prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}

void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}