#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t chunk_hooks_default = {
    chunk_alloc_default,
    chunk_dalloc_default,
    chunk_commit_default,
    chunk_decommit_default,
    chunk_purge_default,
    chunk_split_default,
    chunk_merge_default
};
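
/*
 * Illustrative sketch (not part of the original file): an application built
 * against a jemalloc version that exposes the "arena.<i>.chunk_hooks" mallctl
 * can replace the defaults above; such a request is typically routed through
 * chunk_hooks_set() below.  The my_* functions are hypothetical and must
 * match the chunk_*_t prototypes declared above:
 *
 *     chunk_hooks_t old_hooks, new_hooks = {
 *         my_alloc, my_dalloc, my_commit, my_decommit,
 *         my_purge, my_split, my_merge
 *     };
 *     size_t sz = sizeof(chunk_hooks_t);
 *     mallctl("arena.0.chunk_hooks", &old_hooks, &sz, &new_hooks, sz);
 */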

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

    return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
    chunk_hooks_t chunk_hooks;

    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks = chunk_hooks_get_locked(arena);
    malloc_mutex_unlock(&arena->chunks_mtx);

    return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_t old_chunk_hooks;

    malloc_mutex_lock(&arena->chunks_mtx);
    old_chunk_hooks = arena->chunk_hooks;
    /*
     * Copy each field atomically so that it is impossible for readers to
     * see partially updated pointers.  There are places where readers only
     * need one hook function pointer (therefore no need to copy the
     * entirety of arena->chunk_hooks), and stale reads do not affect
     * correctness, so they perform unlocked reads.
     */
#define ATOMIC_COPY_HOOK(n) do {                        \
    union {                                             \
        chunk_##n##_t **n;                              \
        void **v;                                       \
    } u;                                                \
    u.n = &arena->chunk_hooks.n;                        \
    atomic_write_p(u.v, chunk_hooks->n);                \
} while (0)
    ATOMIC_COPY_HOOK(alloc);
    ATOMIC_COPY_HOOK(dalloc);
    ATOMIC_COPY_HOOK(commit);
    ATOMIC_COPY_HOOK(decommit);
    ATOMIC_COPY_HOOK(purge);
    ATOMIC_COPY_HOOK(split);
    ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
    malloc_mutex_unlock(&arena->chunks_mtx);

    return (old_chunk_hooks);
}

static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
    static const chunk_hooks_t uninitialized_hooks =
        CHUNK_HOOKS_INITIALIZER;

    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
        0) {
        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
            chunk_hooks_get(arena);
    }
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

bool
chunk_register(const void *chunk, const extent_node_t *node)
{

    assert(extent_node_addr_get(node) == chunk);

    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
        return (true);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nadd = (size == 0) ? 1 : size / chunksize;
        size_t cur = atomic_add_z(&curchunks, nadd);
        size_t high = atomic_read_z(&highchunks);
        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highchunks update race.
             */
            high = atomic_read_z(&highchunks);
        }
        if (cur > high && prof_gdump_get_unlocked())
            prof_gdump();
    }

    return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
    bool err;

    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
    assert(!err);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nsub = (size == 0) ? 1 : size / chunksize;
        assert(atomic_read_z(&curchunks) >= nsub);
        atomic_sub_z(&curchunks, nsub);
    }
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
    extent_node_t key;

    assert(size == CHUNK_CEILING(size));

    extent_node_init(&key, arena, NULL, size, false, false);
    return (extent_tree_szad_nsearch(chunks_szad, &key));
}
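
/*
 * Illustrative example (not from the original source): if chunks_szad holds
 * free extents of 1, 3, 3, and 8 chunks, a request for 2 chunks nsearches to
 * the smallest extent that still fits -- the lower-addressed of the two
 * 3-chunk extents -- leaving the 8-chunk extent available for larger
 * requests.
 */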

static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
    void *ret;
    extent_node_t *node;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed, committed;

    assert(new_addr == NULL || alignment == chunksize);
    /*
     * Cached chunks use the node linkage embedded in their headers, in
     * which case dalloc_node is true, and new_addr is non-NULL because
     * we're operating on a specific chunk.
     */
    assert(dalloc_node || new_addr != NULL);

    alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
    if (new_addr != NULL) {
        extent_node_t key;
        extent_node_init(&key, arena, new_addr, alloc_size, false,
            false);
        node = extent_tree_ad_search(chunks_ad, &key);
    } else {
        node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
            alloc_size);
    }
    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
        size)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
        alignment) - (uintptr_t)extent_node_addr_get(node);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_node_size_get(node) >= leadsize + size);
    trailsize = extent_node_size_get(node) - leadsize - size;
    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
    zeroed = extent_node_zeroed_get(node);
    if (zeroed)
        *zero = true;
    committed = extent_node_committed_get(node);
    if (committed)
        *commit = true;
    /* Split the lead. */
    if (leadsize != 0 &&
        chunk_hooks->split(extent_node_addr_get(node),
        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        return (NULL);
    }
    /* Remove node from the tree. */
    extent_tree_szad_remove(chunks_szad, node);
    extent_tree_ad_remove(chunks_ad, node);
    arena_chunk_cache_maybe_remove(arena, node, cache);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        extent_node_size_set(node, leadsize);
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Split the trail. */
        if (chunk_hooks->split(ret, size + trailsize, size,
            trailsize, false, arena->ind)) {
            if (dalloc_node && node != NULL)
                arena_node_dalloc(arena, node);
            malloc_mutex_unlock(&arena->chunks_mtx);
            chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
                cache, ret, size + trailsize, zeroed, committed);
            return (NULL);
        }
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            node = arena_node_alloc(arena);
            if (node == NULL) {
                malloc_mutex_unlock(&arena->chunks_mtx);
                chunk_record(arena, chunk_hooks, chunks_szad,
                    chunks_ad, cache, ret, size + trailsize,
                    zeroed, committed);
                return (NULL);
            }
        }
        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
            trailsize, zeroed, committed);
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
            ret, size, zeroed, committed);
        return (NULL);
    }
    malloc_mutex_unlock(&arena->chunks_mtx);

    assert(dalloc_node || node != NULL);
    if (dalloc_node && node != NULL)
        arena_node_dalloc(arena, node);
    if (*zero) {
        if (!zeroed)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;

            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }
    }
    return (ret);
}
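
/*
 * Worked example for the split arithmetic above (illustrative only): a
 * request of size = 2 chunks with alignment = 4 chunks computes
 * alloc_size >= 2 + 4 - 1 = 5 chunks.  If the recycled extent is 5 chunks
 * starting 3 chunks past a 4-chunk-aligned boundary, then leadsize = 1 chunk,
 * the returned region is the next 2 chunks, and
 * trailsize = 5 - 1 - 2 = 2 chunks; the lead and trail are reinserted as
 * smaller extents.
 */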

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /* mmap. */
    if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /* "secondary" dss. */
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);

    /* All strategies for allocation failed. */
    return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
    void *ret;
    bool zero, commit;

    /*
     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
     * because it's critical that chunk_alloc_base() return untouched
     * demand-zeroed virtual memory.
     */
    zero = true;
    commit = true;
    ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
    void *ret;
    bool commit;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    commit = true;
    ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
        &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
        &commit, dalloc_node);
    if (ret == NULL)
        return (NULL);
    assert(commit);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
    arena_t *arena;

    arena = arena_get(arena_ind, false);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);
    return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
    void *ret;
    arena_t *arena;

    arena = chunk_arena_get(arena_ind);
    ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
        commit, arena->dss_prec);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
        &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
        commit, true));
}
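
/*
 * Descriptive note (not from the original comments): the cached trees hold
 * recently freed dirty chunks that are still subject to purging, while the
 * retained trees hold address space that was kept when deallocation was
 * declined (see chunk_dalloc_arena() below).  Both are reused through
 * chunk_recycle(); chunk_alloc_wrapper() tries the retained trees before
 * invoking the alloc hook.
 */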

void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;

    chunk_hooks_assure_initialized(arena, chunk_hooks);

    ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
        alignment, zero, commit);
    if (ret == NULL) {
        ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
            commit, arena->ind);
        if (ret == NULL)
            return (NULL);
    }

    if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
    return (ret);
}

static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
    bool unzeroed;
    extent_node_t *node, *prev;
    extent_node_t key;

    assert(!cache || !zeroed);
    unzeroed = cache || !zeroed;
    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
        false, false);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && extent_node_addr_get(node) ==
        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
        committed && !chunk_hooks->merge(chunk, size,
        extent_node_addr_get(node), extent_node_size_get(node), false,
        arena->ind)) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, chunk);
        extent_node_size_set(node, size + extent_node_size_get(node));
        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
            !unzeroed);
        extent_tree_szad_insert(chunks_szad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        node = arena_node_alloc(arena);
        if (node == NULL) {
            /*
             * Node allocation failed, which is an exceedingly
             * unlikely failure.  Leak chunk after making sure its
             * pages have already been purged, so that this is only
             * a virtual memory leak.
             */
            if (cache) {
                chunk_purge_wrapper(arena, chunk_hooks, chunk,
                    size, 0, size);
            }
            goto label_return;
        }
        extent_node_init(node, arena, chunk, size, !unzeroed,
            committed);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szad_insert(chunks_szad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
        extent_node_size_get(prev)) == chunk &&
        extent_node_committed_get(prev) == committed &&
        !chunk_hooks->merge(extent_node_addr_get(prev),
        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, prev);
        extent_tree_ad_remove(chunks_ad, prev);
        arena_chunk_cache_maybe_remove(arena, prev, cache);
        extent_tree_szad_remove(chunks_szad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, extent_node_addr_get(prev));
        extent_node_size_set(node, extent_node_size_get(prev) +
            extent_node_size_get(node));
        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
            extent_node_zeroed_get(node));
        extent_tree_szad_insert(chunks_szad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);

        arena_node_dalloc(arena, prev);
    }

label_return:
    malloc_mutex_unlock(&arena->chunks_mtx);
}

void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
        &arena->chunks_ad_cached, true, chunk, size, false, committed);
    arena_maybe_purge(arena);
}

void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    /* Try to deallocate. */
    if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
        return;
    /* Try to decommit; purge if that fails. */
    if (committed) {
        committed = chunk_hooks->decommit(chunk, size, 0, size,
            arena->ind);
    }
    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
        arena->ind);
    chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
        &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}
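
/*
 * Descriptive note (not from the original comments): the chunk hooks return
 * false on success, so the dalloc call above only falls through when the hook
 * opts out of unmapping; in that case the chunk is decommitted if possible,
 * purged otherwise, and recorded in the retained trees so its address space
 * can be handed out again by chunk_alloc_retained().
 */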

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

    if (!have_dss || !chunk_in_dss(chunk))
        return (chunk_dalloc_mmap(chunk, size));
    return (true);
}

void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    chunk_hooks->dalloc(chunk, size, committed, arena->ind);
    if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
        JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
        length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

    if (!maps_coalesce)
        return (true);
    return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

    if (!maps_coalesce)
        return (true);
    if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
        return (true);

    return (false);
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

    return ((rtree_node_elm_t *)base_alloc(nelms *
        sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);

    /*
     * Verify actual page size is equal to or an integral multiple of
     * configured page size.
     */
    if (info.dwPageSize & ((1U << LG_PAGE) - 1))
        return (true);

    /*
     * Configure chunksize (if not set) to match granularity (usually 64K),
     * so pages_map will always take fast path.
     */
    if (!opt_lg_chunk) {
        opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
            - 1;
    }
#else
    if (!opt_lg_chunk)
        opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (have_dss && chunk_dss_boot())
        return (true);
    if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
        opt_lg_chunk), chunks_rtree_node_alloc, NULL))
        return (true);

    return (false);
}
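
/*
 * Illustrative arithmetic for the rtree_new() call above: on a 64-bit build
 * (LG_SIZEOF_PTR == 3) the key width is
 * (1 << (LG_SIZEOF_PTR+3)) - opt_lg_chunk = 64 - opt_lg_chunk bits; e.g. with
 * 2 MiB chunks (opt_lg_chunk == 21) the radix tree indexes the 43 significant
 * bits of a chunk address, since the low 21 bits of any chunk-aligned pointer
 * are zero.
 */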

void
chunk_prefork(void)
{

    chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

    chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

    chunk_dss_postfork_child();
}