#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t chunk_hooks_default = {
    chunk_alloc_default,
    chunk_dalloc_default,
    chunk_commit_default,
    chunk_decommit_default,
    chunk_purge_default,
    chunk_split_default,
    chunk_merge_default
};

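/*
 * All chunk hooks follow the same return convention as the defaults above:
 * the alloc hook returns NULL on failure, and the boolean hooks return false
 * on success and true on error/opt-out.  Custom hooks installed via
 * chunk_hooks_set() below are expected to preserve that convention.
 */
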
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

    return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
    chunk_hooks_t chunk_hooks;

    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks = chunk_hooks_get_locked(arena);
    malloc_mutex_unlock(&arena->chunks_mtx);

    return (chunk_hooks);
}

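/*
 * Install new chunk hooks for the arena and return the previously installed
 * set.  The copy is performed field by field under chunks_mtx; see the
 * comment inside regarding atomic per-field writes.
 */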
chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_t old_chunk_hooks;

    malloc_mutex_lock(&arena->chunks_mtx);
    old_chunk_hooks = arena->chunk_hooks;
    /*
     * Copy each field atomically so that it is impossible for readers to
     * see partially updated pointers.  There are places where readers only
     * need one hook function pointer (therefore no need to copy the
     * entirety of arena->chunk_hooks), and stale reads do not affect
     * correctness, so they perform unlocked reads.
     */
#define ATOMIC_COPY_HOOK(n) do {                                        \
    union {                                                             \
        chunk_##n##_t **n;                                              \
        void **v;                                                       \
    } u;                                                                \
    u.n = &arena->chunk_hooks.n;                                        \
    atomic_write_p(u.v, chunk_hooks->n);                                \
} while (0)
    ATOMIC_COPY_HOOK(alloc);
    ATOMIC_COPY_HOOK(dalloc);
    ATOMIC_COPY_HOOK(commit);
    ATOMIC_COPY_HOOK(decommit);
    ATOMIC_COPY_HOOK(purge);
    ATOMIC_COPY_HOOK(split);
    ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
    malloc_mutex_unlock(&arena->chunks_mtx);

    return (old_chunk_hooks);
}

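/*
 * For reference, a sketch of what ATOMIC_COPY_HOOK(alloc) above expands to
 * (assuming the chunk_alloc_t hook typedef from the public jemalloc API):
 *
 *    do {
 *        union {
 *            chunk_alloc_t **alloc;
 *            void **v;
 *        } u;
 *        u.alloc = &arena->chunk_hooks.alloc;
 *        atomic_write_p(u.v, chunk_hooks->alloc);
 *    } while (0);
 *
 * The union converts the typed hook slot into the void ** that
 * atomic_write_p() expects without an explicit cast.
 */
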
static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
    static const chunk_hooks_t uninitialized_hooks =
        CHUNK_HOOKS_INITIALIZER;

    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
        0) {
        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
            chunk_hooks_get(arena);
    }
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

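/*
 * Record the chunk in the global chunks_rtree and, when profiling is enabled,
 * update the counters used for gdump triggering.  Each registered extent
 * contributes size / chunksize to curchunks (e.g. with 2 MiB chunks, a 6 MiB
 * huge allocation adds 3), so highchunks tracks the peak in chunk-sized
 * units.
 */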
bool
chunk_register(const void *chunk, const extent_node_t *node)
{

    assert(extent_node_addr_get(node) == chunk);

    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
        return (true);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nadd = (size == 0) ? 1 : size / chunksize;
        size_t cur = atomic_add_z(&curchunks, nadd);
        size_t high = atomic_read_z(&highchunks);
        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highchunks update race.
             */
            high = atomic_read_z(&highchunks);
        }
        if (cur > high && prof_gdump_get_unlocked())
            prof_gdump();
    }

    return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
    bool err;

    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
    assert(!err);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nsub = (size == 0) ? 1 : size / chunksize;
        assert(atomic_read_z(&curchunks) >= nsub);
        atomic_sub_z(&curchunks, nsub);
    }
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
    extent_node_t key;

    assert(size == CHUNK_CEILING(size));

    extent_node_init(&key, arena, NULL, size, false, false);
    return (extent_tree_szad_nsearch(chunks_szad, &key));
}

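/*
 * chunk_recycle() (below) backs both the cached and retained allocation
 * paths: it searches the given extent trees for a usable region, trims any
 * leading/trailing space back into the trees via the split hook, re-commits
 * the memory if necessary, and honors the *zero request, toggling *zero when
 * already-zeroed memory is returned.
 */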
static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
    bool dalloc_node)
{
    void *ret;
    extent_node_t *node;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed, committed;

    assert(new_addr == NULL || alignment == chunksize);
    /*
     * Cached chunks use the node linkage embedded in their headers, in
     * which case dalloc_node is true, and new_addr is non-NULL because
     * we're operating on a specific chunk.
     */
    assert(dalloc_node || new_addr != NULL);

    alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
    if (new_addr != NULL) {
        extent_node_t key;
        extent_node_init(&key, arena, new_addr, alloc_size, false,
            false);
        node = extent_tree_ad_search(chunks_ad, &key);
    } else {
        node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
            alloc_size);
    }
    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
        size)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
        alignment) - (uintptr_t)extent_node_addr_get(node);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_node_size_get(node) >= leadsize + size);
    trailsize = extent_node_size_get(node) - leadsize - size;
    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
    zeroed = extent_node_zeroed_get(node);
    if (zeroed)
        *zero = true;
    committed = extent_node_committed_get(node);
    if (committed)
        *commit = true;
    /* Split the lead. */
    if (leadsize != 0 &&
        chunk_hooks->split(extent_node_addr_get(node),
        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        return (NULL);
    }
    /* Remove node from the tree. */
    extent_tree_szad_remove(chunks_szad, node);
    extent_tree_ad_remove(chunks_ad, node);
    arena_chunk_cache_maybe_remove(arena, node, cache);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        extent_node_size_set(node, leadsize);
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Split the trail. */
        if (chunk_hooks->split(ret, size + trailsize, size,
            trailsize, false, arena->ind)) {
            if (dalloc_node && node != NULL)
                arena_node_dalloc(arena, node);
            malloc_mutex_unlock(&arena->chunks_mtx);
            chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
                cache, ret, size + trailsize, zeroed, committed);
            return (NULL);
        }
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            node = arena_node_alloc(arena);
            if (node == NULL) {
                malloc_mutex_unlock(&arena->chunks_mtx);
                chunk_record(arena, chunk_hooks, chunks_szad,
                    chunks_ad, cache, ret, size + trailsize,
                    zeroed, committed);
                return (NULL);
            }
        }
        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
            trailsize, zeroed, committed);
        extent_tree_szad_insert(chunks_szad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }

    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
        malloc_mutex_unlock(&arena->chunks_mtx);
        chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
            ret, size, zeroed, committed);
        return (NULL);
    }
    malloc_mutex_unlock(&arena->chunks_mtx);

    assert(dalloc_node || node != NULL);
    if (dalloc_node && node != NULL)
        arena_node_dalloc(arena, node);
    if (*zero) {
        if (!zeroed)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;

            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }
    }
    return (ret);
}

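/*
 * chunk_alloc_core() (below) tries its backing strategies in an order
 * determined by dss_prec: with dss_prec_primary the sbrk-based dss allocator
 * is tried before mmap, with dss_prec_secondary only after mmap fails, and
 * otherwise mmap alone is used.  The precedence normally comes from
 * arena->dss_prec (see chunk_alloc_default()), which defaults to the opt_dss
 * setting declared at the top of this file.
 */
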
/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /* mmap. */
    if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);
    /* "secondary" dss. */
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
        NULL)
        return (ret);

    /* All strategies for allocation failed. */
    return (NULL);
}

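/*
 * chunk_alloc_base() (below) supplies chunks for jemalloc's internal base
 * allocator (metadata), which is why it insists on untouched demand-zeroed
 * memory straight from mmap rather than recycled chunks.
 */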
void *
chunk_alloc_base(size_t size)
{
    void *ret;
    bool zero, commit;

    /*
     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
     * because it's critical that chunk_alloc_base() return untouched
     * demand-zeroed virtual memory.
     */
    zero = true;
    commit = true;
    ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
    void *ret;
    bool commit;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    commit = true;
    ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
        &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
        &commit, dalloc_node);
    if (ret == NULL)
        return (NULL);
    assert(commit);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
    arena_t *arena;

    arena = arena_get(arena_ind, false);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);
    return (arena);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
    void *ret;
    arena_t *arena;

    arena = chunk_arena_get(arena_ind);
    ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
        commit, arena->dss_prec);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

static void *
chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
        &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
        commit, true));
}

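/*
 * Note the two tree pairs: chunks_*_cached hold recently freed chunks whose
 * contents are still intact (see chunk_dalloc_cache()), while
 * chunks_*_retained hold chunks whose backing memory was decommitted or
 * purged but whose virtual address ranges were kept.  chunk_alloc_wrapper()
 * first tries the retained chunks before falling back to the alloc hook.
 */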
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;

    chunk_hooks_assure_initialized(arena, chunk_hooks);

    ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
        alignment, zero, commit);
    if (ret == NULL) {
        ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
            commit, arena->ind);
        if (ret == NULL)
            return (NULL);
    }

    if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
    return (ret);
}

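/*
 * chunk_record() inserts a freed chunk into the given szad/ad trees,
 * coalescing it with adjacent extents (forward, then backward) when the
 * merge hook permits and the committed state matches.
 */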
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool zeroed, bool committed)
{
    bool unzeroed;
    extent_node_t *node, *prev;
    extent_node_t key;

    assert(!cache || !zeroed);
    unzeroed = cache || !zeroed;
    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

    malloc_mutex_lock(&arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
        false, false);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && extent_node_addr_get(node) ==
        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
        committed && !chunk_hooks->merge(chunk, size,
        extent_node_addr_get(node), extent_node_size_get(node), false,
        arena->ind)) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, chunk);
        extent_node_size_set(node, size + extent_node_size_get(node));
        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
            !unzeroed);
        extent_tree_szad_insert(chunks_szad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        node = arena_node_alloc(arena);
        if (node == NULL) {
            /*
             * Node allocation failed, which is an exceedingly
             * unlikely failure.  Leak chunk after making sure its
             * pages have already been purged, so that this is only
             * a virtual memory leak.
             */
            if (cache) {
                chunk_purge_wrapper(arena, chunk_hooks, chunk,
                    size, 0, size);
            }
            goto label_return;
        }
        extent_node_init(node, arena, chunk, size, !unzeroed,
            committed);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szad_insert(chunks_szad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
        extent_node_size_get(prev)) == chunk &&
        extent_node_committed_get(prev) == committed &&
        !chunk_hooks->merge(extent_node_addr_get(prev),
        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szad.
         */
        extent_tree_szad_remove(chunks_szad, prev);
        extent_tree_ad_remove(chunks_ad, prev);
        arena_chunk_cache_maybe_remove(arena, prev, cache);
        extent_tree_szad_remove(chunks_szad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, extent_node_addr_get(prev));
        extent_node_size_set(node, extent_node_size_get(prev) +
            extent_node_size_get(node));
        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
            extent_node_zeroed_get(node));
        extent_tree_szad_insert(chunks_szad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);

        arena_node_dalloc(arena, prev);
    }

label_return:
    malloc_mutex_unlock(&arena->chunks_mtx);
}

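/*
 * chunk_dalloc_cache() returns a chunk to the cached trees with its contents
 * left intact, then gives the arena a chance to purge dirty memory via
 * arena_maybe_purge().
 */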
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
        &arena->chunks_ad_cached, true, chunk, size, false, committed);
    arena_maybe_purge(arena);
}

void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed, bool committed)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    /* Try to deallocate. */
    if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
        return;
    /* Try to decommit; purge if that fails. */
    if (committed) {
        committed = chunk_hooks->decommit(chunk, size, 0, size,
            arena->ind);
    }
    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
        arena->ind);
    chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
        &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{

    if (!have_dss || !chunk_in_dss(chunk))
        return (chunk_dalloc_mmap(chunk, size));
    return (true);
}

void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool committed)
{

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    chunk_hooks->dalloc(chunk, size, committed, arena->ind);
    if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
        JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

    return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
        length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

    chunk_hooks_assure_initialized(arena, chunk_hooks);
    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

    if (!maps_coalesce)
        return (true);
    return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

    if (!maps_coalesce)
        return (true);
    if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
        return (true);

    return (false);
}

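/*
 * Note that chunk_merge_default() above refuses to merge extents that
 * straddle the dss/mmap boundary, presumably because such memory is released
 * through different mechanisms (chunk_dalloc_mmap() vs. dss retention) and a
 * merged extent could no longer be classified by chunk_in_dss().
 */
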
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

    return ((rtree_node_elm_t *)base_alloc(nelms *
        sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);

    /*
     * Verify actual page size is equal to or an integral multiple of
     * configured page size.
     */
    if (info.dwPageSize & ((1U << LG_PAGE) - 1))
        return (true);

    /*
     * Configure chunksize (if not set) to match granularity (usually 64K),
     * so pages_map will always take fast path.
     */
    if (opt_lg_chunk == 0) {
        opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
            - 1;
    }
#else
    if (opt_lg_chunk == 0)
        opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (have_dss && chunk_dss_boot())
        return (true);
    if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
        opt_lg_chunk), chunks_rtree_node_alloc, NULL))
        return (true);

    return (false);
}

void
chunk_postfork_parent(void)
{

    chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

    chunk_dss_postfork_child();
}