#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

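/*
 * Wrapper around sbrk(); every DSS query and extension in this file goes
 * through this single point of contact with the system break.
 */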
static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

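/*
 * Return the DSS precedence used when creating new arenas, or
 * dss_prec_disabled when DSS support is compiled out.
 */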
dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

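/*
 * Set the default DSS precedence.  Returns true (failure) when a
 * non-disabled precedence is requested but DSS support is unavailable.
 */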
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

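/*
 * Allocate a chunk-aligned region by extending the DSS via sbrk(): pad the
 * current break to a chunk boundary, round up to the requested alignment,
 * and recycle any chunk-aligned padding that results.  Returns NULL on
 * failure (e.g. the DSS is exhausted, wraps around, or new_addr cannot be
 * satisfied).
 */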
void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			void *ret, *cpad, *dss_next;
			size_t gap_size, cpad_size;
			intptr_t incr;
			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
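			/*
			 * Example (assuming the default 2 MiB chunk size): if
			 * dss_max sits 0x1000 bytes past a chunk boundary,
			 * gap_size is 0x1ff000, so cpad starts at the next
			 * chunk boundary; ret is dss_max rounded up to
			 * alignment, and the chunk-aligned span [cpad, ret),
			 * if non-empty, is recycled below via
			 * chunk_dalloc_wrapper().
			 */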
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(arena,
					    &chunk_hooks, cpad, cpad_size,
					    false, true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

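/* Return true iff chunk lies within the DSS range [dss_base, dss_max). */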
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

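/*
 * Bootstrap DSS bookkeeping: initialize dss_mtx and record the current
 * break as both the base and the initial end of the DSS.
 */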
bool
chunk_dss_boot(void)
{

	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

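/*
 * Fork handling: dss_mtx is acquired before fork() and restored afterward
 * in both the parent and the child so that DSS state stays consistent.
 */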
void
chunk_dss_prefork(void)
{

	if (have_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (have_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (have_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/