#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
    "disabled",
    "primary",
    "secondary",
    "N/A"
};

/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we
 * use atomic operations to synchronize the setting.
 */
static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;

/* Base address of the DSS. */
static void *dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
    return (sbrk(increment));
#else
    not_implemented();
    return (NULL);
#endif
}

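/*
 * Note on sbrk() semantics relied on throughout this file: sbrk(0) returns
 * the current program break without changing it, and sbrk() returns
 * (void *)-1 on failure.
 */
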
dss_prec_t
chunk_dss_prec_get(void)
{
    dss_prec_t ret;

    if (!have_dss)
        return (dss_prec_disabled);
    ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
    return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

    if (!have_dss)
        return (dss_prec != dss_prec_disabled);
    atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
    return (false);
}

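/*
 * These accessors back the dss-precedence mallctls.  A caller-side sketch
 * (hypothetical arena index, not part of this file) of switching an arena to
 * prefer DSS over mmap might look like:
 *
 *     const char *dss = "primary";
 *     mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 */
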
static void *
chunk_dss_max_update(void *new_addr)
{
    void *max_cur;
    spin_t spinner;

    /*
     * Get the current end of the DSS as max_cur and assure that dss_max is
     * up to date.
     */
    spin_init(&spinner);
    while (true) {
        void *max_prev = atomic_read_p(&dss_max);

        max_cur = chunk_dss_sbrk(0);
        if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
            /*
             * Another thread optimistically updated dss_max.  Wait
             * for it to finish.
             */
            spin_adaptive(&spinner);
            continue;
        }
        if (!atomic_cas_p(&dss_max, max_prev, max_cur))
            break;
    }
    /* Fixed new_addr can only be supported if it is at the edge of DSS. */
    if (new_addr != NULL && max_cur != new_addr)
        return (NULL);

    return (max_cur);
}

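/*
 * dss_max doubles as a cache of the DSS extent and as an advisory lock: a
 * reader that observes dss_max above the kernel-reported break (sbrk(0))
 * knows another thread has an in-flight optimistic extension, and spins
 * until the extension in chunk_alloc_dss() either commits or rolls back.
 */
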
void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
    cassert(have_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);

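    /*
     * For example, on a 64-bit system a (hypothetical) chunk-aligned request
     * of size = SIZE_MAX - chunksize + 1 casts to a negative intptr_t, which
     * would shrink the DSS rather than grow it if passed to sbrk().
     */
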
    if (!atomic_read_u(&dss_exhausted)) {
        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        while (true) {
            void *ret, *max_cur, *dss_next, *dss_prev;
            void *gap_addr_chunk, *gap_addr_subchunk;
            size_t gap_size_chunk, gap_size_subchunk;
            intptr_t incr;

            max_cur = chunk_dss_max_update(new_addr);
            if (max_cur == NULL)
                goto label_oom;

            /*
             * Compute how much chunk-aligned gap space (if any) is
             * necessary to satisfy alignment.  This space can be
             * recycled for later use.
             */
            gap_addr_chunk = (void *)(CHUNK_CEILING(
                (uintptr_t)max_cur));
            ret = (void *)ALIGNMENT_CEILING(
                (uintptr_t)gap_addr_chunk, alignment);
            gap_size_chunk = (uintptr_t)ret -
                (uintptr_t)gap_addr_chunk;
            /*
             * Compute the address just past the end of the desired
             * allocation space.
             */
            dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)max_cur ||
                (uintptr_t)dss_next < (uintptr_t)max_cur)
                goto label_oom; /* Wrap-around. */
            /* Compute the increment, including subchunk bytes. */
            gap_addr_subchunk = max_cur;
            gap_size_subchunk = (uintptr_t)ret -
                (uintptr_t)gap_addr_subchunk;
            incr = gap_size_subchunk + size;

            assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
                size);

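            /*
             * Worked example (hypothetical numbers; assumes the
             * default 2 MiB chunk size): if sbrk(0) == 0x2001000 and
             * alignment is 4 MiB, then gap_addr_chunk == 0x2200000
             * (CHUNK_CEILING), ret == 0x2400000 (ALIGNMENT_CEILING),
             * gap_size_chunk == 0x200000, and gap_size_subchunk ==
             * 0x3ff000, so incr covers both the sub-chunk slop and
             * the request itself.
             */
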
            /*
             * Optimistically update dss_max, and roll back below if
             * sbrk() fails.  No other thread will try to extend the
             * DSS while dss_max is greater than the current DSS
             * max reported by sbrk(0).
             */
            if (atomic_cas_p(&dss_max, max_cur, dss_next))
                continue;

            /* Try to allocate. */
            dss_prev = chunk_dss_sbrk(incr);
            if (dss_prev == max_cur) {
                /* Success. */
                if (gap_size_chunk != 0) {
                    chunk_hooks_t chunk_hooks =
                        CHUNK_HOOKS_INITIALIZER;
                    chunk_dalloc_wrapper(tsdn, arena,
                        &chunk_hooks, gap_addr_chunk,
                        gap_size_chunk,
                        arena_extent_sn_next(arena), false,
                        true);
                }
                if (*zero) {
                    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
                        ret, size);
                    memset(ret, 0, size);
                }
                if (!*commit)
                    *commit = pages_decommit(ret, size);
                return (ret);
            }

            /*
             * Failure, whether due to OOM or a race with a raw
             * sbrk() call from outside the allocator.  Try to roll
             * back optimistic dss_max update; if rollback fails,
             * it's due to another caller of this function having
             * succeeded since this invocation started, in which
             * case rollback is not necessary.
             */
            atomic_cas_p(&dss_max, dss_next, max_cur);
            if (dss_prev == (void *)-1) {
                /* OOM. */
                atomic_write_u(&dss_exhausted, (unsigned)true);
                goto label_oom;
            }
        }
    }

label_oom:
    return (NULL);
}

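/*
 * Once sbrk() reports failure, dss_exhausted latches on permanently: the DSS
 * grows toward other mappings, and once it collides with one, further
 * extension is unlikely to succeed, so subsequent calls fail fast and let
 * the caller fall back to mmap-based chunk allocation.
 */
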
static bool
chunk_in_dss_helper(void *chunk, void *max)
{

    return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
        (uintptr_t)max);
}

bool
chunk_in_dss(void *chunk)
{

    cassert(have_dss);

    return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
}

bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{
    void *max;

    cassert(have_dss);

    max = atomic_read_p(&dss_max);
    return (chunk_in_dss_helper(chunk_a, max) ==
        chunk_in_dss_helper(chunk_b, max));
}

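/*
 * Two chunks are mergeable only if both lie inside the DSS or both lie
 * outside it; merging across the boundary would splice sbrk()-backed and
 * mmap()-backed memory into one extent, which could not be deallocated
 * uniformly.  A single atomic_read_p() of dss_max keeps the two helper
 * calls consistent with each other.
 */
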
void
chunk_dss_boot(void)
{

    cassert(have_dss);

    dss_base = chunk_dss_sbrk(0);
    dss_exhausted = (unsigned)(dss_base == (void *)-1);
    dss_max = dss_base;
}

/******************************************************************************/