]>
Commit | Line | Data |
---|---|---|
970d7e83 LB |
1 | #define JEMALLOC_CHUNK_DSS_C_ |
2 | #include "jemalloc/internal/jemalloc_internal.h" | |
3 | /******************************************************************************/ | |
4 | /* Data. */ | |
5 | ||
/*
 * Human-readable names for the dss_prec_t values, in enum order.  The final
 * "N/A" entry corresponds to the limit/unsupported value.
 */
const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};
12 | ||
/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static unsigned		dss_prec_default = (unsigned)DSS_PREC_DEFAULT;

/* Base address of the DSS (set once by chunk_dss_boot()). */
static void		*dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned		dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void		*dss_max;
27 | ||
28 | /******************************************************************************/ | |
29 | ||
/*
 * Thin wrapper around sbrk(2).  When the build lacks DSS support
 * (JEMALLOC_DSS undefined) this must never be reached; not_implemented()
 * flags the error (project helper — presumably reports/aborts).
 */
static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}
970d7e83 LB |
41 | |
42 | dss_prec_t | |
43 | chunk_dss_prec_get(void) | |
44 | { | |
45 | dss_prec_t ret; | |
46 | ||
1a4d82fc | 47 | if (!have_dss) |
970d7e83 | 48 | return (dss_prec_disabled); |
3b2f2976 | 49 | ret = (dss_prec_t)atomic_read_u(&dss_prec_default); |
970d7e83 LB |
50 | return (ret); |
51 | } | |
52 | ||
53 | bool | |
54 | chunk_dss_prec_set(dss_prec_t dss_prec) | |
55 | { | |
56 | ||
1a4d82fc JJ |
57 | if (!have_dss) |
58 | return (dss_prec != dss_prec_disabled); | |
3b2f2976 | 59 | atomic_write_u(&dss_prec_default, (unsigned)dss_prec); |
970d7e83 LB |
60 | return (false); |
61 | } | |
62 | ||
/*
 * Refresh the cached dss_max so it matches the current end of the DSS as
 * reported by sbrk(0), then validate a caller-requested fixed address.
 *
 * Returns the current DSS end on success, or NULL if new_addr was specified
 * but does not coincide with that end (a fixed allocation is only possible
 * at the very edge of the DSS).
 */
static void *
chunk_dss_max_update(void *new_addr)
{
	void *max_cur;
	spin_t spinner;

	/*
	 * Get the current end of the DSS as max_cur and assure that dss_max is
	 * up to date.
	 */
	spin_init(&spinner);
	while (true) {
		void *max_prev = atomic_read_p(&dss_max);

		max_cur = chunk_dss_sbrk(0);
		if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
			/*
			 * Another thread optimistically updated dss_max.  Wait
			 * for it to finish.
			 */
			spin_adaptive(&spinner);
			continue;
		}
		/*
		 * atomic_cas_p() returns false on success (same convention as
		 * its use in chunk_alloc_dss()), so stop once dss_max has been
		 * published; otherwise dss_max moved underneath us — retry.
		 */
		if (!atomic_cas_p(&dss_max, max_prev, max_cur))
			break;
	}
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr)
		return (NULL);

	return (max_cur);
}
95 | ||
/*
 * Allocate a chunk from the DSS (sbrk heap).
 *
 * new_addr:  if non-NULL, the allocation must land exactly at this address,
 *            which is only possible at the current DSS edge; otherwise fail.
 * size:      requested size; must be a nonzero multiple of chunksize.
 * alignment: required alignment; must be a nonzero multiple of chunksize.
 * zero:      in/out; if *zero, the returned memory is zeroed.
 * commit:    in/out; if !*commit, pages_decommit() is attempted and *commit
 *            records whether the pages remain committed.
 *
 * Returns the allocated chunk, or NULL on failure (DSS exhausted, address
 * wrap-around, or unsatisfiable new_addr).
 *
 * Concurrency: dss_max is advanced optimistically via CAS before calling
 * sbrk(); losers of the CAS retry, and a failed sbrk() rolls dss_max back.
 */
void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	if (!atomic_read_u(&dss_exhausted)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *ret, *max_cur, *dss_next, *dss_prev;
			void *gap_addr_chunk, *gap_addr_subchunk;
			size_t gap_size_chunk, gap_size_subchunk;
			intptr_t incr;

			/* Sync dss_max with sbrk(0); honors fixed new_addr. */
			max_cur = chunk_dss_max_update(new_addr);
			if (max_cur == NULL)
				goto label_oom;

			/*
			 * Compute how much chunk-aligned gap space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			gap_addr_chunk = (void *)(CHUNK_CEILING(
			    (uintptr_t)max_cur));
			ret = (void *)ALIGNMENT_CEILING(
			    (uintptr_t)gap_addr_chunk, alignment);
			gap_size_chunk = (uintptr_t)ret -
			    (uintptr_t)gap_addr_chunk;
			/*
			 * Compute the address just past the end of the desired
			 * allocation space.
			 */
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur)
				goto label_oom; /* Wrap-around. */
			/* Compute the increment, including subchunk bytes. */
			gap_addr_subchunk = max_cur;
			gap_size_subchunk = (uintptr_t)ret -
			    (uintptr_t)gap_addr_subchunk;
			incr = gap_size_subchunk + size;

			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
			    size);

			/*
			 * Optimistically update dss_max, and roll back below if
			 * sbrk() fails.  No other thread will try to extend the
			 * DSS while dss_max is greater than the current DSS
			 * max reported by sbrk(0).
			 */
			if (atomic_cas_p(&dss_max, max_cur, dss_next))
				continue; /* Lost the race; retry. */

			/* Try to allocate. */
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				if (gap_size_chunk != 0) {
					/* Recycle the alignment gap. */
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(tsdn, arena,
					    &chunk_hooks, gap_addr_chunk,
					    gap_size_chunk,
					    arena_extent_sn_next(arena), false,
					    true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}

			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.  Try to roll
			 * back optimistic dss_max update; if rollback fails,
			 * it's due to another caller of this function having
			 * succeeded since this invocation started, in which
			 * case rollback is not necessary.
			 */
			atomic_cas_p(&dss_max, dss_next, max_cur);
			if (dss_prev == (void *)-1) {
				/* OOM: sbrk() failed; mark DSS exhausted. */
				atomic_write_u(&dss_exhausted, (unsigned)true);
				goto label_oom;
			}
		}
	}
label_oom:
	return (NULL);
}
206 | ||
3b2f2976 XL |
207 | static bool |
208 | chunk_in_dss_helper(void *chunk, void *max) | |
970d7e83 | 209 | { |
970d7e83 | 210 | |
3b2f2976 XL |
211 | return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < |
212 | (uintptr_t)max); | |
970d7e83 LB |
213 | } |
214 | ||
215 | bool | |
3b2f2976 | 216 | chunk_in_dss(void *chunk) |
970d7e83 LB |
217 | { |
218 | ||
1a4d82fc | 219 | cassert(have_dss); |
970d7e83 | 220 | |
3b2f2976 | 221 | return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max))); |
970d7e83 LB |
222 | } |
223 | ||
3b2f2976 XL |
224 | bool |
225 | chunk_dss_mergeable(void *chunk_a, void *chunk_b) | |
970d7e83 | 226 | { |
3b2f2976 | 227 | void *max; |
970d7e83 | 228 | |
3b2f2976 | 229 | cassert(have_dss); |
970d7e83 | 230 | |
3b2f2976 XL |
231 | max = atomic_read_p(&dss_max); |
232 | return (chunk_in_dss_helper(chunk_a, max) == | |
233 | chunk_in_dss_helper(chunk_b, max)); | |
970d7e83 LB |
234 | } |
235 | ||
236 | void | |
3b2f2976 | 237 | chunk_dss_boot(void) |
970d7e83 LB |
238 | { |
239 | ||
3b2f2976 XL |
240 | cassert(have_dss); |
241 | ||
242 | dss_base = chunk_dss_sbrk(0); | |
243 | dss_exhausted = (unsigned)(dss_base == (void *)-1); | |
244 | dss_max = dss_base; | |
970d7e83 LB |
245 | } |
246 | ||
247 | /******************************************************************************/ |