/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/madvise.h"
#include "qemu/mprotect.h"
#include "qemu/memalign.h"
#include "qemu/cacheinfo.h"
#include "qemu/qtree.h"
#include "qapi/error.h"
#include "tcg/tcg.h"
#include "exec/translation-block.h"
#include "tcg-internal.h"
#include "host/cpuinfo.h"

/*
 * Local source-level compatibility with Unix.
 * Used by tcg_region_init below.
 */
#if defined(_WIN32)
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4
#endif

struct tcg_region_tree {
    QemuMutex lock;
    QTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i];
 * the i-th struct starts at offset i * tree_size.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
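    /* The unsigned cast makes a pointer below the buffer fail this one check too. */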
    return (size_t)(p - region.start_aligned) <= region.total_size;
}

#ifndef CONFIG_TCG_INTERPRETER
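/*
 * Protection bits to use for the executable mapping: on AArch64 Linux
 * hosts with Branch Target Identification support, also request PROT_BTI
 * so the generated code pages are BTI-guarded.
 */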
static int host_prot_read_exec(void)
{
#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
    if (cpuinfo & CPUINFO_BTI) {
        return PROT_READ | PROT_EXEC | PROT_BTI;
    }
#endif
    return PROT_READ | PROT_EXEC;
}
#endif

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups pass a key with its .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tb_destroy(gpointer value)
{
    TranslationBlock *tb = value;
    qemu_spin_destroy(&tb->jmp_lock);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
    }
}

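/*
 * Map a host code pointer to the region tree covering it.  The pointer may
 * come from either the rw or the rx view of a split-wx buffer; anything
 * outside the code_gen_buffer (e.g. a stray pc from a signal handler)
 * yields NULL.
 */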
static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert.  The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    q_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    q_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = q_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        q_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += q_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        q_tree_ref(rt->tree);
        q_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}

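/*
 * Point a TCGContext at the given region.  code_gen_highwater is set
 * TCG_HIGHWATER bytes below the end of the region so that translation
 * can request a new region (see tcg_region_alloc) before overrunning
 * the buffer.
 */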
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size.  If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
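    /*
     * For example, a 1 GiB buffer on an 8-vCPU MTTCG guest gives
     * n_regions = 512, which the cap below reduces to 8 * 8 = 64.
     */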
    return MIN(n_regions, max_cpus * 8);
#endif
}

/*
 * Minimum size of the code gen buffer.  This number is arbitrarily chosen,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place.  On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * User-mode emulation typically means running multiple instances of the
 * translator, so don't go too nuts with our default code gen buffer lest
 * we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
    (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
     ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size());
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size());

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size());
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE | PROT_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

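/*
 * Split-wx via memfd: back the buffer with an anonymous memfd and map it
 * twice, once read-write for the translator and once read-execute for
 * running the generated code.  tcg_splitwx_diff records the offset from
 * the rw view to the rx view.
 */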
static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

    buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "failed to map shared memory for execute");
        goto fail;
    }

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail:
    /* buf_rx is always equal to MAP_FAILED here and does not require cleanup */
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

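/*
 * Split-wx via mach_vm_remap: allocate the rw mapping as ordinary
 * anonymous memory, then ask the Mach VM layer to map the same pages a
 * second time and flip that second view to read-execute.
 */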
static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
     * rejects a permission change from RWX -> NONE when reserving the
     * guard pages later.  We can go the other way with the same number
     * of syscalls, so always begin with PROT_NONE.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In system-mode the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG.  In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region.  Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice.  Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in system-mode.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size();
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer.  */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }

    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx.  */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;
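
    /*
     * Each region now looks like this, with the last page of its stride
     * serving as the guard page:
     *
     *   |<--------------- stride --------------->|
     *   |  usable code (region.size)  |  guard   |
     */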

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated.  For now, the first region begins at
     * the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur.  Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PROT_READ | PROT_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= host_prot_read_exec();
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PROT_READ | PROT_WRITE | PROT_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PROT_READ | PROT_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
#ifdef CONFIG_POSIX
                rc = mprotect(start, end - start, need_prot);
#else
                g_assert_not_reached();
#endif
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /* Guard pages are nice for bug detection but are not essential. */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region.  */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
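    /*
     * region.total_size already excludes the final guard page (it was
     * trimmed in tcg_region_init), so only the n - 1 interior guard pages
     * and the per-region high-water slack remain to be subtracted.
     */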
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}