/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/timer.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

#include "exec/cputlb.h"
#include "translate-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

/* Code generation and translation blocks */
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

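/* Worked example (illustrative values only): with L1_MAP_ADDR_SPACE_BITS = 32,
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, there are 20 page-index bits to
   map; 20 % 10 == 0, which is < 4, so V_L1_BITS becomes 10.  That gives a
   1024-entry top level (V_L1_SIZE) and V_L1_SHIFT = 32 - 12 - 10 = 10,
   i.e. exactly one further 10-bit level below the top.  The "< 4" test
   only exists to avoid a degenerate, nearly-empty top level.  */
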
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
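
/* Illustrative call sequence (a sketch only, not compiled; tb_gen_code()
   further below performs these exact steps for real): */
#if 0
    TranslationBlock *tb = tb_alloc(pc);   /* reserve the next descriptor */
    int code_size;

    tb->tc_ptr = code_gen_ptr;             /* host code is emitted here */
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_size);     /* guest insns -> TCG ops -> host code */
#endif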

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
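
/* In outline: gen_intermediate_code_pc() re-translates the TB while
   recording per-op metadata (s->gen_opc_instr_start, s->gen_opc_icount),
   and tcg_gen_code_search_pc() re-emits host code until the output
   crosses searched_pc - tc_ptr bytes, returning the index of the op in
   flight at that point.  Walking back to the nearest
   gen_opc_instr_start[] marker finds the guest instruction whose state
   restore_state_to_opc() then rebuilds.  */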

bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
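
/* Rounding example: with 4 KiB pages, addr = 0x2345 and size = 0x100
   yield start = 0x2000 and end = 0x3000, so the mprotect() call covers
   the whole page containing the requested range.  */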

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
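
/* Worked example (illustrative; assumes L2_BITS == 10 and V_L1_SHIFT == 20):
   for page index 0x12345678 the walk reads
       l1_map[(0x12345678 >> 20) & (V_L1_SIZE - 1)]       top level
       level2[(0x12345678 >> 10) & 0x3ff] == level2[0x115]
       leaf[0x12345678 & 0x3ff]           == leaf[0x278]
   allocating the intermediate tables on demand when 'alloc' is set.  */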

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
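
/* For example, on an __arm__ host (MAX_CODE_GEN_BUFFER_SIZE = 16 MiB) a
   requested tb_size of 64 MiB is clamped down to 16 MiB, while a request
   of 512 KiB is raised to MIN_CODE_GEN_BUFFER_SIZE (1 MiB).  */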

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);

    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(code_gen_buffer, code_gen_buffer_size, QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
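
/* TB descriptors and their host code are bump-allocated in step, so
   freeing is effectively LIFO: releasing the most recently generated TB
   rewinds both nb_tbs and code_gen_ptr, while releasing any older TB is
   a no-op until the next tb_flush().  */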

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer)
        > code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
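
/* The jump lists above rely on pointer tagging: TranslationBlock
   pointers are at least 4-byte aligned, so the low two bits of a stored
   pointer encode which jump slot of the pointing TB links here (0 or 1),
   with the value 2 marking the list head (jmp_first).  Masking with ~3
   recovers the real pointer; tagged values are written as, e.g.,
   (TranslationBlock *)((uintptr_t)tb | 2).  */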

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
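
/* Example: set_bits(tab, 5, 7) marks bits 5..11, ORing tab[0] with 0xe0
   (bits 5-7) and tab[1] with 0x0f (bits 8-11); whole bytes in the middle
   of longer ranges take the 0xff fast path.  */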

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
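
/* Example of the cross-page check: a TB starting 8 bytes before a 4 KiB
   page boundary that translates more than 8 bytes of guest code spills
   onto the next page; virt_page2 then differs from pc's page, phys_page2
   is resolved, and tb_link_page() registers the TB on both pages so that
   a write to either page invalidates it.  */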

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb) {
                    cpu_interrupt(env, env->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
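
/* The binary search is valid because TBs are carved out of
   code_gen_buffer in allocation order, so tbs[i].tc_ptr increases
   monotonically with i.  When no exact match exists, tbs[m_max] is the
   last TB whose host code starts at or before tc_ptr, i.e. the one
   containing it.  */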

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2) {
                break;
            }
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */

void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer)
                / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

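/* User-mode variant of cpu_interrupt(): there is no interrupt
   controller to model, so just record the pending mask and force the
   CPU out of its current translation block. */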
void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls the callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

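/* Flush the region accumulated so far to the client callback, then
   either start a new region at 'end' (if 'new_prot' is non-zero) or
   record that the walk is currently outside any mapped region. */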
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

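/* Recursively walk one level of the multi-level page table.  Leaf
   (level 0) entries hold PageDesc arrays whose flags are compared
   against the current region's protection; interior levels recurse
   into their children.  A NULL entry terminates any open region. */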
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

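/* Entry point for the region walk: visit every top-level (V_L1) page
   table entry, then flush whatever region is still open.  Returns the
   first non-zero value returned by 'fn', or 0 on success. */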
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

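/* walk_memory_regions() callback used by page_dump(): print one region
   as a /proc/<pid>/maps-like "start-end size prot" line. */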
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

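/* Return the PAGE_* flags for a guest page, or 0 if the page has never
   been mapped. */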
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

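/* Check that every page in [start, start + len) carries the requested
   PAGE_* permissions.  Write access is checked against PAGE_WRITE_ORG,
   so that pages write-protected only to guard translated code can be
   unprotected on the fly.  Returns 0 on success, -1 on failure. */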
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

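        /* A single host page may contain several target pages when the
           host page size exceeds the target's; mark them all writable
           again and invalidate any translated code they hold, since it
           may now be modified. */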
        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code.  */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */