/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

#include "cputlb.h"
#include "translate-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

/* Code generation and translation blocks */
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
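
/* Added note (worked example, not in the original source): assuming a
   64-bit user-mode build where L1_MAP_ADDR_SPACE_BITS == 64,
   TARGET_PAGE_BITS == 12 and L2_BITS == 10 (L2_BITS is defined
   elsewhere, typically in translate-all.h), 64 - 12 = 52 bits remain to
   be mapped.  52 % 10 == 2, which is < 4, so the top level is widened:
   V_L1_BITS = 2 + 10 = 12 and V_L1_SHIFT = 64 - 12 - 12 = 40, giving a
   split of [12 L1 bits][4 levels of 10 bits][12 page-offset bits].  */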

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Return non-zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 */
int cpu_restore_state(TranslationBlock *tb,
                      CPUArchState *env, uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
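
/* Added note (summary, not in the original source): cpu_restore_state()
   works by re-translating the TB from scratch.  gen_intermediate_code_pc()
   fills the gen_opc_* side tables (guest PC, instruction-start flag and
   icount per TCG opcode), and tcg_gen_code_search_pc() re-emits host code
   until the output crosses 'searched_pc - tc_ptr' bytes, returning the
   opcode index it reached; that index is backed up to the start of the
   guest instruction and handed to restore_state_to_opc().  */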

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
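
/* Added note (worked example, not in the original source): 'index' is a
   page number, i.e. an address already shifted right by TARGET_PAGE_BITS
   by the caller.  Assuming L2_BITS == 10 and V_L1_SHIFT == 40 as in the
   note above, the walk peels the index from the top: bits [51..40]
   select the l1_map slot, each loop iteration consumes 10 more bits, and
   the final 'index & (L2_SIZE - 1)' picks the PageDesc within the
   bottom-level array.  */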

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
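
/* Added note (illustration, not in the original source): with the
   defaults above, a tb_size of 0 under USE_STATIC_CODE_GEN_BUFFER
   selects the 32MB DEFAULT_CODE_GEN_BUFFER_SIZE, and an explicit
   request (e.g. from QEMU's -tb-size option) is clamped to
   [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE], i.e.
   [1MB, 2GB] on an x86-64 host.  */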

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);

    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(code_gen_buffer, code_gen_buffer_size, QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer)
        > code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
765 | static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) | |
766 | { | |
767 | TranslationBlock *tb1; | |
768 | unsigned int n1; | |
769 | ||
770 | for (;;) { | |
771 | tb1 = *ptb; | |
772 | n1 = (uintptr_t)tb1 & 3; | |
773 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
774 | if (tb1 == tb) { | |
775 | *ptb = tb1->page_next[n1]; | |
776 | break; | |
777 | } | |
778 | ptb = &tb1->page_next[n1]; | |
779 | } | |
780 | } | |
781 | ||
782 | static inline void tb_jmp_remove(TranslationBlock *tb, int n) | |
783 | { | |
784 | TranslationBlock *tb1, **ptb; | |
785 | unsigned int n1; | |
786 | ||
787 | ptb = &tb->jmp_next[n]; | |
788 | tb1 = *ptb; | |
789 | if (tb1) { | |
790 | /* find tb(n) in circular list */ | |
791 | for (;;) { | |
792 | tb1 = *ptb; | |
793 | n1 = (uintptr_t)tb1 & 3; | |
794 | tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); | |
795 | if (n1 == n && tb1 == tb) { | |
796 | break; | |
797 | } | |
798 | if (n1 == 2) { | |
799 | ptb = &tb1->jmp_first; | |
800 | } else { | |
801 | ptb = &tb1->jmp_next[n1]; | |
802 | } | |
803 | } | |
804 | /* now we can suppress tb(n) from the list */ | |
805 | *ptb = tb->jmp_next[n]; | |
806 | ||
807 | tb->jmp_next[n] = NULL; | |
808 | } | |
809 | } | |
810 | ||
811 | /* reset the jump entry 'n' of a TB so that it is not chained to | |
812 | another TB */ | |
813 | static inline void tb_reset_jump(TranslationBlock *tb, int n) | |
814 | { | |
815 | tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n])); | |
816 | } | |

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
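
/* Added note (worked example, not in the original source): set_bits()
   marks the bit range [start, start + len) in a bitmap where bit i
   lives in byte i >> 3.  E.g. set_bits(tab, 3, 7) sets bits 3..9: the
   range straddles a byte boundary, so it ORs 0xf8 into tab[0]
   (bits 3..7) and then 0x03 into tab[1] (bits 8..9).  */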

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb) {
                    cpu_interrupt(env, env->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
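
/* Added note (not in the original source): tbs[] is filled in ascending
   tc_ptr order because TBs are carved sequentially out of
   code_gen_buffer, so when the loop exits without an exact match,
   tbs[m_max] is the last TB starting at or below tc_ptr, i.e. the block
   that contains the host address.  */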

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2) {
                break;
            }
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */

void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
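    /* Added note (not in the original source): cpu_restore_state() above
       credits back the instructions that had not yet executed, leaving
       icount_decr.u16.low as it was at the start of the faulting
       instruction, so this subtraction yields exactly the number of
       guest instructions that completed before the I/O instruction.  */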
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer)
                / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
1603 | ||
1604 | #else /* CONFIG_USER_ONLY */ | |
1605 | ||
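| /* User-mode version: record the pending interrupt and unchain the | |
| current TB so the execution loop notices the request promptly. */ | |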
1606 | void cpu_interrupt(CPUArchState *env, int mask) | |
1607 | { | |
1608 | env->interrupt_request |= mask; | |
1609 | cpu_unlink_tb(env); | |
1610 | } | |
1611 | ||
1612 | /* | |
1613 | * Walks guest process memory "regions" one by one | |
1614 | * and calls callback function 'fn' for each region. | |
1615 | */ | |
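| /* A "region" is a maximal run of consecutive pages with identical | |
| protection flags. 'start' is the beginning of the region being | |
| accumulated, or -1 if no region is currently open. */ | |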
1616 | struct walk_memory_regions_data { | |
1617 | walk_memory_regions_fn fn; | |
1618 | void *priv; | |
1619 | uintptr_t start; | |
1620 | int prot; | |
1621 | }; | |
1622 | ||
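| /* Emit the region accumulated so far (if any) ending at 'end', then | |
| open a new region there with 'new_prot' (a prot of 0 means the pages | |
| are unmapped, so no region is opened). */ | |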
1623 | static int walk_memory_regions_end(struct walk_memory_regions_data *data, | |
1624 | abi_ulong end, int new_prot) | |
1625 | { | |
1626 | if (data->start != -1ul) { | |
1627 | int rc = data->fn(data->priv, data->start, end, data->prot); | |
1628 | if (rc != 0) { | |
1629 | return rc; | |
1630 | } | |
1631 | } | |
1632 | ||
1633 | data->start = (new_prot ? end : -1ul); | |
1634 | data->prot = new_prot; | |
1635 | ||
1636 | return 0; | |
1637 | } | |
1638 | ||
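| /* Recursively walk one subtree of the multi-level page table: | |
| level 0 entries are PageDesc arrays, higher levels are pointer | |
| tables; 'base' is the guest address prefix for this subtree. */ | |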
1639 | static int walk_memory_regions_1(struct walk_memory_regions_data *data, | |
1640 | abi_ulong base, int level, void **lp) | |
1641 | { | |
1642 | abi_ulong pa; | |
1643 | int i, rc; | |
1644 | ||
1645 | if (*lp == NULL) { | |
1646 | return walk_memory_regions_end(data, base, 0); | |
1647 | } | |
1648 | ||
1649 | if (level == 0) { | |
1650 | PageDesc *pd = *lp; | |
1651 | ||
1652 | for (i = 0; i < L2_SIZE; ++i) { | |
1653 | int prot = pd[i].flags; | |
1654 | ||
1655 | pa = base | (i << TARGET_PAGE_BITS); | |
1656 | if (prot != data->prot) { | |
1657 | rc = walk_memory_regions_end(data, pa, prot); | |
1658 | if (rc != 0) { | |
1659 | return rc; | |
1660 | } | |
1661 | } | |
1662 | } | |
1663 | } else { | |
1664 | void **pp = *lp; | |
1665 | ||
1666 | for (i = 0; i < L2_SIZE; ++i) { | |
1667 | pa = base | ((abi_ulong)i << | |
1668 | (TARGET_PAGE_BITS + L2_BITS * level)); | |
1669 | rc = walk_memory_regions_1(data, pa, level - 1, pp + i); | |
1670 | if (rc != 0) { | |
1671 | return rc; | |
1672 | } | |
1673 | } | |
1674 | } | |
1675 | ||
1676 | return 0; | |
1677 | } | |
1678 | ||
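| /* Walk every top-level (V_L1) entry; the final call with end = 0 | |
| closes any region still open, 0 being the wrapped-around top of | |
| the guest address space. */ | |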
1679 | int walk_memory_regions(void *priv, walk_memory_regions_fn fn) | |
1680 | { | |
1681 | struct walk_memory_regions_data data; | |
1682 | uintptr_t i; | |
1683 | ||
1684 | data.fn = fn; | |
1685 | data.priv = priv; | |
1686 | data.start = -1ul; | |
1687 | data.prot = 0; | |
1688 | ||
1689 | for (i = 0; i < V_L1_SIZE; i++) { | |
1690 | int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT, | |
1691 | V_L1_SHIFT / L2_BITS - 1, l1_map + i); | |
1692 | ||
1693 | if (rc != 0) { | |
1694 | return rc; | |
1695 | } | |
1696 | } | |
1697 | ||
1698 | return walk_memory_regions_end(&data, 0, 0); | |
1699 | } | |
1700 | ||
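| /* Prints one line per region, e.g. (hypothetical addresses): | |
| 00400000-00448000 00048000 r-x */ | |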
1701 | static int dump_region(void *priv, abi_ulong start, | |
1702 | abi_ulong end, unsigned long prot) | |
1703 | { | |
1704 | FILE *f = (FILE *)priv; | |
1705 | ||
1706 | (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx | |
1707 | " "TARGET_ABI_FMT_lx" %c%c%c\n", | |
1708 | start, end, end - start, | |
1709 | ((prot & PAGE_READ) ? 'r' : '-'), | |
1710 | ((prot & PAGE_WRITE) ? 'w' : '-'), | |
1711 | ((prot & PAGE_EXEC) ? 'x' : '-')); | |
1712 | ||
1713 | return 0; | |
1714 | } | |
1715 | ||
1716 | /* dump memory mappings */ | |
1717 | void page_dump(FILE *f) | |
1718 | { | |
1719 | (void) fprintf(f, "%-8s %-8s %-8s %s\n", | |
1720 | "start", "end", "size", "prot"); | |
1721 | walk_memory_regions(f, dump_region); | |
1722 | } | |
1723 | ||
1724 | int page_get_flags(target_ulong address) | |
1725 | { | |
1726 | PageDesc *p; | |
1727 | ||
1728 | p = page_find(address >> TARGET_PAGE_BITS); | |
1729 | if (!p) { | |
1730 | return 0; | |
1731 | } | |
1732 | return p->flags; | |
1733 | } | |
1734 | ||
1735 | /* Modify the flags of a page and invalidate the code if necessary. | |
1736 | The flag PAGE_WRITE_ORG is set automatically whenever PAGE_WRITE | |
1737 | is set. The mmap_lock should already be held. */ | |
1738 | void page_set_flags(target_ulong start, target_ulong end, int flags) | |
1739 | { | |
1740 | target_ulong addr, len; | |
1741 | ||
1742 | /* This function should never be called with addresses outside the | |
1743 | guest address space. If this assert fires, it probably indicates | |
1744 | a missing call to h2g_valid. */ | |
1745 | #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS | |
1746 | assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); | |
1747 | #endif | |
1748 | assert(start < end); | |
1749 | ||
1750 | start = start & TARGET_PAGE_MASK; | |
1751 | end = TARGET_PAGE_ALIGN(end); | |
1752 | ||
1753 | if (flags & PAGE_WRITE) { | |
1754 | flags |= PAGE_WRITE_ORG; | |
1755 | } | |
1756 | ||
1757 | for (addr = start, len = end - start; | |
1758 | len != 0; | |
1759 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | |
1760 | PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); | |
1761 | ||
1762 | /* If the page is being made writable but was not writable | |
1763 | before, invalidate any translated code it contains. */ | |
1764 | if (!(p->flags & PAGE_WRITE) && | |
1765 | (flags & PAGE_WRITE) && | |
1766 | p->first_tb) { | |
1767 | tb_invalidate_phys_page(addr, 0, NULL); | |
1768 | } | |
1769 | p->flags = flags; | |
1770 | } | |
1771 | } | |
1772 | ||
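| /* Return 0 if the pages in [start, start + len) are valid and allow | |
| the accesses requested in 'flags', -1 otherwise. Pages that are | |
| write-protected only because they contain translated code are | |
| unprotected on demand. */ | |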
1773 | int page_check_range(target_ulong start, target_ulong len, int flags) | |
1774 | { | |
1775 | PageDesc *p; | |
1776 | target_ulong end; | |
1777 | target_ulong addr; | |
1778 | ||
1779 | /* This function should never be called with addresses outside the | |
1780 | guest address space. If this assert fires, it probably indicates | |
1781 | a missing call to h2g_valid. */ | |
1782 | #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS | |
1783 | assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); | |
1784 | #endif | |
1785 | ||
1786 | if (len == 0) { | |
1787 | return 0; | |
1788 | } | |
1789 | if (start + len - 1 < start) { | |
1790 | /* We've wrapped around. */ | |
1791 | return -1; | |
1792 | } | |
1793 | ||
1794 | /* must do this before we lose bits in the next step */ | |
1795 | end = TARGET_PAGE_ALIGN(start + len); | |
1796 | start = start & TARGET_PAGE_MASK; | |
1797 | ||
1798 | for (addr = start, len = end - start; | |
1799 | len != 0; | |
1800 | len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { | |
1801 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1802 | if (!p) { | |
1803 | return -1; | |
1804 | } | |
1805 | if (!(p->flags & PAGE_VALID)) { | |
1806 | return -1; | |
1807 | } | |
1808 | ||
1809 | if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { | |
1810 | return -1; | |
1811 | } | |
1812 | if (flags & PAGE_WRITE) { | |
1813 | if (!(p->flags & PAGE_WRITE_ORG)) { | |
1814 | return -1; | |
1815 | } | |
1816 | /* unprotect the page if it was made read-only because it | |
1817 | contains translated code */ | |
1818 | if (!(p->flags & PAGE_WRITE)) { | |
1819 | if (!page_unprotect(addr, 0, NULL)) { | |
1820 | return -1; | |
1821 | } | |
1822 | } | |
1824 | } | |
1825 | } | |
1826 | return 0; | |
1827 | } | |
1828 | ||
1829 | /* called from signal handler: invalidate the code and unprotect the | |
1830 | page. Return TRUE if the fault was successfully handled. */ | |
1831 | int page_unprotect(target_ulong address, uintptr_t pc, void *puc) | |
1832 | { | |
1833 | unsigned int prot; | |
1834 | PageDesc *p; | |
1835 | target_ulong host_start, host_end, addr; | |
1836 | ||
1837 | /* Technically this isn't safe inside a signal handler. However, we | |
1838 | know this only ever happens in a synchronous SEGV handler, so in | |
1839 | practice it seems to be OK. */ | |
1840 | mmap_lock(); | |
1841 | ||
1842 | p = page_find(address >> TARGET_PAGE_BITS); | |
1843 | if (!p) { | |
1844 | mmap_unlock(); | |
1845 | return 0; | |
1846 | } | |
1847 | ||
1848 | /* If the page was originally writable (PAGE_WRITE_ORG) but is | |
1849 | currently write-protected, restore write access. */ | |
1850 | if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { | |
1851 | host_start = address & qemu_host_page_mask; | |
1852 | host_end = host_start + qemu_host_page_size; | |
1853 | ||
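| /* mprotect() works at host-page granularity, and one host page may | |
| hold several target pages: restore PAGE_WRITE on each of them and | |
| invalidate any translated code they contain. */ | |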
1854 | prot = 0; | |
1855 | for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { | |
1856 | p = page_find(addr >> TARGET_PAGE_BITS); | |
1857 | p->flags |= PAGE_WRITE; | |
1858 | prot |= p->flags; | |
1859 | ||
1860 | /* and since the content will be modified, we must invalidate | |
1861 | the corresponding translated code. */ | |
1862 | tb_invalidate_phys_page(addr, pc, puc); | |
1863 | #ifdef DEBUG_TB_CHECK | |
1864 | tb_invalidate_check(addr); | |
1865 | #endif | |
1866 | } | |
1867 | mprotect((void *)g2h(host_start), qemu_host_page_size, | |
1868 | prot & PAGE_BITS); | |
1869 | ||
1870 | mmap_unlock(); | |
1871 | return 1; | |
1872 | } | |
1873 | mmap_unlock(); | |
1874 | return 0; | |
1875 | } | |
1876 | #endif /* CONFIG_USER_ONLY */ |