/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

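/* For illustration: with TARGET_PAGE_BITS == 12, L2_BITS == 10 and a
   32-bit guest address space (example values only; the real split
   depends on the configuration), V_L1_BITS == 10 and V_L1_SHIFT == 10,
   so a page index decomposes into exactly two 10-bit levels:

       index = addr >> TARGET_PAGE_BITS;
       l1    = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);   top level
       leaf  = index & (L2_SIZE - 1);                     PageDesc slot

   Wider address spaces insert additional L2_BITS-wide middle levels,
   which page_find_alloc() below walks in a loop. */
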
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Return non-zero if the very first instruction is invalid, so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

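/* Translation is two-phased: gen_intermediate_code() walks the guest
   instruction stream and fills tcg_ctx with TCG ops, then tcg_gen_code()
   lowers those ops to host machine code at tb->tc_ptr.  The caller is
   expected to have pointed tb->tc_ptr into the code generation buffer
   beforehand, and to advance tcg_ctx.code_gen_ptr by the returned size
   (rounded up to CODE_GEN_ALIGN) afterwards; tb_gen_code() below does
   exactly that. */
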
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to searched_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}

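/* Note that state restoration works by re-translating the TB:
   gen_intermediate_code_pc() records, for each TCG op, the guest PC and
   icount at which the op was emitted, and tcg_gen_code_search_pc() stops
   code generation once the host address 'searched_pc' is reached.  No
   side tables are stored alongside the generated code in this scheme, so
   a restore costs roughly one extra translation of the block. */
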
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmapping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TBs live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

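/* Worked example with hypothetical numbers: a system-mode guest with
   ram_size == 1 GB and tb_size == 0 requests 1 GB / 4 == 256 MB, which
   already lies between MIN_CODE_GEN_BUFFER_SIZE (1 MB) and the x86-64
   MAX_CODE_GEN_BUFFER_SIZE (2 GB) and is used as-is.  An explicit
   tb_size of 512 KB would be rounded up to 1 MB, and a 4 GB request on
   x86-64 clamped down to 2 GB. */
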
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

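/* Three allocation strategies are selected above at compile time:
   user-mode builds use the static BSS buffer (which sits next to the
   executable, keeping direct branches in range); hosts with a known
   working mmap get an anonymous RWX mapping, optionally placed near the
   text segment; everything else falls back to g_malloc() plus an
   mprotect()-based map_exec().  Only the mmap variant can return NULL,
   which code_gen_alloc() below turns into a fatal error. */
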
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TBs to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

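/* The list-walking helpers above rely on a tagged-pointer encoding:
   pointers stored in the per-page TB lists (page_next) and the jump
   lists (jmp_first/jmp_next) carry an index in their two low bits, which
   is safe because TranslationBlock structures are at least 4-byte
   aligned.  For page lists the tag selects which of the (up to two)
   pages of the pointed-to TB the link belongs to; for jump lists a tag
   of 0 or 1 names the outgoing jump slot and 2 marks the list head
   stored in jmp_first.  Decoding is always the same two steps:

       n  = (uintptr_t)tb & 3;                           extract tag
       tb = (TranslationBlock *)((uintptr_t)tb & ~3);    real pointer
*/
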
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

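/* Worked example: set_bits(tab, 3, 7) marks bits 3..9.  Here start == 3
   and end == 10 straddle a byte boundary, so the else branch runs:
   tab[0] |= 0xf8 (bits 3-7), there are no full middle bytes, and
   tab[1] |= 0x03 (bits 8-9).  In the code_bitmap built below, bit k set
   means "byte k of this page is covered by translated code". */
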
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution.  We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do this to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory; this ensures that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

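/* Worked example for the bitmap fast path: for a 4-byte write at page
   offset 0x123, offset >> 3 selects bitmap byte 0x24 and offset & 7 == 3
   shifts it so that the low 4 bits of 'b' correspond to page offsets
   0x123..0x126.  Only if one of those bits is set (translated code lives
   there) is the slow invalidate path taken, so data writes to a hot code
   page stay cheap once the bitmap exists. */
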
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution.  We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory; this ensures that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

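/* The binary search above relies on an ordering invariant rather than a
   stored end pointer: TBs are appended to tcg_ctx.tb_ctx.tbs in the same
   order as their code is laid out in the buffer, so tc_ptr values are
   sorted.  When no TB starts exactly at tc_ptr, &tbs[m_max] is the last
   TB starting at or below it, i.e. the block containing the host
   address. */
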
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

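/* Two cache regions are cleared above because tb_jmp_cache is hashed by
   a TB's *starting* pc while the TB may extend up to one page further
   (a TB spans at most two pages).  A TB that starts on the page before
   'addr' can therefore still contain code on the flushed page, so the
   entries hashed from the preceding page are discarded as well. */
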
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                   target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

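/* A walk_memory_regions_fn callback receives maximal runs of
   identically-protected pages and can abort the walk by returning
   non-zero; page_dump() above is the in-tree user.  A hypothetical
   callback that merely counts executable regions (disabled, for
   illustration only): */
#if 0
static int count_exec_regions(void *priv, abi_ulong start,
                              abi_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        ++*(int *)priv;   /* priv points at an int counter */
    }
    return 0;             /* non-zero would stop walk_memory_regions() */
}
#endif
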
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do this before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */