/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
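
/*
 * Illustrative sizing example (added comment, not in the original
 * source): with L1_MAP_ADDR_SPACE_BITS = 32, TARGET_PAGE_BITS = 12 and
 * L2_BITS = 10, 20 bits of the page index must be translated.
 * 20 % 10 == 0, which is < 4, so the remainder is folded into a wider
 * top level: V_L1_BITS = 10, V_L1_SIZE = 1024 entries, and
 * V_L1_SHIFT = 20 - 10 = 10.
 */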
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non-zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
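
/*
 * Added note (not in the original source): translation is a two-phase
 * pipeline.  gen_intermediate_code() lowers guest instructions into
 * TCG ops, and tcg_gen_code() then emits host machine code for those
 * ops into tb->tc_ptr.  tb_gen_code() further below is the usual entry
 * point that drives both phases for a new TB.
 */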
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
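
/*
 * Illustrative walk (added comment, not in the original source):
 * continuing the sizing example above (V_L1_SHIFT = 10, L2_BITS = 10),
 * page index 0x12345 resolves as l1_map[(0x12345 >> 10) & 1023]
 * pointing at a bottom-level PageDesc array, of which entry
 * 0x12345 & 1023 is returned; deeper configurations insert extra
 * 10-bit levels between the two, allocated on demand.
 */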
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
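
/*
 * Added note (not in the original source): tb_alloc() is a simple bump
 * allocator over the tbs[] array and deliberately returns NULL rather
 * than flushing; tb_gen_code() reacts to NULL by calling tb_flush()
 * and retrying, at which point the allocation cannot fail.
 */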
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
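
/*
 * Added note (not in the original source): pointers in these per-page
 * TB lists are tagged in their two low bits.  A TB can span two guest
 * pages, so each page links the TB through page_next[0] or
 * page_next[1]; the tag n1 recovers which of the two link fields the
 * stored pointer refers to.  Tag value 2 marks the end of the circular
 * jump list used below.
 */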
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
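
/*
 * Added note (not in the original source): tb_next_offset[n] records
 * where in the generated code the jump 'n' lands when it is *not*
 * chained, i.e. just past the jump instruction itself.  Re-targeting
 * the jump there makes the TB fall through to its own epilogue and
 * return to the main execution loop instead of entering a stale TB.
 */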
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
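
/*
 * Worked example (added, not in the original source): set_bits(tab, 3, 2)
 * computes end = 5; both 3 and 5 lie in the same byte, so
 * mask = (0xff << 3) & ~(0xff << 5) = 0x18, setting exactly bits 3 and 4
 * of tab[0].
 */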
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
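
/*
 * Worked example (added, not in the original source): for a 4-byte
 * write at page offset 0x104, offset >> 3 selects bitmap byte 32 and
 * offset & 7 = 4 shifts it so that bit 0 of 'b' corresponds to offset
 * 0x104; b & ((1 << 4) - 1) is then non-zero only if one of the four
 * written target addresses is covered by translated code.
 */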
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
                                 tcg_ctx.code_gen_buffer_size));
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
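
/*
 * Added note (not in the original source): the search relies on tbs[]
 * being filled in strictly increasing tc_ptr order by the bump
 * allocation in tb_alloc()/tb_gen_code().  When the loop exits without
 * an exact hit, m_max indexes the last TB whose tc_ptr lies below
 * tc_ptr, i.e. the block containing the host address.
 */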
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
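
/*
 * Added note (not in the original source): the two assignments to 'n'
 * bracket cpu_restore_state_from_tb().  Before the restore, n holds
 * icount as it stood at TB entry; the restore rewinds icount_decr to
 * the start of the faulting instruction, so the difference is exactly
 * the number of instructions already executed in this TB.  n + 1 then
 * counts through the I/O instruction itself, and encoding it in cflags
 * with CF_LAST_IO makes the retranslated TB end on that access.
 */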
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                            target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
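
/*
 * Illustrative output (added, not in the original source; exact
 * addresses depend on the guest image):
 *
 *   start    end      size     prot
 *   00008000-00009000 00001000 r-x
 *   00010000-00011000 00001000 rw-
 */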
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */