/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
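
/*
 * Illustrative arithmetic (not from the original source): with 4 KiB
 * target pages (TARGET_PAGE_BITS == 12), V_L2_BITS == 10 and a 64-bit
 * address space (L1_MAP_ADDR_SPACE_BITS == 64), the radix tree has
 * 64 - 12 = 52 index bits.  52 % 10 == 2, which is < 4, so the top
 * level absorbs the remainder: V_L1_BITS == 12, leaving four 10-bit
 * levels below it (V_L1_SHIFT == 40).  A lookup peels V_L1_BITS off
 * the top of the page index, then V_L2_BITS per level until it
 * reaches a PageDesc array.
 */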
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
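
/*
 * A rough sketch of the translation flow implemented above (names as
 * used in this file): tb_gen_code() allocates a TB and calls
 * cpu_gen_code(), which first runs the target front end
 * (gen_intermediate_code) to fill the TCG op buffer, then the back
 * end (tcg_gen_code) to emit host instructions at tb->tc_ptr.  The
 * 0xffff sentinel stored in tb_next_offset[] means "jump slot not
 * emitted yet"; tb_link_page() later resets any slot that was
 * actually generated.
 */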
/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        return true;
    }
    return false;
}
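
/*
 * Illustrative use, as seen from a memory helper (hypothetical
 * caller, not part of this file): on a fault inside generated code,
 * the host return address identifies the TB, and the block is then
 * re-translated in "search" mode to map that host PC back to a guest
 * instruction boundary:
 *
 *     if (!cpu_restore_state(cpu, GETPC())) {
 *         // retaddr was not inside the code_gen_buffer
 *     }
 */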
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
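
/*
 * Worked example of the rounding above (illustrative): with a
 * 4096-byte host page, addr = 0x1234 and size = 0x100 give
 * start = 0x1000 and end = (0x1334 + 0xfff) & ~0xfff = 0x2000, so
 * every page overlapping the buffer is made executable.
 */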
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
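
/*
 * Index decomposition example (illustrative): with V_L1_SHIFT == 40
 * and V_L2_BITS == 10, a page index is consumed top-down as
 *
 *     l1_map[(index >> 40) & (V_L1_SIZE - 1)]    -> next-level table
 *     table[(index >> 30) & (V_L2_SIZE - 1)]     -> next-level table
 *     ...
 *     leaf[index & (V_L2_SIZE - 1)]              -> PageDesc
 *
 * page_find() is the non-allocating lookup; a NULL result means the
 * page has never been touched.
 */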
#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}
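
/*
 * Illustrative sizing (not from the original source): a softmmu guest
 * with ram_size = 1 GiB requests 256 MiB here, which on x86_64 is
 * below the 2 GiB MAX_CODE_GEN_BUFFER_SIZE cap and is used as-is,
 * while an explicit request of 512 KiB would be raised to the 1 MiB
 * minimum.
 */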
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
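
/*
 * Design note (summary, not from the original source): all three
 * allocators above serve the same contract -- return a buffer of
 * tcg_ctx.code_gen_buffer_size bytes mapped read/write/execute, or
 * NULL.  The static variant avoids mmap entirely (used for
 * CONFIG_USER_ONLY), the mmap variant lets placement keep direct
 * branches in range of the host's branch instructions, and the
 * g_malloc variant is the portable fallback.
 */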
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
            QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr -
                            tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
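
/*
 * Illustrative detail on the jump-list encoding used throughout this
 * file: the low two bits of a TranslationBlock pointer stored in
 * jmp_first/jmp_next[] are a tag, not part of the address.  Values 0
 * and 1 name which of the two jump slots of the pointed-to TB is
 * meant; 2 marks the list head (the TB itself).  Hence the recurring
 * pattern:
 *
 *     n1  = (uintptr_t)tb1 & 3;                        // extract tag
 *     tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); // strip tag
 */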
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
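
/*
 * Worked example for set_bits (illustrative): start = 5, len = 7
 * covers bits [5, 12).  start and end - 1 land in different bytes, so
 * the first byte gets mask 0xff << 5 == 0xe0, there are no whole
 * middle bytes (end1 == 8 == start after rounding up), and the last
 * byte gets ~(0xff << (12 & 7)) == 0x0f.
 */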
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
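
/*
 * A rough usage sketch (hypothetical caller, mirroring what the main
 * execution loop does on a TB cache miss): capture the current guest
 * state and translate a fresh block for it:
 *
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
 */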
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
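
/*
 * Note on the search invariant (illustrative): tbs[] is filled in
 * allocation order and tc_ptr only grows, so the array is sorted by
 * host code address and a plain binary search suffices.  Falling out
 * of the loop returns tbs[m_max], the last TB starting at or before
 * tc_ptr, i.e. the block containing it.
 */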
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (!tb) {
        cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
                  (void *)cpu->mem_io_pc);
    }
    cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu_can_do_io(cpu)
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
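
/*
 * Illustrative use: page_dump() below walks the map with dump_region()
 * as the callback; the same mechanism could, for instance, back a
 * /proc/self/maps-style listing for the emulated process.  Adjacent
 * pages with identical protections are coalesced into a single
 * callback invocation via walk_memory_regions_end().
 */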
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
1685 void page_dump(FILE *f
)
1687 const int length
= sizeof(abi_ulong
) * 2;
1688 (void) fprintf(f
, "%-*s %-*s %-*s %s\n",
1689 length
, "start", length
, "end", length
, "size", "prot");
1690 walk_memory_regions(f
, dump_region
);
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */