 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
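
/* Worked example (illustration only; the numbers assume a 64-bit guest
 * address space, TARGET_PAGE_BITS = 12 and V_L2_BITS = 10, one common
 * configuration): there are 64 - 12 = 52 bits to map.
 * V_L1_BITS_REM = 52 % 10 = 2, which is < 4, so the top level absorbs
 * the remainder: V_L1_BITS = 12, V_L1_SIZE = 4096 entries, and
 * V_L1_SHIFT = 40, i.e. four 10-bit levels of V_L2_SIZE entries sit
 * below the statically allocated level 1 table.
 */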
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];
/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif
void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
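
/* Worked example (illustration only, not compiled into QEMU): sleb128
 * stores 7 payload bits per byte, low bits first, with bit 7 as the
 * continuation flag and bit 6 of the last byte carrying the sign.
 * Encoding -2 takes a single byte: the low 7 bits are 0x7e, val >> 7 is
 * -1 and bit 0x40 is set, so "more" is false and the encoding is { 0x7e }.
 * A standalone round-trip sketch using the two helpers above:
 */
#if 0
static void sleb128_selftest(void)
{
    uint8_t buf[16], *end, *cursor = buf;

    end = encode_sleb128(buf, -2);          /* writes { 0x7e } */
    assert(end - buf == 1 && buf[0] == 0x7e);
    assert(decode_sleb128(&cursor) == -2);  /* cursor now equals end */
}
#endif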
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
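
/* Worked example (hypothetical numbers, single insn-start word): a TB at
 * guest pc 0x1000 with three 4-byte insns whose host code ends at offsets
 * 40, 72 and 100 has the logical table
 *     { 0x1000, 40 }, { 0x1004, 72 }, { 0x1008, 100 }
 * and is stored as the sleb128 deltas
 *     { 0, 40 }, { 4, 32 }, { 4, 28 }
 * since row 0 is seeded with { tb->pc, 0 }.  Decoding (see
 * cpu_restore_state_from_tb below) simply re-accumulates these deltas.
 */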
/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
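
/* Quick sanity check of the mask arithmetic (illustration only, not
 * compiled in): with a 4 KiB page, -(intptr_t)4096 is all-ones above
 * bit 11 in two's complement, so ANDing with the mask rounds an address
 * down to a page boundary, and ROUND_UP rounds it up.
 */
#if 0
static void page_mask_example(void)
{
    intptr_t mask = -(intptr_t)4096;

    assert((0x12345 & mask) == 0x12000);                    /* round down */
    assert(ROUND_UP((uintptr_t)0x12345, 4096) == 0x13000);  /* round up */
}
#endif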
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}
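
/* Why the XOR works (illustration only, not compiled in): bits 28..31 of
 * an address select its 256MB region, so the buffer's start and end lie
 * in the same region exactly when those bits agree, i.e. XOR to zero.
 */
#if 0
static void cross_256mb_example(void)
{
    /* 0x0ff00000 + 32MB ends at 0x11f00000, on the far side of the
       0x10000000 boundary: the masked XOR is nonzero.  */
    assert(cross_256mb((void *)0x0ff00000ul, 32 * 1024 * 1024));
    /* 0x10100000 + 32MB stays inside [0x10000000, 0x20000000).  */
    assert(!cross_256mb((void *)0x10100000ul, 32 * 1024 * 1024));
}
#endif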
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));
# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
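
/* These lists store a small tag in the two low bits of each
 * TranslationBlock pointer (the page slot number, or 2 for "list end"),
 * which is safe because the structures are at least 4-byte aligned.
 * A minimal sketch of the trick (illustration only, not compiled in):
 */
#if 0
static void tagged_pointer_example(TranslationBlock *tb)
{
    /* pack: pointer | tag, with tag < 4 */
    TranslationBlock *tagged = (TranslationBlock *)((uintptr_t)tb | 1);

    /* unpack: split the tag back out */
    unsigned int n = (uintptr_t)tagged & 3;               /* n == 1 */
    TranslationBlock *p = (TranslationBlock *)((uintptr_t)tagged & ~3);

    assert(n == 1 && p == tb);
}
#endif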
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
    tcg_ctx.tb_next = NULL;
#else
    tcg_ctx.tb_jmp_offset = NULL;
    tcg_ctx.tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
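
/* How the bitmap test works (worked example, illustration only): for a
 * write of len = 4 at page offset nr = 70 with 64-bit longs,
 * BIT_WORD(70) == 1 and 70 & 63 == 6, so `b` holds bits 70 and up of the
 * page bitmap shifted down to bit 0, and `b & ((1 << 4) - 1)` is nonzero
 * exactly when one of the four written bytes overlaps translated code.
 */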
#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
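
/* Why returning tbs[m_max] is correct (illustration only): tbs[] is
 * sorted by tc_ptr because blocks are carved out of code_gen_buffer in
 * allocation order, so when the loop ends without an exact hit, m_max is
 * the last index whose tc_ptr lies below tc_ptr, i.e. the block that
 * contains the host address.  E.g. with tc_ptrs { 0x100, 0x180, 0x240 },
 * a search for 0x1a0 narrows to m_max == 1 and returns the TB at 0x180.
 */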
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
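
/* For a 32-bit guest the dump looks roughly like this (hypothetical
 * mappings; the layout follows directly from the format strings above):
 *
 *   start    end      size     prot
 *   00010000-00012000 00002000 r-x
 *   40000000-40021000 00021000 rw-
 */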
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
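
/* Typical use (illustration only): the linux-user syscall layer validates
 * a guest buffer before touching it, along the lines of
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * A failed PAGE_WRITE check on a page that is only read-protected because
 * it holds translated code is repaired transparently via page_unprotect().
 */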
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */