/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"

#if defined(CONFIG_USER_ONLY)
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translations structures needs to be serialised
 * via locks for consistency. This is automatic for SoftMMU based system
 * emulation due to its single threaded nature. In user-mode emulation
 * access to the memory related structures is protected with the
 * mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;

/* translation block context */
static __thread int have_tb_lock;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
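/*
 * Worked example (illustrative, not from the original source): with
 * L1_MAP_ADDR_SPACE_BITS = 32, TARGET_PAGE_BITS = 12 and V_L2_BITS = 10,
 * (32 - 12) % 10 == 0 is below V_L1_MIN_BITS, so v_l1_bits becomes 10;
 * that gives v_l1_size = 1024, v_l1_shift = 10 and v_l2_levels = 0,
 * i.e. the L1 table points directly at PageDesc arrays.
 */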
#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
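/*
 * Illustrative example (not from the original source): -2 encodes to the
 * single byte 0x7e (sign bit 0x40 set, no continuation), while 100 needs
 * two bytes, 0xe4 0x00, because bit 0x40 of its low 7 bits would otherwise
 * be mistaken for a sign bit on decode.
 */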
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
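/*
 * Worked example (values invented for clarity): a two-insn TB with guest
 * pcs { tb->pc, tb->pc + 4 } and host end offsets { 0x30, 0x58 } is stored
 * as the rows { 0, ..., 0x30 } and { 4, ..., 0x28 }, i.e. sleb128 deltas
 * against the seed line and then against the previous row.
 */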
/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */
    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_remove(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}
static void page_init(void)
{
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
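/*
 * Illustrative walk (using the 32-bit configuration sketched above, where
 * v_l1_shift = 10 and v_l2_levels = 0): page index 0x12345 selects
 * l1_map[(0x12345 >> 10) & (v_l1_size - 1)] = l1_map[0x48], and the result
 * is entry 0x12345 & (V_L2_SIZE - 1) = 0x345 of the PageDesc array found
 * (or allocated) there.
 */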
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
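/*
 * Illustrative check (values invented): a buffer at 0x0ff00000 with size
 * 0x00200000 ends at 0x10100000; 0x0ff00000 ^ 0x10100000 = 0x1fe00000,
 * which has bits above the low 28 set, so cross_256mb() returns true.
 */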
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t full_size, size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    /* Reserve a guard page. */
    full_size = end - buf;
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    if (qemu_mprotect_none(buf + size, qemu_real_host_page_size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted. */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 == NULL) {
        return NULL;
    }
    buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
    assert(buf1 == buf2);
    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have their .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
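/*
 * Usage note (illustrative): tb_find_pc() below looks up a host pc by
 * passing a key of { .ptr = tc_ptr, .size = 0 }; the zero size makes
 * tb_tc_cmp() fall through to ptr_cmp_tb_tc(), which treats any pointer
 * inside [s->ptr, s->ptr + s->size) as a match for the enclosing TB.
 */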
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
    qemu_mutex_init(&tb_ctx.tb_lock);
}
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(tcg_ctx);
#endif
}
/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}

/* Called with tb_lock held.  */
void tb_remove(TranslationBlock *tb)
{
    assert_tb_locked();

    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
        size_t host_size = 0;

        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%td nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer, nb_tbs,
               nb_tbs > 0 ? host_size / nb_tbs : 0);
    }
    if ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)
        > tcg_ctx->code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    /* Increment the refcount first so that destroy acts as a reset */
    g_tree_ref(tb_ctx.tb_tree);
    g_tree_destroy(tb_ctx.tb_tree);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
1027 static inline void tb_remove_from_jmp_list(TranslationBlock
*tb
, int n
)
1029 TranslationBlock
*tb1
;
1030 uintptr_t *ptb
, ntb
;
1033 ptb
= &tb
->jmp_list_next
[n
];
1035 /* find tb(n) in circular list */
1039 tb1
= (TranslationBlock
*)(ntb
& ~3);
1040 if (n1
== n
&& tb1
== tb
) {
1044 ptb
= &tb1
->jmp_list_first
;
1046 ptb
= &tb1
->jmp_list_next
[n1
];
1049 /* now we can suppress tb(n) from the list */
1050 *ptb
= tb
->jmp_list_next
[n
];
1052 tb
->jmp_list_next
[n
] = (uintptr_t)NULL
;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_remove(&tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
}
1259 TranslationBlock
*tb_gen_code(CPUState
*cpu
,
1260 target_ulong pc
, target_ulong cs_base
,
1261 uint32_t flags
, int cflags
)
1263 CPUArchState
*env
= cpu
->env_ptr
;
1264 TranslationBlock
*tb
;
1265 tb_page_addr_t phys_pc
, phys_page2
;
1266 target_ulong virt_page2
;
1267 tcg_insn_unit
*gen_code_buf
;
1268 int gen_code_size
, search_size
;
1269 #ifdef CONFIG_PROFILER
1270 TCGProfile
*prof
= &tcg_ctx
->prof
;
1273 assert_memory_lock();
1275 phys_pc
= get_page_addr_code(env
, pc
);
1278 if (unlikely(!tb
)) {
1280 /* flush must be done */
1283 /* Make the execution loop process the flush as soon as possible. */
1284 cpu
->exception_index
= EXCP_INTERRUPT
;
1288 gen_code_buf
= tcg_ctx
->code_gen_ptr
;
1289 tb
->tc
.ptr
= gen_code_buf
;
1291 tb
->cs_base
= cs_base
;
1293 tb
->cflags
= cflags
;
1294 tb
->trace_vcpu_dstate
= *cpu
->trace_dstate
;
1295 tcg_ctx
->tb_cflags
= cflags
;
1297 #ifdef CONFIG_PROFILER
1298 /* includes aborted translations because of exceptions */
1299 atomic_set(&prof
->tb_count1
, prof
->tb_count1
+ 1);
1300 ti
= profile_getclock();
1303 tcg_func_start(tcg_ctx
);
1305 tcg_ctx
->cpu
= ENV_GET_CPU(env
);
1306 gen_intermediate_code(cpu
, tb
);
1307 tcg_ctx
->cpu
= NULL
;
1309 trace_translate_block(tb
, tb
->pc
, tb
->tc
.ptr
);
1311 /* generate machine code */
1312 tb
->jmp_reset_offset
[0] = TB_JMP_RESET_OFFSET_INVALID
;
1313 tb
->jmp_reset_offset
[1] = TB_JMP_RESET_OFFSET_INVALID
;
1314 tcg_ctx
->tb_jmp_reset_offset
= tb
->jmp_reset_offset
;
1315 if (TCG_TARGET_HAS_direct_jump
) {
1316 tcg_ctx
->tb_jmp_insn_offset
= tb
->jmp_target_arg
;
1317 tcg_ctx
->tb_jmp_target_addr
= NULL
;
1319 tcg_ctx
->tb_jmp_insn_offset
= NULL
;
1320 tcg_ctx
->tb_jmp_target_addr
= tb
->jmp_target_arg
;
1323 #ifdef CONFIG_PROFILER
1324 atomic_set(&prof
->tb_count
, prof
->tb_count
+ 1);
1325 atomic_set(&prof
->interm_time
, prof
->interm_time
+ profile_getclock() - ti
);
1326 ti
= profile_getclock();
1329 /* ??? Overflow could be handled better here. In particular, we
1330 don't need to re-do gen_intermediate_code, nor should we re-do
1331 the tcg optimization currently hidden inside tcg_gen_code. All
1332 that should be required is to flush the TBs, allocate a new TB,
1333 re-initialize it per above, and re-do the actual code generation. */
1334 gen_code_size
= tcg_gen_code(tcg_ctx
, tb
);
1335 if (unlikely(gen_code_size
< 0)) {
1336 goto buffer_overflow
;
1338 search_size
= encode_search(tb
, (void *)gen_code_buf
+ gen_code_size
);
1339 if (unlikely(search_size
< 0)) {
1340 goto buffer_overflow
;
1342 tb
->tc
.size
= gen_code_size
;
1344 #ifdef CONFIG_PROFILER
1345 atomic_set(&prof
->code_time
, prof
->code_time
+ profile_getclock() - ti
);
1346 atomic_set(&prof
->code_in_len
, prof
->code_in_len
+ tb
->size
);
1347 atomic_set(&prof
->code_out_len
, prof
->code_out_len
+ gen_code_size
);
1348 atomic_set(&prof
->search_out_len
, prof
->search_out_len
+ search_size
);
1352 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM
) &&
1353 qemu_log_in_addr_range(tb
->pc
)) {
1355 qemu_log("OUT: [size=%d]\n", gen_code_size
);
1356 if (tcg_ctx
->data_gen_ptr
) {
1357 size_t code_size
= tcg_ctx
->data_gen_ptr
- tb
->tc
.ptr
;
1358 size_t data_size
= gen_code_size
- code_size
;
1361 log_disas(tb
->tc
.ptr
, code_size
);
1363 for (i
= 0; i
< data_size
; i
+= sizeof(tcg_target_ulong
)) {
1364 if (sizeof(tcg_target_ulong
) == 8) {
1365 qemu_log("0x%08" PRIxPTR
": .quad 0x%016" PRIx64
"\n",
1366 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1367 *(uint64_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1369 qemu_log("0x%08" PRIxPTR
": .long 0x%08x\n",
1370 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1371 *(uint32_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1375 log_disas(tb
->tc
.ptr
, gen_code_size
);
1383 tcg_ctx
->code_gen_ptr
= (void *)
1384 ROUND_UP((uintptr_t)gen_code_buf
+ gen_code_size
+ search_size
,
1387 /* init jump list */
1388 assert(((uintptr_t)tb
& 3) == 0);
1389 tb
->jmp_list_first
= (uintptr_t)tb
| 2;
1390 tb
->jmp_list_next
[0] = (uintptr_t)NULL
;
1391 tb
->jmp_list_next
[1] = (uintptr_t)NULL
;
1393 /* init original jump addresses wich has been set during tcg_gen_code() */
1394 if (tb
->jmp_reset_offset
[0] != TB_JMP_RESET_OFFSET_INVALID
) {
1395 tb_reset_jump(tb
, 0);
1397 if (tb
->jmp_reset_offset
[1] != TB_JMP_RESET_OFFSET_INVALID
) {
1398 tb_reset_jump(tb
, 1);
1401 /* check next page if needed */
1402 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1404 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1405 phys_page2
= get_page_addr_code(env
, virt_page2
);
1407 /* As long as consistency of the TB stuff is provided by tb_lock in user
1408 * mode and is implicit in single-threaded softmmu emulation, no explicit
1409 * memory barrier is required before tb_link_page() makes the TB visible
1410 * through the physical hash table and physical page list.
1412 tb_link_page(tb
, phys_pc
, phys_page2
);
1413 g_tree_insert(tb_ctx
.tb_tree
, &tb
->tc
, tb
);
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap. FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
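/*
 * Illustrative reading of the bitmap test above (values invented): for a
 * 4-byte write at page offset 0x100, nr = 0x100, so b holds the bitmap
 * word shifted so that bit 0 corresponds to offset 0x100; the mask
 * (1 << 4) - 1 = 0xf then checks offsets 0x100..0x103 for translated code.
 */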
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif
/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    return g_tree_lookup(tb_ctx.tb_tree, &s);
}
1689 void tb_invalidate_phys_addr(AddressSpace
*as
, hwaddr addr
)
1691 ram_addr_t ram_addr
;
1696 mr
= address_space_translate(as
, addr
, &addr
, &l
, false);
1697 if (!(memory_region_is_ram(mr
)
1698 || memory_region_is_romd(mr
))) {
1702 ram_addr
= memory_region_get_ram_addr(mr
) + addr
;
1704 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1708 #endif /* !defined(CONFIG_USER_ONLY) */
/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    /* Adjust the execution state of the next TB.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_remove(tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
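/*
 * Rationale (descriptive): a TB that starts on the page preceding 'addr'
 * can spill onto the flushed page, so entries hashed from the previous
 * page must be discarded as well; hence the two clear_page calls above.
 */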
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts  = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
struct tb_tree_stats {
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tb_lock();

    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer,
                tcg_ctx->code_gen_highwater - tcg_ctx->code_gen_buffer);
    cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
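/*
 * Example output (illustrative, 32-bit target):
 * start    end      size     prot
 * 00010000 00030000 00020000 r-x
 * 40000000 40020000 00020000 rw-
 */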
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * nonzero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
            if (DEBUG_TB_CHECK_GATE) {
                tb_invalidate_check(addr);
            }
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
2232 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
2233 void tcg_flush_softmmu_tlb(CPUState
*cs
)
2235 #ifdef CONFIG_SOFTMMU