/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU-based
 * system emulation due to its single-threaded nature.  In user-mode
 * emulation, access to the memory-related structures is protected with
 * the mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
#define SMC_BITMAP_USE_THRESHOLD 10
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;
/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
static __thread int have_tb_lock;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}
void tb_lock_reset(void)
{
    if (have_tb_lock) {
        have_tb_lock = 0;
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    }
}
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
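
/*
 * Worked example of the sleb128 encoding above (values chosen for
 * illustration): the delta 129 (0b1000'0001) encodes as the two bytes
 * 0x81 0x01 -- low seven bits first, bit 7 set on all but the last
 * byte.  The delta -2 encodes as the single byte 0x7e: after emitting
 * the low seven bits the remaining value is -1 and bit 6 of the byte
 * is set, so the decoder sign-extends.  Thus small deltas of either
 * sign cost one byte, and encode/decode round-trip any target_long.
 */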
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
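
/*
 * Illustration of the search data produced above (hypothetical
 * numbers, assuming TARGET_INSN_START_WORDS == 1, i.e. only the pc
 * column): for a TB at guest pc 0x1000 whose three insns are 2, 4 and
 * 2 guest bytes and whose host code ends at offsets 40, 96 and 120,
 * the logical rows are { 0x1000, 40 }, { 0x1002, 96 }, { 0x1004, 120 },
 * stored as the sleb128 deltas { +0, +40 }, { +2, +56 }, { +2, +24 }.
 */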
/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */
    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}
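
/*
 * Typical caller pattern (an illustrative sketch, not a caller from
 * this file): a memory helper that faulted passes its own return
 * address so the guest pc can be recovered before raising the guest
 * exception:
 *
 *     if (!cpu_restore_state(cpu, GETPC())) {
 *         // retaddr did not map to a TB; state was already consistent
 *     }
 */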
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
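
/*
 * Example lookup (using the illustrative configuration sketched after
 * page_table_config_init, i.e. v_l1_shift = 30, v_l2_levels = 2,
 * V_L2_BITS = 10): for a given page index, bits [34:30] select the
 * l1_map slot, bits [29:20] and [19:10] select the two intermediate
 * tables, and bits [9:0] select the PageDesc within the leaf array.
 * With alloc == 0 the walk returns NULL at the first missing level
 * instead of allocating it.
 */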
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
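
/*
 * Example (illustrative addresses): a buffer at 0x0ff00000 of size
 * 0x00200000 ends at 0x10100000.  start ^ end = 0x1fe00000, and
 * masking with ~0x0fffffff leaves 0x10000000 -- nonzero exactly
 * because the two ends lie in different 256MB regions.
 */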
/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
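
/*
 * Continuing the example above: splitting the buffer at 0x0ff00000 of
 * size 0x00200000 computes buf2 = 0x10000000 and size2 = 0x00100000;
 * size1 = buf2 - buf1 is also 0x00100000, so the two halves are equal,
 * the original base is kept, and code_gen_buffer_size shrinks to
 * 0x00100000.
 */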
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));
# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* size this conservatively -- realloc later if needed */
    tcg_ctx.tb_ctx.tbs_size =
        tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
    if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
        tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
    }
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;
    TBContext *ctx;

    assert_tb_locked();

    tb = tcg_tb_alloc(&tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    ctx = &tcg_ctx.tb_ctx;
    if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
        ctx->tbs_size *= 2;
        ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
    }
    ctx->tbs[ctx->nb_tbs++] = tb;
    return tb;
}
/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);

        tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        printf("qemu: flush code_size=%td nb_tbs=%d avg_tb_size=%td\n",
               tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
               tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
               (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) /
               tcg_ctx.tb_ctx.nb_tbs : 0);
    }
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
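
/*
 * The flush count doubles as a generation token: if several vCPUs
 * queue do_tb_flush with the same count, the first one to run bumps
 * the generation and the rest observe a mismatch and return, so the
 * buffer is flushed only once per wave of requests.
 */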
#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}
static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* DEBUG_TB_CHECK */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    ntb = *ptb;
    if (ntb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx.tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx.tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx.tb_jmp_insn_offset = NULL;
        tcg_ctx.tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock() - ti;
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx.data_gen_ptr) {
            size_t code_size = tcg_ctx.data_gen_ptr - tb->tc_ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc_ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx.data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx.data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx.data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx.data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc_ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
#ifdef CONFIG_SOFTMMU
/* Called with tb_lock held */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap. FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return tcg_ctx.tb_ctx.tbs[m_max];
}
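
/*
 * The binary search above relies on tbs[] being sorted by tc_ptr,
 * which holds because translated code is carved linearly out of
 * code_gen_buffer and each new TB is appended to the array.  E.g. a
 * tc_ptr pointing into the middle of tbs[m]'s code never matches any
 * entry exactly, so the loop terminates and tbs[m_max] is the last TB
 * whose start does not exceed tc_ptr -- the block containing it.
 */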
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* Called with tb_lock held.  */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}
#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
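
/*
 * Example of why two pages are cleared above: a TB whose first byte is
 * on the preceding page may still extend into the flushed page, and
 * tb_jmp_cache entries are hashed by the TB's start pc, so they land
 * in the preceding page's hash range; clearing both pages catches
 * those straddling TBs cheaply.
 */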
static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    tb_lock();

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ?  */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d\n", tcg_ctx.tb_ctx.nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                            target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %u\n",
                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * nonzero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */
2236 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
2237 void tcg_flush_softmmu_tlb(CPUState
*cs
)
2239 #ifdef CONFIG_SOFTMMU