 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#include "exec/ram_addr.h"
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
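
/*
 * Illustrative note (not part of the original code): in user-mode builds the
 * writers of these structures run under mmap_lock, e.g.
 *
 *     mmap_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * while in system-mode builds writers take the per-page locks defined below
 * (page_lock()/page_unlock()); lookups in the TB hash table are lock-free in
 * both cases.
 */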
#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;
/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};
/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                             \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);          \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n) \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
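
/*
 * Illustrative note (not part of the original code): these lists store
 * "tagged pointers" -- a TranslationBlock pointer with a list index (0 or 1)
 * packed into the low bit, which is always clear in a correctly aligned
 * pointer.  Conceptually:
 *
 *     uintptr_t tagged = (uintptr_t)tb | n;                      // pack
 *     TranslationBlock *ptr = (TranslationBlock *)(tagged & ~1); // unpack
 *     unsigned idx = tagged & 1;
 *
 * TB_FOR_EACH_TAGGED() walks such a list, re-deriving the index at every hop
 * so that it can follow the right next pointer (page_next[n] or
 * jmp_list_next[n]).
 */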
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_SIZE (1 << V_L2_BITS)
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate) * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}
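
/*
 * Illustrative note (not part of the original code): with the values computed
 * above, a page index is decomposed into one L1 index followed by v_l2_levels
 * L2 indices of V_L2_BITS each, mirroring the walk in page_find_alloc() below:
 *
 *     void **lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 *     for (int i = v_l2_levels; i > 0; i--) {
 *         lp = (void **)*lp + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
 *     }
 *     // *lp now refers to a slot in the last-level table of PageDescs.
 *
 * This is schematic only; the real walk also handles allocation and RCU-safe
 * reads of the intermediate tables.
 */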
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
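
/*
 * Illustrative note (not part of the original code): sleb128 stores a signed
 * value 7 bits at a time, least-significant group first, with bit 7 set on
 * every byte except the last.  A round trip, for example:
 *
 *     uint8_t buf[16], *end, *cur = buf;
 *     end = encode_sleb128(buf, -3);     // encodes as the single byte 0x7d
 *     target_long v = decode_sleb128(&cur);
 *     assert(v == -3 && cur == end);
 *
 * The search data encoded below is simply a sequence of such deltas.
 */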
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
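
/*
 * Illustrative note (not part of the original code), assuming
 * TARGET_INSN_START_WORDS == 1 for brevity: a TB at guest pc 0x1000 whose two
 * instructions end at host-code offsets 0x20 and 0x44 has the logical table
 *
 *     row 0:  guest 0x1000   host end 0x20
 *     row 1:  guest 0x1004   host end 0x44
 *
 * and is encoded as the sleb128 deltas 0, 0x20, 4, 0x24 (the host column
 * stores offsets from tb->tc.ptr).  The guest addresses and offsets are
 * made-up values for the example.
 */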
329 /* The cpu state corresponding to 'searched_pc' is restored.
330 * When reset_icount is true, current TB will be interrupted and
331 * icount should be recalculated.
333 static int cpu_restore_state_from_tb(CPUState
*cpu
, TranslationBlock
*tb
,
334 uintptr_t searched_pc
, bool reset_icount
)
336 target_ulong data
[TARGET_INSN_START_WORDS
] = { tb
->pc
};
337 uintptr_t host_pc
= (uintptr_t)tb
->tc
.ptr
;
338 CPUArchState
*env
= cpu
->env_ptr
;
339 uint8_t *p
= tb
->tc
.ptr
+ tb
->tc
.size
;
340 int i
, j
, num_insns
= tb
->icount
;
341 #ifdef CONFIG_PROFILER
342 TCGProfile
*prof
= &tcg_ctx
->prof
;
343 int64_t ti
= profile_getclock();
346 searched_pc
-= GETPC_ADJ
;
348 if (searched_pc
< host_pc
) {
352 /* Reconstruct the stored insn data while looking for the point at
353 which the end of the insn exceeds the searched_pc. */
354 for (i
= 0; i
< num_insns
; ++i
) {
355 for (j
= 0; j
< TARGET_INSN_START_WORDS
; ++j
) {
356 data
[j
] += decode_sleb128(&p
);
358 host_pc
+= decode_sleb128(&p
);
359 if (host_pc
> searched_pc
) {
366 if (reset_icount
&& (tb_cflags(tb
) & CF_USE_ICOUNT
)) {
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
370 cpu_neg(cpu
)->icount_decr
.u16
.low
+= num_insns
- i
;
372 restore_state_to_opc(env
, tb
, data
);
374 #ifdef CONFIG_PROFILER
375 atomic_set(&prof
->restore_time
,
376 prof
->restore_time
+ profile_getclock() - ti
);
377 atomic_set(&prof
->restore_count
, prof
->restore_count
+ 1);
382 bool cpu_restore_state(CPUState
*cpu
, uintptr_t host_pc
, bool will_exit
)
384 TranslationBlock
*tb
;
386 uintptr_t check_offset
;
388 /* The host_pc has to be in the region of current code buffer. If
389 * it is not we will not be able to resolve it here. The two cases
390 * where host_pc will not be correct are:
392 * - fault during translation (instruction fetch)
393 * - fault from helper (not using GETPC() macro)
395 * Either way we need return early as we can't resolve it here.
397 * We are using unsigned arithmetic so if host_pc <
398 * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
399 * above the code_gen_buffer_size
401 check_offset
= host_pc
- (uintptr_t) tcg_init_ctx
.code_gen_buffer
;
403 if (check_offset
< tcg_init_ctx
.code_gen_buffer_size
) {
404 tb
= tcg_tb_lookup(host_pc
);
406 cpu_restore_state_from_tb(cpu
, tb
, host_pc
, will_exit
);
407 if (tb_cflags(tb
) & CF_NOCACHE
) {
408 /* one-shot translation, invalidate it immediately */
409 tb_phys_invalidate(tb
, -1);
419 static void page_init(void)
422 page_table_config_init();
424 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
426 #ifdef HAVE_KINFO_GETVMMAP
427 struct kinfo_vmentry
*freep
;
430 freep
= kinfo_getvmmap(getpid(), &cnt
);
433 for (i
= 0; i
< cnt
; i
++) {
434 unsigned long startaddr
, endaddr
;
436 startaddr
= freep
[i
].kve_start
;
437 endaddr
= freep
[i
].kve_end
;
438 if (h2g_valid(startaddr
)) {
439 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
441 if (h2g_valid(endaddr
)) {
442 endaddr
= h2g(endaddr
);
443 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
445 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
447 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
458 last_brk
= (unsigned long)sbrk(0);
460 f
= fopen("/compat/linux/proc/self/maps", "r");
465 unsigned long startaddr
, endaddr
;
468 n
= fscanf(f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
470 if (n
== 2 && h2g_valid(startaddr
)) {
471 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
473 if (h2g_valid(endaddr
)) {
474 endaddr
= h2g(endaddr
);
478 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
490 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
496 /* Level 1. Always allocated. */
497 lp
= l1_map
+ ((index
>> v_l1_shift
) & (v_l1_size
- 1));
500 for (i
= v_l2_levels
; i
> 0; i
--) {
501 void **p
= atomic_rcu_read(lp
);
509 p
= g_new0(void *, V_L2_SIZE
);
510 existing
= atomic_cmpxchg(lp
, NULL
, p
);
511 if (unlikely(existing
)) {
517 lp
= p
+ ((index
>> (i
* V_L2_BITS
)) & (V_L2_SIZE
- 1));
520 pd
= atomic_rcu_read(lp
);
527 pd
= g_new0(PageDesc
, V_L2_SIZE
);
528 #ifndef CONFIG_USER_ONLY
532 for (i
= 0; i
< V_L2_SIZE
; i
++) {
533 qemu_spin_init(&pd
[i
].lock
);
537 existing
= atomic_cmpxchg(lp
, NULL
, pd
);
538 if (unlikely(existing
)) {
544 return pd
+ (index
& (V_L2_SIZE
- 1));
547 static inline PageDesc
*page_find(tb_page_addr_t index
)
549 return page_find_alloc(index
, 0);
552 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
553 PageDesc
**ret_p2
, tb_page_addr_t phys2
, int alloc
);
555 /* In user-mode page locks aren't used; mmap_lock is enough */
556 #ifdef CONFIG_USER_ONLY
558 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
560 static inline void page_lock(PageDesc
*pd
)
563 static inline void page_unlock(PageDesc
*pd
)
566 static inline void page_lock_tb(const TranslationBlock
*tb
)
569 static inline void page_unlock_tb(const TranslationBlock
*tb
)
572 struct page_collection
*
573 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
578 void page_collection_unlock(struct page_collection
*set
)
580 #else /* !CONFIG_USER_ONLY */
582 #ifdef CONFIG_DEBUG_TCG
584 static __thread GHashTable
*ht_pages_locked_debug
;
586 static void ht_pages_locked_debug_init(void)
588 if (ht_pages_locked_debug
) {
591 ht_pages_locked_debug
= g_hash_table_new(NULL
, NULL
);
594 static bool page_is_locked(const PageDesc
*pd
)
598 ht_pages_locked_debug_init();
599 found
= g_hash_table_lookup(ht_pages_locked_debug
, pd
);
603 static void page_lock__debug(PageDesc
*pd
)
605 ht_pages_locked_debug_init();
606 g_assert(!page_is_locked(pd
));
607 g_hash_table_insert(ht_pages_locked_debug
, pd
, pd
);
610 static void page_unlock__debug(const PageDesc
*pd
)
614 ht_pages_locked_debug_init();
615 g_assert(page_is_locked(pd
));
616 removed
= g_hash_table_remove(ht_pages_locked_debug
, pd
);
621 do_assert_page_locked(const PageDesc
*pd
, const char *file
, int line
)
623 if (unlikely(!page_is_locked(pd
))) {
624 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
630 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
632 void assert_no_pages_locked(void)
634 ht_pages_locked_debug_init();
635 g_assert(g_hash_table_size(ht_pages_locked_debug
) == 0);
638 #else /* !CONFIG_DEBUG_TCG */
640 #define assert_page_locked(pd)
642 static inline void page_lock__debug(const PageDesc
*pd
)
646 static inline void page_unlock__debug(const PageDesc
*pd
)
650 #endif /* CONFIG_DEBUG_TCG */
652 static inline void page_lock(PageDesc
*pd
)
654 page_lock__debug(pd
);
655 qemu_spin_lock(&pd
->lock
);
658 static inline void page_unlock(PageDesc
*pd
)
660 qemu_spin_unlock(&pd
->lock
);
661 page_unlock__debug(pd
);
664 /* lock the page(s) of a TB in the correct acquisition order */
665 static inline void page_lock_tb(const TranslationBlock
*tb
)
667 page_lock_pair(NULL
, tb
->page_addr
[0], NULL
, tb
->page_addr
[1], 0);
670 static inline void page_unlock_tb(const TranslationBlock
*tb
)
672 PageDesc
*p1
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
675 if (unlikely(tb
->page_addr
[1] != -1)) {
676 PageDesc
*p2
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
684 static inline struct page_entry
*
685 page_entry_new(PageDesc
*pd
, tb_page_addr_t index
)
687 struct page_entry
*pe
= g_malloc(sizeof(*pe
));
695 static void page_entry_destroy(gpointer p
)
697 struct page_entry
*pe
= p
;
699 g_assert(pe
->locked
);
704 /* returns false on success */
705 static bool page_entry_trylock(struct page_entry
*pe
)
709 busy
= qemu_spin_trylock(&pe
->pd
->lock
);
711 g_assert(!pe
->locked
);
713 page_lock__debug(pe
->pd
);
718 static void do_page_entry_lock(struct page_entry
*pe
)
721 g_assert(!pe
->locked
);
725 static gboolean
page_entry_lock(gpointer key
, gpointer value
, gpointer data
)
727 struct page_entry
*pe
= value
;
729 do_page_entry_lock(pe
);
733 static gboolean
page_entry_unlock(gpointer key
, gpointer value
, gpointer data
)
735 struct page_entry
*pe
= value
;
745 * Trylock a page, and if successful, add the page to a collection.
746 * Returns true ("busy") if the page could not be locked; false otherwise.
748 static bool page_trylock_add(struct page_collection
*set
, tb_page_addr_t addr
)
750 tb_page_addr_t index
= addr
>> TARGET_PAGE_BITS
;
751 struct page_entry
*pe
;
754 pe
= g_tree_lookup(set
->tree
, &index
);
759 pd
= page_find(index
);
764 pe
= page_entry_new(pd
, index
);
765 g_tree_insert(set
->tree
, &pe
->index
, pe
);
768 * If this is either (1) the first insertion or (2) a page whose index
769 * is higher than any other so far, just lock the page and move on.
771 if (set
->max
== NULL
|| pe
->index
> set
->max
->index
) {
773 do_page_entry_lock(pe
);
777 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
780 return page_entry_trylock(pe
);
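
/*
 * Illustrative note (not part of the original code): the ordering rule above
 * means that with pages 5 and 9 already in the set (set->max->index == 9),
 * adding page 12 locks it immediately, whereas adding page 7 must go through
 * the trylock path; if that trylock reports "busy", the caller drops every
 * lock in the tree and re-acquires them in ascending index order (see
 * page_collection_lock() below).
 */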
783 static gint
tb_page_addr_cmp(gconstpointer ap
, gconstpointer bp
, gpointer udata
)
785 tb_page_addr_t a
= *(const tb_page_addr_t
*)ap
;
786 tb_page_addr_t b
= *(const tb_page_addr_t
*)bp
;
797 * Lock a range of pages ([@start,@end[) as well as the pages of all
799 * Locking order: acquire locks in ascending order of page index.
801 struct page_collection
*
802 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
804 struct page_collection
*set
= g_malloc(sizeof(*set
));
805 tb_page_addr_t index
;
808 start
>>= TARGET_PAGE_BITS
;
809 end
>>= TARGET_PAGE_BITS
;
810 g_assert(start
<= end
);
812 set
->tree
= g_tree_new_full(tb_page_addr_cmp
, NULL
, NULL
,
815 assert_no_pages_locked();
818 g_tree_foreach(set
->tree
, page_entry_lock
, NULL
);
820 for (index
= start
; index
<= end
; index
++) {
821 TranslationBlock
*tb
;
824 pd
= page_find(index
);
828 if (page_trylock_add(set
, index
<< TARGET_PAGE_BITS
)) {
829 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
832 assert_page_locked(pd
);
833 PAGE_FOR_EACH_TB(pd
, tb
, n
) {
834 if (page_trylock_add(set
, tb
->page_addr
[0]) ||
835 (tb
->page_addr
[1] != -1 &&
836 page_trylock_add(set
, tb
->page_addr
[1]))) {
837 /* drop all locks, and reacquire in order */
838 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
846 void page_collection_unlock(struct page_collection
*set
)
848 /* entries are unlocked and freed via page_entry_destroy */
849 g_tree_destroy(set
->tree
);
853 #endif /* !CONFIG_USER_ONLY */
855 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
856 PageDesc
**ret_p2
, tb_page_addr_t phys2
, int alloc
)
859 tb_page_addr_t page1
;
860 tb_page_addr_t page2
;
862 assert_memory_lock();
863 g_assert(phys1
!= -1);
865 page1
= phys1
>> TARGET_PAGE_BITS
;
866 page2
= phys2
>> TARGET_PAGE_BITS
;
868 p1
= page_find_alloc(page1
, alloc
);
872 if (likely(phys2
== -1)) {
875 } else if (page1
== page2
) {
882 p2
= page_find_alloc(page2
, alloc
);
895 /* Minimum size of the code gen buffer. This number is randomly chosen,
896 but not so small that we can't have a fair number of TB's live. */
897 #define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
899 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
900 indicated, this is constrained by the range of direct branches on the
901 host cpu, as used by the TCG implementation of goto_tb. */
902 #if defined(__x86_64__)
903 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
904 #elif defined(__sparc__)
905 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
906 #elif defined(__powerpc64__)
907 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
908 #elif defined(__powerpc__)
909 # define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
910 #elif defined(__aarch64__)
911 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
912 #elif defined(__s390x__)
913 /* We have a +- 4GB range on the branches; leave some slop. */
914 # define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
915 #elif defined(__mips__)
916 /* We have a 256MB branch region, but leave room to make sure the
917 main executable is also within that region. */
918 # define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
920 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
923 #if TCG_TARGET_REG_BITS == 32
924 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
925 #ifdef CONFIG_USER_ONLY
927 * For user mode on smaller 32 bit systems we may run into trouble
928 * allocating big chunks of data in the right place. On these systems
929 * we utilise a static code generation buffer directly in the binary.
931 #define USE_STATIC_CODE_GEN_BUFFER
933 #else /* TCG_TARGET_REG_BITS == 64 */
934 #ifdef CONFIG_USER_ONLY
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
940 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
943 * We expect most system emulation to run one or two guests per host.
944 * Users running large scale system emulation may want to tweak their
945 * runtime setup via the tb-size control on the command line.
947 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
951 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
952 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
953 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
955 static inline size_t size_code_gen_buffer(size_t tb_size
)
957 /* Size the buffer. */
959 tb_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
961 if (tb_size
< MIN_CODE_GEN_BUFFER_SIZE
) {
962 tb_size
= MIN_CODE_GEN_BUFFER_SIZE
;
964 if (tb_size
> MAX_CODE_GEN_BUFFER_SIZE
) {
965 tb_size
= MAX_CODE_GEN_BUFFER_SIZE
;
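
/*
 * Illustrative note (not part of the original code): the clamping above means
 * a tb_size of 0 (the default) selects DEFAULT_CODE_GEN_BUFFER_SIZE, a request
 * smaller than MIN_CODE_GEN_BUFFER_SIZE is raised to that minimum, and a
 * request larger than the host-specific MAX_CODE_GEN_BUFFER_SIZE is silently
 * capped.
 */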
971 /* In order to use J and JAL within the code_gen_buffer, we require
972 that the buffer not cross a 256MB boundary. */
973 static inline bool cross_256mb(void *addr
, size_t size
)
975 return ((uintptr_t)addr
^ ((uintptr_t)addr
+ size
)) & ~0x0ffffffful
;
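
/*
 * Illustrative note (not part of the original code): the XOR above is non-zero
 * in bits 28 and up exactly when the first and last byte of the buffer fall in
 * different 256MB-aligned windows.  For example,
 * cross_256mb((void *)0x0fff0000, 0x20000) is true because the range spans the
 * 0x10000000 boundary, while the same size starting at 0x0ffc0000 is not.
 */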
978 /* We weren't able to allocate a buffer without crossing that boundary,
979 so make do with the larger portion of the buffer that doesn't cross.
980 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
981 static inline void *split_cross_256mb(void *buf1
, size_t size1
)
983 void *buf2
= (void *)(((uintptr_t)buf1
+ size1
) & ~0x0ffffffful
);
984 size_t size2
= buf1
+ size1
- buf2
;
992 tcg_ctx
->code_gen_buffer_size
= size1
;
997 #ifdef USE_STATIC_CODE_GEN_BUFFER
998 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
999 __attribute__((aligned(CODE_GEN_ALIGN
)));
1001 static inline void *alloc_code_gen_buffer(void)
1003 void *buf
= static_code_gen_buffer
;
1004 void *end
= static_code_gen_buffer
+ sizeof(static_code_gen_buffer
);
1007 /* page-align the beginning and end of the buffer */
1008 buf
= QEMU_ALIGN_PTR_UP(buf
, qemu_real_host_page_size
);
1009 end
= QEMU_ALIGN_PTR_DOWN(end
, qemu_real_host_page_size
);
1013 /* Honor a command-line option limiting the size of the buffer. */
1014 if (size
> tcg_ctx
->code_gen_buffer_size
) {
1015 size
= QEMU_ALIGN_DOWN(tcg_ctx
->code_gen_buffer_size
,
1016 qemu_real_host_page_size
);
1018 tcg_ctx
->code_gen_buffer_size
= size
;
1021 if (cross_256mb(buf
, size
)) {
1022 buf
= split_cross_256mb(buf
, size
);
1023 size
= tcg_ctx
->code_gen_buffer_size
;
1027 if (qemu_mprotect_rwx(buf
, size
)) {
1030 qemu_madvise(buf
, size
, QEMU_MADV_HUGEPAGE
);
1034 #elif defined(_WIN32)
1035 static inline void *alloc_code_gen_buffer(void)
1037 size_t size
= tcg_ctx
->code_gen_buffer_size
;
1038 return VirtualAlloc(NULL
, size
, MEM_RESERVE
| MEM_COMMIT
,
1039 PAGE_EXECUTE_READWRITE
);
1042 static inline void *alloc_code_gen_buffer(void)
1044 int prot
= PROT_WRITE
| PROT_READ
| PROT_EXEC
;
1045 int flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
1046 size_t size
= tcg_ctx
->code_gen_buffer_size
;
1049 buf
= mmap(NULL
, size
, prot
, flags
, -1, 0);
1050 if (buf
== MAP_FAILED
) {
1055 if (cross_256mb(buf
, size
)) {
1057 * Try again, with the original still mapped, to avoid re-acquiring
1058 * the same 256mb crossing.
1061 void *buf2
= mmap(NULL
, size
, prot
, flags
, -1, 0);
1062 switch ((int)(buf2
!= MAP_FAILED
)) {
1064 if (!cross_256mb(buf2
, size
)) {
1065 /* Success! Use the new buffer. */
1069 /* Failure. Work with what we had. */
1073 /* Split the original buffer. Free the smaller half. */
1074 buf2
= split_cross_256mb(buf
, size
);
1075 size2
= tcg_ctx
->code_gen_buffer_size
;
1077 munmap(buf
+ size2
, size
- size2
);
1079 munmap(buf
, size
- size2
);
1088 /* Request large pages for the buffer. */
1089 qemu_madvise(buf
, size
, QEMU_MADV_HUGEPAGE
);
1093 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1095 static inline void code_gen_alloc(size_t tb_size
)
1097 tcg_ctx
->code_gen_buffer_size
= size_code_gen_buffer(tb_size
);
1098 tcg_ctx
->code_gen_buffer
= alloc_code_gen_buffer();
1099 if (tcg_ctx
->code_gen_buffer
== NULL
) {
1100 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
1105 static bool tb_cmp(const void *ap
, const void *bp
)
1107 const TranslationBlock
*a
= ap
;
1108 const TranslationBlock
*b
= bp
;
1110 return a
->pc
== b
->pc
&&
1111 a
->cs_base
== b
->cs_base
&&
1112 a
->flags
== b
->flags
&&
1113 (tb_cflags(a
) & CF_HASH_MASK
) == (tb_cflags(b
) & CF_HASH_MASK
) &&
1114 a
->trace_vcpu_dstate
== b
->trace_vcpu_dstate
&&
1115 a
->page_addr
[0] == b
->page_addr
[0] &&
1116 a
->page_addr
[1] == b
->page_addr
[1];
1119 static void tb_htable_init(void)
1121 unsigned int mode
= QHT_MODE_AUTO_RESIZE
;
1123 qht_init(&tb_ctx
.htable
, tb_cmp
, CODE_GEN_HTABLE_SIZE
, mode
);
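
/*
 * Illustrative note (not part of the original code): the hash table is keyed
 * by tb_hash_func() over (phys_pc, pc, flags, cflags & CF_HASH_MASK,
 * trace_vcpu_dstate); tb_cmp() above additionally compares cs_base and both
 * page_addr[] entries, so that hash lookups and the duplicate detection in
 * tb_link_page() only ever match a truly equivalent, already-translated TB.
 */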
1126 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1127 (in bytes) allocated to the translation buffer. Zero means default
1129 void tcg_exec_init(unsigned long tb_size
)
1135 code_gen_alloc(tb_size
);
1136 #if defined(CONFIG_SOFTMMU)
1137 /* There's no guest base to take into account, so go ahead and
1138 initialize the prologue now. */
1139 tcg_prologue_init(tcg_ctx
);
1143 /* call with @p->lock held */
1144 static inline void invalidate_page_bitmap(PageDesc
*p
)
1146 assert_page_locked(p
);
1147 #ifdef CONFIG_SOFTMMU
1148 g_free(p
->code_bitmap
);
1149 p
->code_bitmap
= NULL
;
1150 p
->code_write_count
= 0;
1154 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1155 static void page_flush_tb_1(int level
, void **lp
)
1165 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
1167 pd
[i
].first_tb
= (uintptr_t)NULL
;
1168 invalidate_page_bitmap(pd
+ i
);
1169 page_unlock(&pd
[i
]);
1174 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
1175 page_flush_tb_1(level
- 1, pp
+ i
);
1180 static void page_flush_tb(void)
1182 int i
, l1_sz
= v_l1_size
;
1184 for (i
= 0; i
< l1_sz
; i
++) {
1185 page_flush_tb_1(v_l2_levels
, l1_map
+ i
);
1189 static gboolean
tb_host_size_iter(gpointer key
, gpointer value
, gpointer data
)
1191 const TranslationBlock
*tb
= value
;
1192 size_t *size
= data
;
1194 *size
+= tb
->tc
.size
;
1198 /* flush all the translation blocks */
1199 static void do_tb_flush(CPUState
*cpu
, run_on_cpu_data tb_flush_count
)
1201 bool did_flush
= false;
    /* If it has already been done on request of another CPU,
1207 if (tb_ctx
.tb_flush_count
!= tb_flush_count
.host_int
) {
1212 if (DEBUG_TB_FLUSH_GATE
) {
1213 size_t nb_tbs
= tcg_nb_tbs();
1214 size_t host_size
= 0;
1216 tcg_tb_foreach(tb_host_size_iter
, &host_size
);
1217 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1218 tcg_code_size(), nb_tbs
, nb_tbs
> 0 ? host_size
/ nb_tbs
: 0);
1222 cpu_tb_jmp_cache_clear(cpu
);
1225 qht_reset_size(&tb_ctx
.htable
, CODE_GEN_HTABLE_SIZE
);
1228 tcg_region_reset_all();
1229 /* XXX: flush processor icache at this point if cache flush is
1231 atomic_mb_set(&tb_ctx
.tb_flush_count
, tb_ctx
.tb_flush_count
+ 1);
1236 qemu_plugin_flush_cb();
1240 void tb_flush(CPUState
*cpu
)
1242 if (tcg_enabled()) {
1243 unsigned tb_flush_count
= atomic_mb_read(&tb_ctx
.tb_flush_count
);
1245 if (cpu_in_exclusive_context(cpu
)) {
1246 do_tb_flush(cpu
, RUN_ON_CPU_HOST_INT(tb_flush_count
));
1248 async_safe_run_on_cpu(cpu
, do_tb_flush
,
1249 RUN_ON_CPU_HOST_INT(tb_flush_count
));
1255 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1256 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1257 * and let the optimizer get rid of them by wrapping their user-only callers
1258 * with if (DEBUG_TB_CHECK_GATE).
1260 #ifdef CONFIG_USER_ONLY
1262 static void do_tb_invalidate_check(void *p
, uint32_t hash
, void *userp
)
1264 TranslationBlock
*tb
= p
;
1265 target_ulong addr
= *(target_ulong
*)userp
;
1267 if (!(addr
+ TARGET_PAGE_SIZE
<= tb
->pc
|| addr
>= tb
->pc
+ tb
->size
)) {
1268 printf("ERROR invalidate: address=" TARGET_FMT_lx
1269 " PC=%08lx size=%04x\n", addr
, (long)tb
->pc
, tb
->size
);
1273 /* verify that all the pages have correct rights for code
1275 * Called with mmap_lock held.
1277 static void tb_invalidate_check(target_ulong address
)
1279 address
&= TARGET_PAGE_MASK
;
1280 qht_iter(&tb_ctx
.htable
, do_tb_invalidate_check
, &address
);
1283 static void do_tb_page_check(void *p
, uint32_t hash
, void *userp
)
1285 TranslationBlock
*tb
= p
;
1288 flags1
= page_get_flags(tb
->pc
);
1289 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
1290 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
1291 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1292 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
1296 /* verify that all the pages have correct rights for code */
1297 static void tb_page_check(void)
1299 qht_iter(&tb_ctx
.htable
, do_tb_page_check
, NULL
);
1302 #endif /* CONFIG_USER_ONLY */
1305 * user-mode: call with mmap_lock held
1306 * !user-mode: call with @pd->lock held
1308 static inline void tb_page_remove(PageDesc
*pd
, TranslationBlock
*tb
)
1310 TranslationBlock
*tb1
;
1314 assert_page_locked(pd
);
1315 pprev
= &pd
->first_tb
;
1316 PAGE_FOR_EACH_TB(pd
, tb1
, n1
) {
1318 *pprev
= tb1
->page_next
[n1
];
1321 pprev
= &tb1
->page_next
[n1
];
1323 g_assert_not_reached();
1326 /* remove @orig from its @n_orig-th jump list */
1327 static inline void tb_remove_from_jmp_list(TranslationBlock
*orig
, int n_orig
)
1329 uintptr_t ptr
, ptr_locked
;
1330 TranslationBlock
*dest
;
1331 TranslationBlock
*tb
;
1335 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1336 ptr
= atomic_or_fetch(&orig
->jmp_dest
[n_orig
], 1);
1337 dest
= (TranslationBlock
*)(ptr
& ~1);
1342 qemu_spin_lock(&dest
->jmp_lock
);
1344 * While acquiring the lock, the jump might have been removed if the
1345 * destination TB was invalidated; check again.
1347 ptr_locked
= atomic_read(&orig
->jmp_dest
[n_orig
]);
1348 if (ptr_locked
!= ptr
) {
1349 qemu_spin_unlock(&dest
->jmp_lock
);
1351 * The only possibility is that the jump was unlinked via
1352 * tb_jump_unlink(dest). Seeing here another destination would be a bug,
1353 * because we set the LSB above.
1355 g_assert(ptr_locked
== 1 && dest
->cflags
& CF_INVALID
);
1359 * We first acquired the lock, and since the destination pointer matches,
1360 * we know for sure that @orig is in the jmp list.
1362 pprev
= &dest
->jmp_list_head
;
1363 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1364 if (tb
== orig
&& n
== n_orig
) {
1365 *pprev
= tb
->jmp_list_next
[n
];
1366 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1367 qemu_spin_unlock(&dest
->jmp_lock
);
1370 pprev
= &tb
->jmp_list_next
[n
];
1372 g_assert_not_reached();
1375 /* reset the jump entry 'n' of a TB so that it is not chained to
1377 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
1379 uintptr_t addr
= (uintptr_t)(tb
->tc
.ptr
+ tb
->jmp_reset_offset
[n
]);
1380 tb_set_jmp_target(tb
, n
, addr
);
1383 /* remove any jumps to the TB */
1384 static inline void tb_jmp_unlink(TranslationBlock
*dest
)
1386 TranslationBlock
*tb
;
1389 qemu_spin_lock(&dest
->jmp_lock
);
1391 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1392 tb_reset_jump(tb
, n
);
1393 atomic_and(&tb
->jmp_dest
[n
], (uintptr_t)NULL
| 1);
1394 /* No need to clear the list entry; setting the dest ptr is enough */
1396 dest
->jmp_list_head
= (uintptr_t)NULL
;
1398 qemu_spin_unlock(&dest
->jmp_lock
);
1402 * In user-mode, call with mmap_lock held.
1403 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1406 static void do_tb_phys_invalidate(TranslationBlock
*tb
, bool rm_from_page_list
)
1411 tb_page_addr_t phys_pc
;
1413 assert_memory_lock();
1415 /* make sure no further incoming jumps will be chained to this TB */
1416 qemu_spin_lock(&tb
->jmp_lock
);
1417 atomic_set(&tb
->cflags
, tb
->cflags
| CF_INVALID
);
1418 qemu_spin_unlock(&tb
->jmp_lock
);
1420 /* remove the TB from the hash list */
1421 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1422 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb_cflags(tb
) & CF_HASH_MASK
,
1423 tb
->trace_vcpu_dstate
);
1424 if (!(tb
->cflags
& CF_NOCACHE
) &&
1425 !qht_remove(&tb_ctx
.htable
, tb
, h
)) {
1429 /* remove the TB from the page list */
1430 if (rm_from_page_list
) {
1431 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
1432 tb_page_remove(p
, tb
);
1433 invalidate_page_bitmap(p
);
1434 if (tb
->page_addr
[1] != -1) {
1435 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
1436 tb_page_remove(p
, tb
);
1437 invalidate_page_bitmap(p
);
1441 /* remove the TB from the hash list */
1442 h
= tb_jmp_cache_hash_func(tb
->pc
);
1444 if (atomic_read(&cpu
->tb_jmp_cache
[h
]) == tb
) {
1445 atomic_set(&cpu
->tb_jmp_cache
[h
], NULL
);
1449 /* suppress this TB from the two jump lists */
1450 tb_remove_from_jmp_list(tb
, 0);
1451 tb_remove_from_jmp_list(tb
, 1);
1453 /* suppress any remaining jumps to this TB */
1456 atomic_set(&tcg_ctx
->tb_phys_invalidate_count
,
1457 tcg_ctx
->tb_phys_invalidate_count
+ 1);
1460 static void tb_phys_invalidate__locked(TranslationBlock
*tb
)
1462 do_tb_phys_invalidate(tb
, true);
1465 /* invalidate one TB
1467 * Called with mmap_lock held in user-mode.
1469 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
1471 if (page_addr
== -1 && tb
->page_addr
[0] != -1) {
1473 do_tb_phys_invalidate(tb
, true);
1476 do_tb_phys_invalidate(tb
, false);
1480 #ifdef CONFIG_SOFTMMU
1481 /* call with @p->lock held */
1482 static void build_page_bitmap(PageDesc
*p
)
1484 int n
, tb_start
, tb_end
;
1485 TranslationBlock
*tb
;
1487 assert_page_locked(p
);
1488 p
->code_bitmap
= bitmap_new(TARGET_PAGE_SIZE
);
1490 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1491 /* NOTE: this is subtle as a TB may span two physical pages */
1493 /* NOTE: tb_end may be after the end of the page, but
1494 it is not a problem */
1495 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1496 tb_end
= tb_start
+ tb
->size
;
1497 if (tb_end
> TARGET_PAGE_SIZE
) {
1498 tb_end
= TARGET_PAGE_SIZE
;
1502 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1504 bitmap_set(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1509 /* add the tb in the target page and protect it if necessary
1511 * Called with mmap_lock held for user-mode emulation.
1512 * Called with @p->lock held in !user-mode.
1514 static inline void tb_page_add(PageDesc
*p
, TranslationBlock
*tb
,
1515 unsigned int n
, tb_page_addr_t page_addr
)
1517 #ifndef CONFIG_USER_ONLY
1518 bool page_already_protected
;
1521 assert_page_locked(p
);
1523 tb
->page_addr
[n
] = page_addr
;
1524 tb
->page_next
[n
] = p
->first_tb
;
1525 #ifndef CONFIG_USER_ONLY
1526 page_already_protected
= p
->first_tb
!= (uintptr_t)NULL
;
1528 p
->first_tb
= (uintptr_t)tb
| n
;
1529 invalidate_page_bitmap(p
);
1531 #if defined(CONFIG_USER_ONLY)
1532 if (p
->flags
& PAGE_WRITE
) {
1537 /* force the host page as non writable (writes will have a
1538 page fault + mprotect overhead) */
1539 page_addr
&= qemu_host_page_mask
;
1541 for (addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1542 addr
+= TARGET_PAGE_SIZE
) {
1544 p2
= page_find(addr
>> TARGET_PAGE_BITS
);
1549 p2
->flags
&= ~PAGE_WRITE
;
1551 mprotect(g2h(page_addr
), qemu_host_page_size
,
1552 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1553 if (DEBUG_TB_INVALIDATE_GATE
) {
1554 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT
"\n", page_addr
);
1558 /* if some code is already present, then the pages are already
1559 protected. So we handle the case where only the first TB is
1560 allocated in a physical page */
1561 if (!page_already_protected
) {
1562 tlb_protect_code(page_addr
);
1567 /* add a new TB and link it to the physical page tables. phys_page2 is
1568 * (-1) to indicate that only one page contains the TB.
1570 * Called with mmap_lock held for user-mode emulation.
1572 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1573 * Note that in !user-mode, another thread might have already added a TB
1574 * for the same block of guest code that @tb corresponds to. In that case,
1575 * the caller should discard the original @tb, and use instead the returned TB.
1577 static TranslationBlock
*
1578 tb_link_page(TranslationBlock
*tb
, tb_page_addr_t phys_pc
,
1579 tb_page_addr_t phys_page2
)
1582 PageDesc
*p2
= NULL
;
1584 assert_memory_lock();
1586 if (phys_pc
== -1) {
1588 * If the TB is not associated with a physical RAM page then
1589 * it must be a temporary one-insn TB, and we have nothing to do
1590 * except fill in the page_addr[] fields.
1592 assert(tb
->cflags
& CF_NOCACHE
);
1593 tb
->page_addr
[0] = tb
->page_addr
[1] = -1;
1598 * Add the TB to the page list, acquiring first the pages's locks.
1599 * We keep the locks held until after inserting the TB in the hash table,
1600 * so that if the insertion fails we know for sure that the TBs are still
1601 * in the page descriptors.
1602 * Note that inserting into the hash table first isn't an option, since
1603 * we can only insert TBs that are fully initialized.
1605 page_lock_pair(&p
, phys_pc
, &p2
, phys_page2
, 1);
1606 tb_page_add(p
, tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1608 tb_page_add(p2
, tb
, 1, phys_page2
);
1610 tb
->page_addr
[1] = -1;
1613 if (!(tb
->cflags
& CF_NOCACHE
)) {
1614 void *existing_tb
= NULL
;
1617 /* add in the hash table */
1618 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb
->cflags
& CF_HASH_MASK
,
1619 tb
->trace_vcpu_dstate
);
1620 qht_insert(&tb_ctx
.htable
, tb
, h
, &existing_tb
);
1622 /* remove TB from the page(s) if we couldn't insert it */
1623 if (unlikely(existing_tb
)) {
1624 tb_page_remove(p
, tb
);
1625 invalidate_page_bitmap(p
);
1627 tb_page_remove(p2
, tb
);
1628 invalidate_page_bitmap(p2
);
1634 if (p2
&& p2
!= p
) {
1639 #ifdef CONFIG_USER_ONLY
1640 if (DEBUG_TB_CHECK_GATE
) {
1647 /* Called with mmap_lock held for user mode emulation. */
1648 TranslationBlock
*tb_gen_code(CPUState
*cpu
,
1649 target_ulong pc
, target_ulong cs_base
,
1650 uint32_t flags
, int cflags
)
1652 CPUArchState
*env
= cpu
->env_ptr
;
1653 TranslationBlock
*tb
, *existing_tb
;
1654 tb_page_addr_t phys_pc
, phys_page2
;
1655 target_ulong virt_page2
;
1656 tcg_insn_unit
*gen_code_buf
;
1657 int gen_code_size
, search_size
, max_insns
;
1658 #ifdef CONFIG_PROFILER
1659 TCGProfile
*prof
= &tcg_ctx
->prof
;
1663 assert_memory_lock();
1665 phys_pc
= get_page_addr_code(env
, pc
);
1667 if (phys_pc
== -1) {
1668 /* Generate a temporary TB with 1 insn in it */
1669 cflags
&= ~CF_COUNT_MASK
;
1670 cflags
|= CF_NOCACHE
| 1;
1673 cflags
&= ~CF_CLUSTER_MASK
;
1674 cflags
|= cpu
->cluster_index
<< CF_CLUSTER_SHIFT
;
1676 max_insns
= cflags
& CF_COUNT_MASK
;
1677 if (max_insns
== 0) {
1678 max_insns
= CF_COUNT_MASK
;
1680 if (max_insns
> TCG_MAX_INSNS
) {
1681 max_insns
= TCG_MAX_INSNS
;
1683 if (cpu
->singlestep_enabled
|| singlestep
) {
1688 tb
= tcg_tb_alloc(tcg_ctx
);
1689 if (unlikely(!tb
)) {
1690 /* flush must be done */
1693 /* Make the execution loop process the flush as soon as possible. */
1694 cpu
->exception_index
= EXCP_INTERRUPT
;
1698 gen_code_buf
= tcg_ctx
->code_gen_ptr
;
1699 tb
->tc
.ptr
= gen_code_buf
;
1701 tb
->cs_base
= cs_base
;
1703 tb
->cflags
= cflags
;
1705 tb
->trace_vcpu_dstate
= *cpu
->trace_dstate
;
1706 tcg_ctx
->tb_cflags
= cflags
;
1709 #ifdef CONFIG_PROFILER
1710 /* includes aborted translations because of exceptions */
1711 atomic_set(&prof
->tb_count1
, prof
->tb_count1
+ 1);
1712 ti
= profile_getclock();
1715 tcg_func_start(tcg_ctx
);
1717 tcg_ctx
->cpu
= env_cpu(env
);
1718 gen_intermediate_code(cpu
, tb
, max_insns
);
1719 tcg_ctx
->cpu
= NULL
;
1721 trace_translate_block(tb
, tb
->pc
, tb
->tc
.ptr
);
1723 /* generate machine code */
1724 tb
->jmp_reset_offset
[0] = TB_JMP_RESET_OFFSET_INVALID
;
1725 tb
->jmp_reset_offset
[1] = TB_JMP_RESET_OFFSET_INVALID
;
1726 tcg_ctx
->tb_jmp_reset_offset
= tb
->jmp_reset_offset
;
1727 if (TCG_TARGET_HAS_direct_jump
) {
1728 tcg_ctx
->tb_jmp_insn_offset
= tb
->jmp_target_arg
;
1729 tcg_ctx
->tb_jmp_target_addr
= NULL
;
1731 tcg_ctx
->tb_jmp_insn_offset
= NULL
;
1732 tcg_ctx
->tb_jmp_target_addr
= tb
->jmp_target_arg
;
1735 #ifdef CONFIG_PROFILER
1736 atomic_set(&prof
->tb_count
, prof
->tb_count
+ 1);
1737 atomic_set(&prof
->interm_time
, prof
->interm_time
+ profile_getclock() - ti
);
1738 ti
= profile_getclock();
1741 gen_code_size
= tcg_gen_code(tcg_ctx
, tb
);
1742 if (unlikely(gen_code_size
< 0)) {
1743 switch (gen_code_size
) {
1746 * Overflow of code_gen_buffer, or the current slice of it.
1748 * TODO: We don't need to re-do gen_intermediate_code, nor
1749 * should we re-do the tcg optimization currently hidden
1750 * inside tcg_gen_code. All that should be required is to
1751 * flush the TBs, allocate a new TB, re-initialize it per
1752 * above, and re-do the actual code generation.
1754 goto buffer_overflow
;
1758 * The code generated for the TranslationBlock is too large.
1759 * The maximum size allowed by the unwind info is 64k.
1760 * There may be stricter constraints from relocations
1761 * in the tcg backend.
1763 * Try again with half as many insns as we attempted this time.
1764 * If a single insn overflows, there's a bug somewhere...
1766 max_insns
= tb
->icount
;
1767 assert(max_insns
> 1);
1772 g_assert_not_reached();
1775 search_size
= encode_search(tb
, (void *)gen_code_buf
+ gen_code_size
);
1776 if (unlikely(search_size
< 0)) {
1777 goto buffer_overflow
;
1779 tb
->tc
.size
= gen_code_size
;
1781 #ifdef CONFIG_PROFILER
1782 atomic_set(&prof
->code_time
, prof
->code_time
+ profile_getclock() - ti
);
1783 atomic_set(&prof
->code_in_len
, prof
->code_in_len
+ tb
->size
);
1784 atomic_set(&prof
->code_out_len
, prof
->code_out_len
+ gen_code_size
);
1785 atomic_set(&prof
->search_out_len
, prof
->search_out_len
+ search_size
);
1789 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM
) &&
1790 qemu_log_in_addr_range(tb
->pc
)) {
1791 FILE *logfile
= qemu_log_lock();
1792 qemu_log("OUT: [size=%d]\n", gen_code_size
);
1793 if (tcg_ctx
->data_gen_ptr
) {
1794 size_t code_size
= tcg_ctx
->data_gen_ptr
- tb
->tc
.ptr
;
1795 size_t data_size
= gen_code_size
- code_size
;
1798 log_disas(tb
->tc
.ptr
, code_size
);
1800 for (i
= 0; i
< data_size
; i
+= sizeof(tcg_target_ulong
)) {
1801 if (sizeof(tcg_target_ulong
) == 8) {
1802 qemu_log("0x%08" PRIxPTR
": .quad 0x%016" PRIx64
"\n",
1803 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1804 *(uint64_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1806 qemu_log("0x%08" PRIxPTR
": .long 0x%08x\n",
1807 (uintptr_t)tcg_ctx
->data_gen_ptr
+ i
,
1808 *(uint32_t *)(tcg_ctx
->data_gen_ptr
+ i
));
1812 log_disas(tb
->tc
.ptr
, gen_code_size
);
1816 qemu_log_unlock(logfile
);
1820 atomic_set(&tcg_ctx
->code_gen_ptr
, (void *)
1821 ROUND_UP((uintptr_t)gen_code_buf
+ gen_code_size
+ search_size
,
1824 /* init jump list */
1825 qemu_spin_init(&tb
->jmp_lock
);
1826 tb
->jmp_list_head
= (uintptr_t)NULL
;
1827 tb
->jmp_list_next
[0] = (uintptr_t)NULL
;
1828 tb
->jmp_list_next
[1] = (uintptr_t)NULL
;
1829 tb
->jmp_dest
[0] = (uintptr_t)NULL
;
1830 tb
->jmp_dest
[1] = (uintptr_t)NULL
;
1832 /* init original jump addresses which have been set during tcg_gen_code() */
1833 if (tb
->jmp_reset_offset
[0] != TB_JMP_RESET_OFFSET_INVALID
) {
1834 tb_reset_jump(tb
, 0);
1836 if (tb
->jmp_reset_offset
[1] != TB_JMP_RESET_OFFSET_INVALID
) {
1837 tb_reset_jump(tb
, 1);
1840 /* check next page if needed */
1841 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1843 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1844 phys_page2
= get_page_addr_code(env
, virt_page2
);
1847 * No explicit memory barrier is required -- tb_link_page() makes the
1848 * TB visible in a consistent state.
1850 existing_tb
= tb_link_page(tb
, phys_pc
, phys_page2
);
1851 /* if the TB already exists, discard what we just translated */
1852 if (unlikely(existing_tb
!= tb
)) {
1853 uintptr_t orig_aligned
= (uintptr_t)gen_code_buf
;
1855 orig_aligned
-= ROUND_UP(sizeof(*tb
), qemu_icache_linesize
);
1856 atomic_set(&tcg_ctx
->code_gen_ptr
, (void *)orig_aligned
);
1864 * @p must be non-NULL.
1865 * user-mode: call with mmap_lock held.
1866 * !user-mode: call with all @pages locked.
1869 tb_invalidate_phys_page_range__locked(struct page_collection
*pages
,
1870 PageDesc
*p
, tb_page_addr_t start
,
1874 TranslationBlock
*tb
;
1875 tb_page_addr_t tb_start
, tb_end
;
1877 #ifdef TARGET_HAS_PRECISE_SMC
1878 CPUState
*cpu
= current_cpu
;
1879 CPUArchState
*env
= NULL
;
1880 bool current_tb_not_found
= retaddr
!= 0;
1881 bool current_tb_modified
= false;
1882 TranslationBlock
*current_tb
= NULL
;
1883 target_ulong current_pc
= 0;
1884 target_ulong current_cs_base
= 0;
1885 uint32_t current_flags
= 0;
1886 #endif /* TARGET_HAS_PRECISE_SMC */
1888 assert_page_locked(p
);
1890 #if defined(TARGET_HAS_PRECISE_SMC)
1896 /* we remove all the TBs in the range [start, end[ */
1897 /* XXX: see if in some cases it could be faster to invalidate all
1899 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1900 assert_page_locked(p
);
1901 /* NOTE: this is subtle as a TB may span two physical pages */
1903 /* NOTE: tb_end may be after the end of the page, but
1904 it is not a problem */
1905 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1906 tb_end
= tb_start
+ tb
->size
;
1908 tb_start
= tb
->page_addr
[1];
1909 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1911 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1912 #ifdef TARGET_HAS_PRECISE_SMC
1913 if (current_tb_not_found
) {
1914 current_tb_not_found
= false;
1915 /* now we have a real cpu fault */
1916 current_tb
= tcg_tb_lookup(retaddr
);
1918 if (current_tb
== tb
&&
1919 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
1921 * If we are modifying the current TB, we must stop
1922 * its execution. We could be more precise by checking
1923 * that the modification is after the current PC, but it
1924 * would require a specialized function to partially
1925 * restore the CPU state.
1927 current_tb_modified
= true;
1928 cpu_restore_state_from_tb(cpu
, current_tb
, retaddr
, true);
1929 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1932 #endif /* TARGET_HAS_PRECISE_SMC */
1933 tb_phys_invalidate__locked(tb
);
1936 #if !defined(CONFIG_USER_ONLY)
1937 /* if no code remaining, no need to continue to use slow writes */
1939 invalidate_page_bitmap(p
);
1940 tlb_unprotect_code(start
);
1943 #ifdef TARGET_HAS_PRECISE_SMC
1944 if (current_tb_modified
) {
1945 page_collection_unlock(pages
);
1946 /* Force execution of one insn next time. */
1947 cpu
->cflags_next_tb
= 1 | curr_cflags();
1949 cpu_loop_exit_noexc(cpu
);
1955 * Invalidate all TBs which intersect with the target physical address range
1956 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1957 * 'is_cpu_write_access' should be true if called from a real cpu write
1958 * access: the virtual CPU will exit the current TB if code is modified inside
1961 * Called with mmap_lock held for user-mode emulation
1963 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
)
1965 struct page_collection
*pages
;
1968 assert_memory_lock();
1970 p
= page_find(start
>> TARGET_PAGE_BITS
);
1974 pages
= page_collection_lock(start
, end
);
1975 tb_invalidate_phys_page_range__locked(pages
, p
, start
, end
, 0);
1976 page_collection_unlock(pages
);
1980 * Invalidate all TBs which intersect with the target physical address range
1981 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1982 * 'is_cpu_write_access' should be true if called from a real cpu write
1983 * access: the virtual CPU will exit the current TB if code is modified inside
1986 * Called with mmap_lock held for user-mode emulation.
1988 #ifdef CONFIG_SOFTMMU
1989 void tb_invalidate_phys_range(ram_addr_t start
, ram_addr_t end
)
1991 void tb_invalidate_phys_range(target_ulong start
, target_ulong end
)
1994 struct page_collection
*pages
;
1995 tb_page_addr_t next
;
1997 assert_memory_lock();
1999 pages
= page_collection_lock(start
, end
);
2000 for (next
= (start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
2002 start
= next
, next
+= TARGET_PAGE_SIZE
) {
2003 PageDesc
*pd
= page_find(start
>> TARGET_PAGE_BITS
);
2004 tb_page_addr_t bound
= MIN(next
, end
);
2009 tb_invalidate_phys_page_range__locked(pages
, pd
, start
, bound
, 0);
2011 page_collection_unlock(pages
);
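
/*
 * Illustrative note (not part of the original code): the loop above walks the
 * range one guest page at a time, so e.g. an invalidate that starts a few
 * bytes before a page boundary and ends after it results in two calls to
 * tb_invalidate_phys_page_range__locked(), one per page, each with 'bound'
 * clamped to MIN(next, end).
 */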
2014 #ifdef CONFIG_SOFTMMU
2015 /* len must be <= 8 and start must be a multiple of len.
2016 * Called via softmmu_template.h when code areas are written to with
2017 * iothread mutex not held.
2019 * Call with all @pages in the range [@start, @start + len[ locked.
2021 void tb_invalidate_phys_page_fast(struct page_collection
*pages
,
2022 tb_page_addr_t start
, int len
,
2027 assert_memory_lock();
2029 p
= page_find(start
>> TARGET_PAGE_BITS
);
2034 assert_page_locked(p
);
2035 if (!p
->code_bitmap
&&
2036 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
) {
2037 build_page_bitmap(p
);
2039 if (p
->code_bitmap
) {
2043 nr
= start
& ~TARGET_PAGE_MASK
;
2044 b
= p
->code_bitmap
[BIT_WORD(nr
)] >> (nr
& (BITS_PER_LONG
- 1));
2045 if (b
& ((1 << len
) - 1)) {
2050 tb_invalidate_phys_page_range__locked(pages
, p
, start
, start
+ len
,
2055 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2056 * host PC of the faulting store instruction that caused this invalidate.
2057 * Returns true if the caller needs to abort execution of the current
2058 * TB (because it was modified by this store and the guest CPU has
2059 * precise-SMC semantics).
2061 static bool tb_invalidate_phys_page(tb_page_addr_t addr
, uintptr_t pc
)
2063 TranslationBlock
*tb
;
2066 #ifdef TARGET_HAS_PRECISE_SMC
2067 TranslationBlock
*current_tb
= NULL
;
2068 CPUState
*cpu
= current_cpu
;
2069 CPUArchState
*env
= NULL
;
2070 int current_tb_modified
= 0;
2071 target_ulong current_pc
= 0;
2072 target_ulong current_cs_base
= 0;
2073 uint32_t current_flags
= 0;
2076 assert_memory_lock();
2078 addr
&= TARGET_PAGE_MASK
;
2079 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2084 #ifdef TARGET_HAS_PRECISE_SMC
2085 if (p
->first_tb
&& pc
!= 0) {
2086 current_tb
= tcg_tb_lookup(pc
);
2092 assert_page_locked(p
);
2093 PAGE_FOR_EACH_TB(p
, tb
, n
) {
2094 #ifdef TARGET_HAS_PRECISE_SMC
2095 if (current_tb
== tb
&&
2096 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
2097 /* If we are modifying the current TB, we must stop
2098 its execution. We could be more precise by checking
2099 that the modification is after the current PC, but it
2100 would require a specialized function to partially
2101 restore the CPU state */
2103 current_tb_modified
= 1;
2104 cpu_restore_state_from_tb(cpu
, current_tb
, pc
, true);
2105 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
2108 #endif /* TARGET_HAS_PRECISE_SMC */
2109 tb_phys_invalidate(tb
, addr
);
2111 p
->first_tb
= (uintptr_t)NULL
;
2112 #ifdef TARGET_HAS_PRECISE_SMC
2113 if (current_tb_modified
) {
2114 /* Force execution of one insn next time. */
2115 cpu
->cflags_next_tb
= 1 | curr_cflags();
2124 /* user-mode: call with mmap_lock held */
2125 void tb_check_watchpoint(CPUState
*cpu
, uintptr_t retaddr
)
2127 TranslationBlock
*tb
;
2129 assert_memory_lock();
2131 tb
= tcg_tb_lookup(retaddr
);
2133 /* We can use retranslation to find the PC. */
2134 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
2135 tb_phys_invalidate(tb
, -1);
2137 /* The exception probably happened in a helper. The CPU state should
2138 have been saved before calling it. Fetch the PC from there. */
2139 CPUArchState
*env
= cpu
->env_ptr
;
2140 target_ulong pc
, cs_base
;
2141 tb_page_addr_t addr
;
2144 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
2145 addr
= get_page_addr_code(env
, pc
);
2147 tb_invalidate_phys_range(addr
, addr
+ 1);
2152 #ifndef CONFIG_USER_ONLY
2153 /* in deterministic execution mode, instructions doing device I/Os
2154 * must be at the end of the TB.
2156 * Called by softmmu_template.h, with iothread mutex not held.
2158 void cpu_io_recompile(CPUState
*cpu
, uintptr_t retaddr
)
2160 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2161 CPUArchState
*env
= cpu
->env_ptr
;
2163 TranslationBlock
*tb
;
2166 tb
= tcg_tb_lookup(retaddr
);
2168 cpu_abort(cpu
, "cpu_io_recompile: could not find TB for pc=%p",
2171 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
2173 /* On MIPS and SH, delay slot instructions can only be restarted if
2174 they were already the first instruction in the TB. If this is not
2175 the first instruction in a TB then re-execute the preceding
2178 #if defined(TARGET_MIPS)
2179 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0
2180 && env
->active_tc
.PC
!= tb
->pc
) {
2181 env
->active_tc
.PC
-= (env
->hflags
& MIPS_HFLAG_B16
? 2 : 4);
2182 cpu_neg(cpu
)->icount_decr
.u16
.low
++;
2183 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
2186 #elif defined(TARGET_SH4)
2187 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
2188 && env
->pc
!= tb
->pc
) {
2190 cpu_neg(cpu
)->icount_decr
.u16
.low
++;
2191 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
2196 /* Generate a new TB executing the I/O insn. */
2197 cpu
->cflags_next_tb
= curr_cflags() | CF_LAST_IO
| n
;
2199 if (tb_cflags(tb
) & CF_NOCACHE
) {
2201 /* Invalidate original TB if this TB was generated in
2202 * cpu_exec_nocache() */
2203 tb_phys_invalidate(tb
->orig_tb
, -1);
2208 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2209 * the first in the TB) then we end up generating a whole new TB and
2210 * repeating the fault, which is horribly inefficient.
2211 * Better would be to execute just this insn uncached, or generate a
2214 cpu_loop_exit_noexc(cpu
);
2217 static void tb_jmp_cache_clear_page(CPUState
*cpu
, target_ulong page_addr
)
2219 unsigned int i
, i0
= tb_jmp_cache_hash_page(page_addr
);
2221 for (i
= 0; i
< TB_JMP_PAGE_SIZE
; i
++) {
2222 atomic_set(&cpu
->tb_jmp_cache
[i0
+ i
], NULL
);
2226 void tb_flush_jmp_cache(CPUState
*cpu
, target_ulong addr
)
2228 /* Discard jump cache entries for any tb which might potentially
2229 overlap the flushed page. */
2230 tb_jmp_cache_clear_page(cpu
, addr
- TARGET_PAGE_SIZE
);
2231 tb_jmp_cache_clear_page(cpu
, addr
);
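
/*
 * Illustrative note (not part of the original code): a TB whose first
 * instruction lies near the end of page N can extend into page N + 1, and its
 * jump-cache entry is hashed on the TB's starting pc.  Flushing page N + 1
 * therefore also has to drop entries hashed on page N, which is why two pages
 * are cleared above.
 */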
2234 static void print_qht_statistics(struct qht_stats hst
)
2236 uint32_t hgram_opts
;
2240 if (!hst
.head_buckets
) {
2243 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2244 hst
.used_head_buckets
, hst
.head_buckets
,
2245 (double)hst
.used_head_buckets
/ hst
.head_buckets
* 100);
2247 hgram_opts
= QDIST_PR_BORDER
| QDIST_PR_LABELS
;
2248 hgram_opts
|= QDIST_PR_100X
| QDIST_PR_PERCENT
;
2249 if (qdist_xmax(&hst
.occupancy
) - qdist_xmin(&hst
.occupancy
) == 1) {
2250 hgram_opts
|= QDIST_PR_NODECIMAL
;
2252 hgram
= qdist_pr(&hst
.occupancy
, 10, hgram_opts
);
2253 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2254 qdist_avg(&hst
.occupancy
) * 100, hgram
);
2257 hgram_opts
= QDIST_PR_BORDER
| QDIST_PR_LABELS
;
2258 hgram_bins
= qdist_xmax(&hst
.chain
) - qdist_xmin(&hst
.chain
);
2259 if (hgram_bins
> 10) {
2263 hgram_opts
|= QDIST_PR_NODECIMAL
| QDIST_PR_NOBINRANGE
;
2265 hgram
= qdist_pr(&hst
.chain
, hgram_bins
, hgram_opts
);
2266 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
2267 qdist_avg(&hst
.chain
), hgram
);
2271 struct tb_tree_stats
{
2275 size_t max_target_size
;
2276 size_t direct_jmp_count
;
2277 size_t direct_jmp2_count
;
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}
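/*
 * Print a summary of the translation cache: generated-code usage, TB
 * size statistics, hash-table quality and flush/invalidate counters.
 */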
void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count %zu\n", nb_tbs);
    qemu_printf("TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes %zu\n", flush_elide);
}
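/* Print the per-opcode execution counts collected by TCG. */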
void dump_opcount_info(void)
{
    tcg_dump_op_count();
}
#else /* CONFIG_USER_ONLY */
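/*
 * User-mode variant of cpu_interrupt(): record the pending request and
 * force icount_decr negative so the generated code returns to the main
 * loop at its next interrupt check.
 */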
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};
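/*
 * Emit the region accumulated so far ([data->start, end)) to the callback,
 * then start a new region with protection @new_prot, or no region at all
 * if @new_prot is zero.
 */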
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
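/*
 * Recursively walk one level of the multi-level page map, merging runs
 * of pages with identical protection into regions; at @level == 0, @lp
 * points to a leaf array of PageDesc.
 */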
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
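/* Return the PAGE_* flags for a guest address, or 0 if it has no descriptor. */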
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
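/*
 * Check that [start, start + len) is mapped with at least the requested
 * PAGE_* protections.  Returns 0 on success and -1 on failure; pages that
 * were write-protected because they contain translated code are
 * unprotected on demand when PAGE_WRITE is requested.
 */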
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU