/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#define NO_CPU_IO_DEFS
#include "disas/disas.h"
#include "exec/exec-all.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-context.h"
/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif
/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 *
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_USER_ONLY
    unsigned long flags;
    void *target_data;
#endif
#ifdef CONFIG_SOFTMMU
    QemuSpin lock;
#endif
} PageDesc;
/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};
/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};
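
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the deadlock-avoidance rule described above, reduced to its essence. If
 * every thread acquires the locks it needs in ascending key order, no cycle
 * of "A holds X and waits for Y while B holds Y and waits for X" can form.
 * The names below (toy_cmp_index, toy_lock_in_order) are hypothetical and use
 * plain pthread mutexes so the example stands alone.
 */
#if 0   /* example only, never compiled */
#include <pthread.h>
#include <stdlib.h>

static int toy_cmp_index(const void *a, const void *b)
{
    size_t x = *(const size_t *)a, y = *(const size_t *)b;
    return (x > y) - (x < y);
}

/* Acquire locks[idx[0]], ..., locks[idx[n-1]] in ascending index order. */
static void toy_lock_in_order(pthread_mutex_t *locks, size_t *idx, size_t n)
{
    qsort(idx, n, sizeof(*idx), toy_cmp_index);
    for (size_t i = 0; i < n; i++) {
        pthread_mutex_lock(&locks[idx[i]]);
    }
}
#endif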
/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n) \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
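
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * how the tagged-pointer lists iterated above pack a 0/1 link index into
 * bit 0 of an aligned pointer. "ToyNode" and the helpers are hypothetical
 * names used only for this example.
 */
#if 0   /* example only, never compiled */
#include <stdint.h>

typedef struct ToyNode ToyNode;

/* Store a node pointer plus which of its two list links (0 or 1) is in use. */
static inline uintptr_t toy_tag(ToyNode *node, unsigned n)
{
    /* node must be at least 2-byte aligned, so bit 0 is free for the tag */
    return (uintptr_t)node | (n & 1);
}

static inline ToyNode *toy_untag(uintptr_t tagged, unsigned *n)
{
    *n = tagged & 1;                      /* recover the link index */
    return (ToyNode *)(tagged & ~(uintptr_t)1);
}
#endif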
/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 *
 * TODO: For user mode, see the caveat re host vs guest virtual
 * address spaces near GUEST_ADDR_MAX.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
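
/*
 * Worked example (added for illustration, not in the original): assuming
 * L1_MAP_ADDR_SPACE_BITS == 47, TARGET_PAGE_BITS == 12 and V_L2_BITS == 10,
 * page_table_config_init() computes:
 *
 *     v_l1_bits   = (47 - 12) % 10  = 5    (>= V_L1_MIN_BITS, kept as is)
 *     v_l1_size   = 1 << 5          = 32   entries of l1_map actually used
 *     v_l1_shift  = 47 - 12 - 5     = 30
 *     v_l2_levels = 30 / 10 - 1     = 2    intermediate levels below L1
 *
 * Sanity check: 5 + (2 + 1) * 10 = 35 = 47 - 12, i.e. the L1 bits plus the
 * intermediate and bottom levels together cover every page-number bit once.
 */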
/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(icount_enabled());
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }

    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            return true;
        }
    }
    return false;
}
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
        }
#endif
    }
#endif
static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, false);
}
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }

#else /* !CONFIG_USER_ONLY */
#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */
static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}
672 static inline struct page_entry
*
673 page_entry_new(PageDesc
*pd
, tb_page_addr_t index
)
675 struct page_entry
*pe
= g_malloc(sizeof(*pe
));
683 static void page_entry_destroy(gpointer p
)
685 struct page_entry
*pe
= p
;
687 g_assert(pe
->locked
);
692 /* returns false on success */
693 static bool page_entry_trylock(struct page_entry
*pe
)
697 busy
= qemu_spin_trylock(&pe
->pd
->lock
);
699 g_assert(!pe
->locked
);
701 page_lock__debug(pe
->pd
);
706 static void do_page_entry_lock(struct page_entry
*pe
)
709 g_assert(!pe
->locked
);
713 static gboolean
page_entry_lock(gpointer key
, gpointer value
, gpointer data
)
715 struct page_entry
*pe
= value
;
717 do_page_entry_lock(pe
);
721 static gboolean
page_entry_unlock(gpointer key
, gpointer value
, gpointer data
)
723 struct page_entry
*pe
= value
;
733 * Trylock a page, and if successful, add the page to a collection.
734 * Returns true ("busy") if the page could not be locked; false otherwise.
736 static bool page_trylock_add(struct page_collection
*set
, tb_page_addr_t addr
)
738 tb_page_addr_t index
= addr
>> TARGET_PAGE_BITS
;
739 struct page_entry
*pe
;
742 pe
= g_tree_lookup(set
->tree
, &index
);
747 pd
= page_find(index
);
752 pe
= page_entry_new(pd
, index
);
753 g_tree_insert(set
->tree
, &pe
->index
, pe
);
756 * If this is either (1) the first insertion or (2) a page whose index
757 * is higher than any other so far, just lock the page and move on.
759 if (set
->max
== NULL
|| pe
->index
> set
->max
->index
) {
761 do_page_entry_lock(pe
);
765 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
768 return page_entry_trylock(pe
);
771 static gint
tb_page_addr_cmp(gconstpointer ap
, gconstpointer bp
, gpointer udata
)
773 tb_page_addr_t a
= *(const tb_page_addr_t
*)ap
;
774 tb_page_addr_t b
= *(const tb_page_addr_t
*)bp
;
785 * Lock a range of pages ([@start,@end[) as well as the pages of all
787 * Locking order: acquire locks in ascending order of page index.
789 struct page_collection
*
790 page_collection_lock(tb_page_addr_t start
, tb_page_addr_t end
)
792 struct page_collection
*set
= g_malloc(sizeof(*set
));
793 tb_page_addr_t index
;
796 start
>>= TARGET_PAGE_BITS
;
797 end
>>= TARGET_PAGE_BITS
;
798 g_assert(start
<= end
);
800 set
->tree
= g_tree_new_full(tb_page_addr_cmp
, NULL
, NULL
,
803 assert_no_pages_locked();
806 g_tree_foreach(set
->tree
, page_entry_lock
, NULL
);
808 for (index
= start
; index
<= end
; index
++) {
809 TranslationBlock
*tb
;
812 pd
= page_find(index
);
816 if (page_trylock_add(set
, index
<< TARGET_PAGE_BITS
)) {
817 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
820 assert_page_locked(pd
);
821 PAGE_FOR_EACH_TB(pd
, tb
, n
) {
822 if (page_trylock_add(set
, tb
->page_addr
[0]) ||
823 (tb
->page_addr
[1] != -1 &&
824 page_trylock_add(set
, tb
->page_addr
[1]))) {
825 /* drop all locks, and reacquire in order */
826 g_tree_foreach(set
->tree
, page_entry_unlock
, NULL
);
834 void page_collection_unlock(struct page_collection
*set
)
836 /* entries are unlocked and freed via page_entry_destroy */
837 g_tree_destroy(set
->tree
);
841 #endif /* !CONFIG_USER_ONLY */
843 static void page_lock_pair(PageDesc
**ret_p1
, tb_page_addr_t phys1
,
844 PageDesc
**ret_p2
, tb_page_addr_t phys2
, bool alloc
)
847 tb_page_addr_t page1
;
848 tb_page_addr_t page2
;
850 assert_memory_lock();
851 g_assert(phys1
!= -1);
853 page1
= phys1
>> TARGET_PAGE_BITS
;
854 page2
= phys2
>> TARGET_PAGE_BITS
;
856 p1
= page_find_alloc(page1
, alloc
);
860 if (likely(phys2
== -1)) {
863 } else if (page1
== page2
) {
870 p2
= page_find_alloc(page2
, alloc
);
883 static bool tb_cmp(const void *ap
, const void *bp
)
885 const TranslationBlock
*a
= ap
;
886 const TranslationBlock
*b
= bp
;
888 return a
->pc
== b
->pc
&&
889 a
->cs_base
== b
->cs_base
&&
890 a
->flags
== b
->flags
&&
891 (tb_cflags(a
) & ~CF_INVALID
) == (tb_cflags(b
) & ~CF_INVALID
) &&
892 a
->trace_vcpu_dstate
== b
->trace_vcpu_dstate
&&
893 a
->page_addr
[0] == b
->page_addr
[0] &&
894 a
->page_addr
[1] == b
->page_addr
[1];
897 void tb_htable_init(void)
899 unsigned int mode
= QHT_MODE_AUTO_RESIZE
;
901 qht_init(&tb_ctx
.htable
, tb_cmp
, CODE_GEN_HTABLE_SIZE
, mode
);
904 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
905 static void page_flush_tb_1(int level
, void **lp
)
915 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
917 pd
[i
].first_tb
= (uintptr_t)NULL
;
923 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
924 page_flush_tb_1(level
- 1, pp
+ i
);
929 static void page_flush_tb(void)
931 int i
, l1_sz
= v_l1_size
;
933 for (i
= 0; i
< l1_sz
; i
++) {
934 page_flush_tb_1(v_l2_levels
, l1_map
+ i
);
938 static gboolean
tb_host_size_iter(gpointer key
, gpointer value
, gpointer data
)
940 const TranslationBlock
*tb
= value
;
943 *size
+= tb
->tc
.size
;
947 /* flush all the translation blocks */
948 static void do_tb_flush(CPUState
*cpu
, run_on_cpu_data tb_flush_count
)
950 bool did_flush
= false;
    /* If it has already been done on request of another CPU,
956 if (tb_ctx
.tb_flush_count
!= tb_flush_count
.host_int
) {
961 if (DEBUG_TB_FLUSH_GATE
) {
962 size_t nb_tbs
= tcg_nb_tbs();
963 size_t host_size
= 0;
965 tcg_tb_foreach(tb_host_size_iter
, &host_size
);
966 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
967 tcg_code_size(), nb_tbs
, nb_tbs
> 0 ? host_size
/ nb_tbs
: 0);
971 tcg_flush_jmp_cache(cpu
);
974 qht_reset_size(&tb_ctx
.htable
, CODE_GEN_HTABLE_SIZE
);
977 tcg_region_reset_all();
978 /* XXX: flush processor icache at this point if cache flush is
980 qatomic_mb_set(&tb_ctx
.tb_flush_count
, tb_ctx
.tb_flush_count
+ 1);
985 qemu_plugin_flush_cb();
989 void tb_flush(CPUState
*cpu
)
992 unsigned tb_flush_count
= qatomic_mb_read(&tb_ctx
.tb_flush_count
);
994 if (cpu_in_exclusive_context(cpu
)) {
995 do_tb_flush(cpu
, RUN_ON_CPU_HOST_INT(tb_flush_count
));
997 async_safe_run_on_cpu(cpu
, do_tb_flush
,
998 RUN_ON_CPU_HOST_INT(tb_flush_count
));
1004 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1005 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1006 * and let the optimizer get rid of them by wrapping their user-only callers
1007 * with if (DEBUG_TB_CHECK_GATE).
1009 #ifdef CONFIG_USER_ONLY
1011 static void do_tb_invalidate_check(void *p
, uint32_t hash
, void *userp
)
1013 TranslationBlock
*tb
= p
;
1014 target_ulong addr
= *(target_ulong
*)userp
;
1016 if (!(addr
+ TARGET_PAGE_SIZE
<= tb
->pc
|| addr
>= tb
->pc
+ tb
->size
)) {
1017 printf("ERROR invalidate: address=" TARGET_FMT_lx
1018 " PC=%08lx size=%04x\n", addr
, (long)tb
->pc
, tb
->size
);
1022 /* verify that all the pages have correct rights for code
1024 * Called with mmap_lock held.
1026 static void tb_invalidate_check(target_ulong address
)
1028 address
&= TARGET_PAGE_MASK
;
1029 qht_iter(&tb_ctx
.htable
, do_tb_invalidate_check
, &address
);
1032 static void do_tb_page_check(void *p
, uint32_t hash
, void *userp
)
1034 TranslationBlock
*tb
= p
;
1037 flags1
= page_get_flags(tb
->pc
);
1038 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
1039 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
1040 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1041 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
1045 /* verify that all the pages have correct rights for code */
1046 static void tb_page_check(void)
1048 qht_iter(&tb_ctx
.htable
, do_tb_page_check
, NULL
);
1051 #endif /* CONFIG_USER_ONLY */
1054 * user-mode: call with mmap_lock held
1055 * !user-mode: call with @pd->lock held
1057 static inline void tb_page_remove(PageDesc
*pd
, TranslationBlock
*tb
)
1059 TranslationBlock
*tb1
;
1063 assert_page_locked(pd
);
1064 pprev
= &pd
->first_tb
;
1065 PAGE_FOR_EACH_TB(pd
, tb1
, n1
) {
1067 *pprev
= tb1
->page_next
[n1
];
1070 pprev
= &tb1
->page_next
[n1
];
1072 g_assert_not_reached();
1075 /* remove @orig from its @n_orig-th jump list */
1076 static inline void tb_remove_from_jmp_list(TranslationBlock
*orig
, int n_orig
)
1078 uintptr_t ptr
, ptr_locked
;
1079 TranslationBlock
*dest
;
1080 TranslationBlock
*tb
;
1084 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1085 ptr
= qatomic_or_fetch(&orig
->jmp_dest
[n_orig
], 1);
1086 dest
= (TranslationBlock
*)(ptr
& ~1);
1091 qemu_spin_lock(&dest
->jmp_lock
);
1093 * While acquiring the lock, the jump might have been removed if the
1094 * destination TB was invalidated; check again.
1096 ptr_locked
= qatomic_read(&orig
->jmp_dest
[n_orig
]);
1097 if (ptr_locked
!= ptr
) {
1098 qemu_spin_unlock(&dest
->jmp_lock
);
1100 * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing another destination here would be a bug,
1102 * because we set the LSB above.
1104 g_assert(ptr_locked
== 1 && dest
->cflags
& CF_INVALID
);
1108 * We first acquired the lock, and since the destination pointer matches,
1109 * we know for sure that @orig is in the jmp list.
1111 pprev
= &dest
->jmp_list_head
;
1112 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1113 if (tb
== orig
&& n
== n_orig
) {
1114 *pprev
= tb
->jmp_list_next
[n
];
1115 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1116 qemu_spin_unlock(&dest
->jmp_lock
);
1119 pprev
= &tb
->jmp_list_next
[n
];
1121 g_assert_not_reached();
/* reset the jump entry 'n' of a TB so that it is not chained to another TB */
1126 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
1128 uintptr_t addr
= (uintptr_t)(tb
->tc
.ptr
+ tb
->jmp_reset_offset
[n
]);
1129 tb_set_jmp_target(tb
, n
, addr
);
1132 /* remove any jumps to the TB */
1133 static inline void tb_jmp_unlink(TranslationBlock
*dest
)
1135 TranslationBlock
*tb
;
1138 qemu_spin_lock(&dest
->jmp_lock
);
1140 TB_FOR_EACH_JMP(dest
, tb
, n
) {
1141 tb_reset_jump(tb
, n
);
1142 qatomic_and(&tb
->jmp_dest
[n
], (uintptr_t)NULL
| 1);
1143 /* No need to clear the list entry; setting the dest ptr is enough */
1145 dest
->jmp_list_head
= (uintptr_t)NULL
;
1147 qemu_spin_unlock(&dest
->jmp_lock
);
1151 * In user-mode, call with mmap_lock held.
1152 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1155 static void do_tb_phys_invalidate(TranslationBlock
*tb
, bool rm_from_page_list
)
1160 tb_page_addr_t phys_pc
;
1161 uint32_t orig_cflags
= tb_cflags(tb
);
1163 assert_memory_lock();
1165 /* make sure no further incoming jumps will be chained to this TB */
1166 qemu_spin_lock(&tb
->jmp_lock
);
1167 qatomic_set(&tb
->cflags
, tb
->cflags
| CF_INVALID
);
1168 qemu_spin_unlock(&tb
->jmp_lock
);
1170 /* remove the TB from the hash list */
1171 phys_pc
= tb
->page_addr
[0];
1172 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, orig_cflags
,
1173 tb
->trace_vcpu_dstate
);
1174 if (!qht_remove(&tb_ctx
.htable
, tb
, h
)) {
1178 /* remove the TB from the page list */
1179 if (rm_from_page_list
) {
1180 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
1181 tb_page_remove(p
, tb
);
1182 if (tb
->page_addr
[1] != -1) {
1183 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
1184 tb_page_remove(p
, tb
);
1188 /* remove the TB from the hash list */
1189 h
= tb_jmp_cache_hash_func(tb
->pc
);
1191 CPUJumpCache
*jc
= cpu
->tb_jmp_cache
;
1192 if (qatomic_read(&jc
->array
[h
].tb
) == tb
) {
1193 qatomic_set(&jc
->array
[h
].tb
, NULL
);
1197 /* suppress this TB from the two jump lists */
1198 tb_remove_from_jmp_list(tb
, 0);
1199 tb_remove_from_jmp_list(tb
, 1);
1201 /* suppress any remaining jumps to this TB */
1204 qatomic_set(&tb_ctx
.tb_phys_invalidate_count
,
1205 tb_ctx
.tb_phys_invalidate_count
+ 1);
1208 static void tb_phys_invalidate__locked(TranslationBlock
*tb
)
1210 qemu_thread_jit_write();
1211 do_tb_phys_invalidate(tb
, true);
1212 qemu_thread_jit_execute();
1215 /* invalidate one TB
1217 * Called with mmap_lock held in user-mode.
1219 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
1221 if (page_addr
== -1 && tb
->page_addr
[0] != -1) {
1223 do_tb_phys_invalidate(tb
, true);
1226 do_tb_phys_invalidate(tb
, false);
1230 /* add the tb in the target page and protect it if necessary
1232 * Called with mmap_lock held for user-mode emulation.
1233 * Called with @p->lock held in !user-mode.
1235 static inline void tb_page_add(PageDesc
*p
, TranslationBlock
*tb
,
1236 unsigned int n
, tb_page_addr_t page_addr
)
1238 #ifndef CONFIG_USER_ONLY
1239 bool page_already_protected
;
1242 assert_page_locked(p
);
1244 tb
->page_addr
[n
] = page_addr
;
1245 tb
->page_next
[n
] = p
->first_tb
;
1246 #ifndef CONFIG_USER_ONLY
1247 page_already_protected
= p
->first_tb
!= (uintptr_t)NULL
;
1249 p
->first_tb
= (uintptr_t)tb
| n
;
1251 #if defined(CONFIG_USER_ONLY)
1252 /* translator_loop() must have made all TB pages non-writable */
1253 assert(!(p
->flags
& PAGE_WRITE
));
1255 /* if some code is already present, then the pages are already
1256 protected. So we handle the case where only the first TB is
1257 allocated in a physical page */
1258 if (!page_already_protected
) {
1259 tlb_protect_code(page_addr
);
1265 * Add a new TB and link it to the physical page tables. phys_page2 is
1266 * (-1) to indicate that only one page contains the TB.
1268 * Called with mmap_lock held for user-mode emulation.
1270 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1271 * Note that in !user-mode, another thread might have already added a TB
1272 * for the same block of guest code that @tb corresponds to. In that case,
1273 * the caller should discard the original @tb, and use instead the returned TB.
1275 static TranslationBlock
*
1276 tb_link_page(TranslationBlock
*tb
, tb_page_addr_t phys_pc
,
1277 tb_page_addr_t phys_page2
)
1280 PageDesc
*p2
= NULL
;
1281 void *existing_tb
= NULL
;
1284 assert_memory_lock();
1285 tcg_debug_assert(!(tb
->cflags
& CF_INVALID
));
     * Add the TB to the page list, acquiring first the pages' locks.
1289 * We keep the locks held until after inserting the TB in the hash table,
1290 * so that if the insertion fails we know for sure that the TBs are still
1291 * in the page descriptors.
1292 * Note that inserting into the hash table first isn't an option, since
1293 * we can only insert TBs that are fully initialized.
1295 page_lock_pair(&p
, phys_pc
, &p2
, phys_page2
, true);
1296 tb_page_add(p
, tb
, 0, phys_pc
);
1298 tb_page_add(p2
, tb
, 1, phys_page2
);
1300 tb
->page_addr
[1] = -1;
1303 /* add in the hash table */
1304 h
= tb_hash_func(phys_pc
, tb
->pc
, tb
->flags
, tb
->cflags
,
1305 tb
->trace_vcpu_dstate
);
1306 qht_insert(&tb_ctx
.htable
, tb
, h
, &existing_tb
);
1308 /* remove TB from the page(s) if we couldn't insert it */
1309 if (unlikely(existing_tb
)) {
1310 tb_page_remove(p
, tb
);
1312 tb_page_remove(p2
, tb
);
1317 if (p2
&& p2
!= p
) {
1322 #ifdef CONFIG_USER_ONLY
1323 if (DEBUG_TB_CHECK_GATE
) {
1330 /* Called with mmap_lock held for user mode emulation. */
1331 TranslationBlock
*tb_gen_code(CPUState
*cpu
,
1332 target_ulong pc
, target_ulong cs_base
,
1333 uint32_t flags
, int cflags
)
1335 CPUArchState
*env
= cpu
->env_ptr
;
1336 TranslationBlock
*tb
, *existing_tb
;
1337 tb_page_addr_t phys_pc
;
1338 tcg_insn_unit
*gen_code_buf
;
1339 int gen_code_size
, search_size
, max_insns
;
1340 #ifdef CONFIG_PROFILER
1341 TCGProfile
*prof
= &tcg_ctx
->prof
;
1346 assert_memory_lock();
1347 qemu_thread_jit_write();
1349 phys_pc
= get_page_addr_code_hostp(env
, pc
, &host_pc
);
1351 if (phys_pc
== -1) {
1352 /* Generate a one-shot TB with 1 insn in it */
1353 cflags
= (cflags
& ~CF_COUNT_MASK
) | CF_LAST_IO
| 1;
1356 max_insns
= cflags
& CF_COUNT_MASK
;
1357 if (max_insns
== 0) {
1358 max_insns
= TCG_MAX_INSNS
;
1360 QEMU_BUILD_BUG_ON(CF_COUNT_MASK
+ 1 != TCG_MAX_INSNS
);
1363 tb
= tcg_tb_alloc(tcg_ctx
);
1364 if (unlikely(!tb
)) {
1365 /* flush must be done */
1368 /* Make the execution loop process the flush as soon as possible. */
1369 cpu
->exception_index
= EXCP_INTERRUPT
;
1373 gen_code_buf
= tcg_ctx
->code_gen_ptr
;
1374 tb
->tc
.ptr
= tcg_splitwx_to_rx(gen_code_buf
);
1376 tb
->cs_base
= cs_base
;
1378 tb
->cflags
= cflags
;
1379 tb
->trace_vcpu_dstate
= *cpu
->trace_dstate
;
1380 tb
->page_addr
[0] = phys_pc
;
1381 tb
->page_addr
[1] = -1;
1382 tcg_ctx
->tb_cflags
= cflags
;
1385 #ifdef CONFIG_PROFILER
1386 /* includes aborted translations because of exceptions */
1387 qatomic_set(&prof
->tb_count1
, prof
->tb_count1
+ 1);
1388 ti
= profile_getclock();
1391 gen_code_size
= sigsetjmp(tcg_ctx
->jmp_trans
, 0);
1392 if (unlikely(gen_code_size
!= 0)) {
1396 tcg_func_start(tcg_ctx
);
1398 tcg_ctx
->cpu
= env_cpu(env
);
1399 gen_intermediate_code(cpu
, tb
, max_insns
, pc
, host_pc
);
1400 assert(tb
->size
!= 0);
1401 tcg_ctx
->cpu
= NULL
;
1402 max_insns
= tb
->icount
;
1404 trace_translate_block(tb
, tb
->pc
, tb
->tc
.ptr
);
1406 /* generate machine code */
1407 tb
->jmp_reset_offset
[0] = TB_JMP_RESET_OFFSET_INVALID
;
1408 tb
->jmp_reset_offset
[1] = TB_JMP_RESET_OFFSET_INVALID
;
1409 tcg_ctx
->tb_jmp_reset_offset
= tb
->jmp_reset_offset
;
1410 if (TCG_TARGET_HAS_direct_jump
) {
1411 tcg_ctx
->tb_jmp_insn_offset
= tb
->jmp_target_arg
;
1412 tcg_ctx
->tb_jmp_target_addr
= NULL
;
1414 tcg_ctx
->tb_jmp_insn_offset
= NULL
;
1415 tcg_ctx
->tb_jmp_target_addr
= tb
->jmp_target_arg
;
1418 #ifdef CONFIG_PROFILER
1419 qatomic_set(&prof
->tb_count
, prof
->tb_count
+ 1);
1420 qatomic_set(&prof
->interm_time
,
1421 prof
->interm_time
+ profile_getclock() - ti
);
1422 ti
= profile_getclock();
1425 gen_code_size
= tcg_gen_code(tcg_ctx
, tb
);
1426 if (unlikely(gen_code_size
< 0)) {
1428 switch (gen_code_size
) {
1431 * Overflow of code_gen_buffer, or the current slice of it.
1433 * TODO: We don't need to re-do gen_intermediate_code, nor
1434 * should we re-do the tcg optimization currently hidden
1435 * inside tcg_gen_code. All that should be required is to
1436 * flush the TBs, allocate a new TB, re-initialize it per
1437 * above, and re-do the actual code generation.
1439 qemu_log_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
,
1440 "Restarting code generation for "
1441 "code_gen_buffer overflow\n");
1442 goto buffer_overflow
;
1446 * The code generated for the TranslationBlock is too large.
1447 * The maximum size allowed by the unwind info is 64k.
1448 * There may be stricter constraints from relocations
1449 * in the tcg backend.
1451 * Try again with half as many insns as we attempted this time.
1452 * If a single insn overflows, there's a bug somewhere...
1454 assert(max_insns
> 1);
1456 qemu_log_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
,
1457 "Restarting code generation with "
1458 "smaller translation block (max %d insns)\n",
1463 g_assert_not_reached();
1466 search_size
= encode_search(tb
, (void *)gen_code_buf
+ gen_code_size
);
1467 if (unlikely(search_size
< 0)) {
1468 goto buffer_overflow
;
1470 tb
->tc
.size
= gen_code_size
;
1472 #ifdef CONFIG_PROFILER
1473 qatomic_set(&prof
->code_time
, prof
->code_time
+ profile_getclock() - ti
);
1474 qatomic_set(&prof
->code_in_len
, prof
->code_in_len
+ tb
->size
);
1475 qatomic_set(&prof
->code_out_len
, prof
->code_out_len
+ gen_code_size
);
1476 qatomic_set(&prof
->search_out_len
, prof
->search_out_len
+ search_size
);
1480 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM
) &&
1481 qemu_log_in_addr_range(tb
->pc
)) {
1482 FILE *logfile
= qemu_log_trylock();
1484 int code_size
, data_size
;
1485 const tcg_target_ulong
*rx_data_gen_ptr
;
1489 if (tcg_ctx
->data_gen_ptr
) {
1490 rx_data_gen_ptr
= tcg_splitwx_to_rx(tcg_ctx
->data_gen_ptr
);
1491 code_size
= (const void *)rx_data_gen_ptr
- tb
->tc
.ptr
;
1492 data_size
= gen_code_size
- code_size
;
1494 rx_data_gen_ptr
= 0;
1495 code_size
= gen_code_size
;
1499 /* Dump header and the first instruction */
1500 fprintf(logfile
, "OUT: [size=%d]\n", gen_code_size
);
1502 " -- guest addr 0x" TARGET_FMT_lx
" + tb prologue\n",
1503 tcg_ctx
->gen_insn_data
[insn
][0]);
1504 chunk_start
= tcg_ctx
->gen_insn_end_off
[insn
];
1505 disas(logfile
, tb
->tc
.ptr
, chunk_start
);
1508 * Dump each instruction chunk, wrapping up empty chunks into
1509 * the next instruction. The whole array is offset so the
1510 * first entry is the beginning of the 2nd instruction.
1512 while (insn
< tb
->icount
) {
1513 size_t chunk_end
= tcg_ctx
->gen_insn_end_off
[insn
];
1514 if (chunk_end
> chunk_start
) {
1515 fprintf(logfile
, " -- guest addr 0x" TARGET_FMT_lx
"\n",
1516 tcg_ctx
->gen_insn_data
[insn
][0]);
1517 disas(logfile
, tb
->tc
.ptr
+ chunk_start
,
1518 chunk_end
- chunk_start
);
1519 chunk_start
= chunk_end
;
1524 if (chunk_start
< code_size
) {
1525 fprintf(logfile
, " -- tb slow paths + alignment\n");
1526 disas(logfile
, tb
->tc
.ptr
+ chunk_start
,
1527 code_size
- chunk_start
);
1530 /* Finally dump any data we may have after the block */
1533 fprintf(logfile
, " data: [size=%d]\n", data_size
);
1534 for (i
= 0; i
< data_size
/ sizeof(tcg_target_ulong
); i
++) {
1535 if (sizeof(tcg_target_ulong
) == 8) {
1537 "0x%08" PRIxPTR
": .quad 0x%016" TCG_PRIlx
"\n",
1538 (uintptr_t)&rx_data_gen_ptr
[i
], rx_data_gen_ptr
[i
]);
1539 } else if (sizeof(tcg_target_ulong
) == 4) {
1541 "0x%08" PRIxPTR
": .long 0x%08" TCG_PRIlx
"\n",
1542 (uintptr_t)&rx_data_gen_ptr
[i
], rx_data_gen_ptr
[i
]);
1544 qemu_build_not_reached();
1548 fprintf(logfile
, "\n");
1549 qemu_log_unlock(logfile
);
1554 qatomic_set(&tcg_ctx
->code_gen_ptr
, (void *)
1555 ROUND_UP((uintptr_t)gen_code_buf
+ gen_code_size
+ search_size
,
1558 /* init jump list */
1559 qemu_spin_init(&tb
->jmp_lock
);
1560 tb
->jmp_list_head
= (uintptr_t)NULL
;
1561 tb
->jmp_list_next
[0] = (uintptr_t)NULL
;
1562 tb
->jmp_list_next
[1] = (uintptr_t)NULL
;
1563 tb
->jmp_dest
[0] = (uintptr_t)NULL
;
1564 tb
->jmp_dest
[1] = (uintptr_t)NULL
;
1566 /* init original jump addresses which have been set during tcg_gen_code() */
1567 if (tb
->jmp_reset_offset
[0] != TB_JMP_RESET_OFFSET_INVALID
) {
1568 tb_reset_jump(tb
, 0);
1570 if (tb
->jmp_reset_offset
[1] != TB_JMP_RESET_OFFSET_INVALID
) {
1571 tb_reset_jump(tb
, 1);
1575 * If the TB is not associated with a physical RAM page then it must be
1576 * a temporary one-insn TB, and we have nothing left to do. Return early
1577 * before attempting to link to other TBs or add to the lookup table.
1579 if (tb
->page_addr
[0] == -1) {
1584 * Insert TB into the corresponding region tree before publishing it
     * through QHT. Otherwise a rewind happening inside the TB might fail to
     * look the TB up using the host PC.
1591 * No explicit memory barrier is required -- tb_link_page() makes the
1592 * TB visible in a consistent state.
1594 existing_tb
= tb_link_page(tb
, tb
->page_addr
[0], tb
->page_addr
[1]);
1595 /* if the TB already exists, discard what we just translated */
1596 if (unlikely(existing_tb
!= tb
)) {
1597 uintptr_t orig_aligned
= (uintptr_t)gen_code_buf
;
1599 orig_aligned
-= ROUND_UP(sizeof(*tb
), qemu_icache_linesize
);
1600 qatomic_set(&tcg_ctx
->code_gen_ptr
, (void *)orig_aligned
);
1608 * @p must be non-NULL.
1609 * user-mode: call with mmap_lock held.
1610 * !user-mode: call with all @pages locked.
1613 tb_invalidate_phys_page_range__locked(struct page_collection
*pages
,
1614 PageDesc
*p
, tb_page_addr_t start
,
1618 TranslationBlock
*tb
;
1619 tb_page_addr_t tb_start
, tb_end
;
1621 #ifdef TARGET_HAS_PRECISE_SMC
1622 CPUState
*cpu
= current_cpu
;
1623 CPUArchState
*env
= NULL
;
1624 bool current_tb_not_found
= retaddr
!= 0;
1625 bool current_tb_modified
= false;
1626 TranslationBlock
*current_tb
= NULL
;
1627 target_ulong current_pc
= 0;
1628 target_ulong current_cs_base
= 0;
1629 uint32_t current_flags
= 0;
1630 #endif /* TARGET_HAS_PRECISE_SMC */
1632 assert_page_locked(p
);
1634 #if defined(TARGET_HAS_PRECISE_SMC)
1640 /* we remove all the TBs in the range [start, end[ */
1641 /* XXX: see if in some cases it could be faster to invalidate all
1643 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1644 assert_page_locked(p
);
1645 /* NOTE: this is subtle as a TB may span two physical pages */
1647 /* NOTE: tb_end may be after the end of the page, but
1648 it is not a problem */
1649 tb_start
= tb
->page_addr
[0];
1650 tb_end
= tb_start
+ tb
->size
;
1652 tb_start
= tb
->page_addr
[1];
1653 tb_end
= tb_start
+ ((tb
->page_addr
[0] + tb
->size
)
1654 & ~TARGET_PAGE_MASK
);
1656 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1657 #ifdef TARGET_HAS_PRECISE_SMC
1658 if (current_tb_not_found
) {
1659 current_tb_not_found
= false;
1660 /* now we have a real cpu fault */
1661 current_tb
= tcg_tb_lookup(retaddr
);
1663 if (current_tb
== tb
&&
1664 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
1666 * If we are modifying the current TB, we must stop
1667 * its execution. We could be more precise by checking
1668 * that the modification is after the current PC, but it
1669 * would require a specialized function to partially
1670 * restore the CPU state.
1672 current_tb_modified
= true;
1673 cpu_restore_state_from_tb(cpu
, current_tb
, retaddr
, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
1677 #endif /* TARGET_HAS_PRECISE_SMC */
1678 tb_phys_invalidate__locked(tb
);
1681 #if !defined(CONFIG_USER_ONLY)
1682 /* if no code remaining, no need to continue to use slow writes */
1684 tlb_unprotect_code(start
);
1687 #ifdef TARGET_HAS_PRECISE_SMC
1688 if (current_tb_modified
) {
1689 page_collection_unlock(pages
);
1690 /* Force execution of one insn next time. */
1691 cpu
->cflags_next_tb
= 1 | CF_NOIRQ
| curr_cflags(cpu
);
1693 cpu_loop_exit_noexc(cpu
);
1699 * Invalidate all TBs which intersect with the target physical address range
1700 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1701 * 'is_cpu_write_access' should be true if called from a real cpu write
1702 * access: the virtual CPU will exit the current TB if code is modified inside
1705 * Called with mmap_lock held for user-mode emulation
1707 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
)
1709 struct page_collection
*pages
;
1712 assert_memory_lock();
1714 p
= page_find(start
>> TARGET_PAGE_BITS
);
1718 pages
= page_collection_lock(start
, end
);
1719 tb_invalidate_phys_page_range__locked(pages
, p
, start
, end
, 0);
1720 page_collection_unlock(pages
);
1724 * Invalidate all TBs which intersect with the target physical address range
1725 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1726 * 'is_cpu_write_access' should be true if called from a real cpu write
1727 * access: the virtual CPU will exit the current TB if code is modified inside
1730 * Called with mmap_lock held for user-mode emulation.
1732 #ifdef CONFIG_SOFTMMU
1733 void tb_invalidate_phys_range(ram_addr_t start
, ram_addr_t end
)
1735 void tb_invalidate_phys_range(target_ulong start
, target_ulong end
)
1738 struct page_collection
*pages
;
1739 tb_page_addr_t next
;
1741 assert_memory_lock();
1743 pages
= page_collection_lock(start
, end
);
1744 for (next
= (start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
1746 start
= next
, next
+= TARGET_PAGE_SIZE
) {
1747 PageDesc
*pd
= page_find(start
>> TARGET_PAGE_BITS
);
1748 tb_page_addr_t bound
= MIN(next
, end
);
1753 tb_invalidate_phys_page_range__locked(pages
, pd
, start
, bound
, 0);
1755 page_collection_unlock(pages
);
1758 #ifdef CONFIG_SOFTMMU
1759 /* len must be <= 8 and start must be a multiple of len.
1760 * Called via softmmu_template.h when code areas are written to with
1761 * iothread mutex not held.
1763 * Call with all @pages in the range [@start, @start + len[ locked.
1765 void tb_invalidate_phys_page_fast(struct page_collection
*pages
,
1766 tb_page_addr_t start
, int len
,
1771 assert_memory_lock();
1773 p
= page_find(start
>> TARGET_PAGE_BITS
);
1778 assert_page_locked(p
);
1779 tb_invalidate_phys_page_range__locked(pages
, p
, start
, start
+ len
,
1783 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1784 * host PC of the faulting store instruction that caused this invalidate.
1785 * Returns true if the caller needs to abort execution of the current
1786 * TB (because it was modified by this store and the guest CPU has
1787 * precise-SMC semantics).
1789 static bool tb_invalidate_phys_page(tb_page_addr_t addr
, uintptr_t pc
)
1791 TranslationBlock
*tb
;
1794 #ifdef TARGET_HAS_PRECISE_SMC
1795 TranslationBlock
*current_tb
= NULL
;
1796 CPUState
*cpu
= current_cpu
;
1797 CPUArchState
*env
= NULL
;
1798 int current_tb_modified
= 0;
1799 target_ulong current_pc
= 0;
1800 target_ulong current_cs_base
= 0;
1801 uint32_t current_flags
= 0;
1804 assert_memory_lock();
1806 addr
&= TARGET_PAGE_MASK
;
1807 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1812 #ifdef TARGET_HAS_PRECISE_SMC
1813 if (p
->first_tb
&& pc
!= 0) {
1814 current_tb
= tcg_tb_lookup(pc
);
1820 assert_page_locked(p
);
1821 PAGE_FOR_EACH_TB(p
, tb
, n
) {
1822 #ifdef TARGET_HAS_PRECISE_SMC
1823 if (current_tb
== tb
&&
1824 (tb_cflags(current_tb
) & CF_COUNT_MASK
) != 1) {
1825 /* If we are modifying the current TB, we must stop
1826 its execution. We could be more precise by checking
1827 that the modification is after the current PC, but it
1828 would require a specialized function to partially
1829 restore the CPU state */
1831 current_tb_modified
= 1;
1832 cpu_restore_state_from_tb(cpu
, current_tb
, pc
, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
1836 #endif /* TARGET_HAS_PRECISE_SMC */
1837 tb_phys_invalidate(tb
, addr
);
1839 p
->first_tb
= (uintptr_t)NULL
;
1840 #ifdef TARGET_HAS_PRECISE_SMC
1841 if (current_tb_modified
) {
1842 /* Force execution of one insn next time. */
1843 cpu
->cflags_next_tb
= 1 | CF_NOIRQ
| curr_cflags(cpu
);
1852 /* user-mode: call with mmap_lock held */
1853 void tb_check_watchpoint(CPUState
*cpu
, uintptr_t retaddr
)
1855 TranslationBlock
*tb
;
1857 assert_memory_lock();
1859 tb
= tcg_tb_lookup(retaddr
);
1861 /* We can use retranslation to find the PC. */
1862 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
1863 tb_phys_invalidate(tb
, -1);
1865 /* The exception probably happened in a helper. The CPU state should
1866 have been saved before calling it. Fetch the PC from there. */
1867 CPUArchState
*env
= cpu
->env_ptr
;
1868 target_ulong pc
, cs_base
;
1869 tb_page_addr_t addr
;
1872 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &flags
);
1873 addr
= get_page_addr_code(env
, pc
);
1875 tb_invalidate_phys_range(addr
, addr
+ 1);
1880 #ifndef CONFIG_USER_ONLY
1882 * In deterministic execution mode, instructions doing device I/Os
1883 * must be at the end of the TB.
1885 * Called by softmmu_template.h, with iothread mutex not held.
1887 void cpu_io_recompile(CPUState
*cpu
, uintptr_t retaddr
)
1889 TranslationBlock
*tb
;
1893 tb
= tcg_tb_lookup(retaddr
);
1895 cpu_abort(cpu
, "cpu_io_recompile: could not find TB for pc=%p",
1898 cpu_restore_state_from_tb(cpu
, tb
, retaddr
, true);
1901 * Some guests must re-execute the branch when re-executing a delay
1902 * slot instruction. When this is the case, adjust icount and N
1903 * to account for the re-execution of the branch.
1906 cc
= CPU_GET_CLASS(cpu
);
1907 if (cc
->tcg_ops
->io_recompile_replay_branch
&&
1908 cc
->tcg_ops
->io_recompile_replay_branch(cpu
, tb
)) {
1909 cpu_neg(cpu
)->icount_decr
.u16
.low
++;
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns. We also limit instrumentation to memory
1916 * operations only (which execute after completion) so we don't
1917 * double instrument the instruction.
1919 cpu
->cflags_next_tb
= curr_cflags(cpu
) | CF_MEMI_ONLY
| CF_LAST_IO
| n
;
1921 qemu_log_mask_and_addr(CPU_LOG_EXEC
, tb
->pc
,
1922 "cpu_io_recompile: rewound execution of TB to "
1923 TARGET_FMT_lx
"\n", tb
->pc
);
1925 cpu_loop_exit_noexc(cpu
);
1928 static void print_qht_statistics(struct qht_stats hst
, GString
*buf
)
1930 uint32_t hgram_opts
;
1934 if (!hst
.head_buckets
) {
1937 g_string_append_printf(buf
, "TB hash buckets %zu/%zu "
1938 "(%0.2f%% head buckets used)\n",
1939 hst
.used_head_buckets
, hst
.head_buckets
,
1940 (double)hst
.used_head_buckets
/
1941 hst
.head_buckets
* 100);
1943 hgram_opts
= QDIST_PR_BORDER
| QDIST_PR_LABELS
;
1944 hgram_opts
|= QDIST_PR_100X
| QDIST_PR_PERCENT
;
1945 if (qdist_xmax(&hst
.occupancy
) - qdist_xmin(&hst
.occupancy
) == 1) {
1946 hgram_opts
|= QDIST_PR_NODECIMAL
;
1948 hgram
= qdist_pr(&hst
.occupancy
, 10, hgram_opts
);
1949 g_string_append_printf(buf
, "TB hash occupancy %0.2f%% avg chain occ. "
1951 qdist_avg(&hst
.occupancy
) * 100, hgram
);
1954 hgram_opts
= QDIST_PR_BORDER
| QDIST_PR_LABELS
;
1955 hgram_bins
= qdist_xmax(&hst
.chain
) - qdist_xmin(&hst
.chain
);
1956 if (hgram_bins
> 10) {
1960 hgram_opts
|= QDIST_PR_NODECIMAL
| QDIST_PR_NOBINRANGE
;
1962 hgram
= qdist_pr(&hst
.chain
, hgram_bins
, hgram_opts
);
1963 g_string_append_printf(buf
, "TB hash avg chain %0.3f buckets. "
1965 qdist_avg(&hst
.chain
), hgram
);
1969 struct tb_tree_stats
{
1973 size_t max_target_size
;
1974 size_t direct_jmp_count
;
1975 size_t direct_jmp2_count
;
1979 static gboolean
tb_tree_stats_iter(gpointer key
, gpointer value
, gpointer data
)
1981 const TranslationBlock
*tb
= value
;
1982 struct tb_tree_stats
*tst
= data
;
1985 tst
->host_size
+= tb
->tc
.size
;
1986 tst
->target_size
+= tb
->size
;
1987 if (tb
->size
> tst
->max_target_size
) {
1988 tst
->max_target_size
= tb
->size
;
1990 if (tb
->page_addr
[1] != -1) {
1993 if (tb
->jmp_reset_offset
[0] != TB_JMP_RESET_OFFSET_INVALID
) {
1994 tst
->direct_jmp_count
++;
1995 if (tb
->jmp_reset_offset
[1] != TB_JMP_RESET_OFFSET_INVALID
) {
1996 tst
->direct_jmp2_count
++;
2002 void dump_exec_info(GString
*buf
)
2004 struct tb_tree_stats tst
= {};
2005 struct qht_stats hst
;
2006 size_t nb_tbs
, flush_full
, flush_part
, flush_elide
;
2008 tcg_tb_foreach(tb_tree_stats_iter
, &tst
);
2009 nb_tbs
= tst
.nb_tbs
;
2010 /* XXX: avoid using doubles ? */
2011 g_string_append_printf(buf
, "Translation buffer state:\n");
2013 * Report total code size including the padding and TB structs;
2014 * otherwise users might think "-accel tcg,tb-size" is not honoured.
2015 * For avg host size we use the precise numbers from tb_tree_stats though.
2017 g_string_append_printf(buf
, "gen code size %zu/%zu\n",
2018 tcg_code_size(), tcg_code_capacity());
2019 g_string_append_printf(buf
, "TB count %zu\n", nb_tbs
);
2020 g_string_append_printf(buf
, "TB avg target size %zu max=%zu bytes\n",
2021 nb_tbs
? tst
.target_size
/ nb_tbs
: 0,
2022 tst
.max_target_size
);
2023 g_string_append_printf(buf
, "TB avg host size %zu bytes "
2024 "(expansion ratio: %0.1f)\n",
2025 nb_tbs
? tst
.host_size
/ nb_tbs
: 0,
2027 (double)tst
.host_size
/ tst
.target_size
: 0);
2028 g_string_append_printf(buf
, "cross page TB count %zu (%zu%%)\n",
2030 nb_tbs
? (tst
.cross_page
* 100) / nb_tbs
: 0);
2031 g_string_append_printf(buf
, "direct jump count %zu (%zu%%) "
2032 "(2 jumps=%zu %zu%%)\n",
2033 tst
.direct_jmp_count
,
2034 nb_tbs
? (tst
.direct_jmp_count
* 100) / nb_tbs
: 0,
2035 tst
.direct_jmp2_count
,
2036 nb_tbs
? (tst
.direct_jmp2_count
* 100) / nb_tbs
: 0);
2038 qht_statistics_init(&tb_ctx
.htable
, &hst
);
2039 print_qht_statistics(hst
, buf
);
2040 qht_statistics_destroy(&hst
);
2042 g_string_append_printf(buf
, "\nStatistics:\n");
2043 g_string_append_printf(buf
, "TB flush count %u\n",
2044 qatomic_read(&tb_ctx
.tb_flush_count
));
2045 g_string_append_printf(buf
, "TB invalidate count %u\n",
2046 qatomic_read(&tb_ctx
.tb_phys_invalidate_count
));
2048 tlb_flush_counts(&flush_full
, &flush_part
, &flush_elide
);
2049 g_string_append_printf(buf
, "TLB full flushes %zu\n", flush_full
);
2050 g_string_append_printf(buf
, "TLB partial flushes %zu\n", flush_part
);
2051 g_string_append_printf(buf
, "TLB elided flushes %zu\n", flush_elide
);
2055 #else /* CONFIG_USER_ONLY */
2057 void cpu_interrupt(CPUState
*cpu
, int mask
)
2059 g_assert(qemu_mutex_iothread_locked());
2060 cpu
->interrupt_request
|= mask
;
2061 qatomic_set(&cpu_neg(cpu
)->icount_decr
.u16
.high
, -1);
2065 * Walks guest process memory "regions" one by one
2066 * and calls callback function 'fn' for each region.
2068 struct walk_memory_regions_data
{
2069 walk_memory_regions_fn fn
;
2075 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2076 target_ulong end
, int new_prot
)
2078 if (data
->start
!= -1u) {
2079 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2085 data
->start
= (new_prot
? end
: -1u);
2086 data
->prot
= new_prot
;
2091 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2092 target_ulong base
, int level
, void **lp
)
2098 return walk_memory_regions_end(data
, base
, 0);
2104 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
2105 int prot
= pd
[i
].flags
;
2107 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2108 if (prot
!= data
->prot
) {
2109 rc
= walk_memory_regions_end(data
, pa
, prot
);
2118 for (i
= 0; i
< V_L2_SIZE
; ++i
) {
2119 pa
= base
| ((target_ulong
)i
<<
2120 (TARGET_PAGE_BITS
+ V_L2_BITS
* level
));
2121 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2131 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2133 struct walk_memory_regions_data data
;
2134 uintptr_t i
, l1_sz
= v_l1_size
;
2141 for (i
= 0; i
< l1_sz
; i
++) {
2142 target_ulong base
= i
<< (v_l1_shift
+ TARGET_PAGE_BITS
);
2143 int rc
= walk_memory_regions_1(&data
, base
, v_l2_levels
, l1_map
+ i
);
2149 return walk_memory_regions_end(&data
, 0, 0);
2152 static int dump_region(void *priv
, target_ulong start
,
2153 target_ulong end
, unsigned long prot
)
2155 FILE *f
= (FILE *)priv
;
2157 (void) fprintf(f
, TARGET_FMT_lx
"-"TARGET_FMT_lx
2158 " "TARGET_FMT_lx
" %c%c%c\n",
2159 start
, end
, end
- start
,
2160 ((prot
& PAGE_READ
) ? 'r' : '-'),
2161 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2162 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2167 /* dump memory mappings */
2168 void page_dump(FILE *f
)
2170 const int length
= sizeof(target_ulong
) * 2;
2171 (void) fprintf(f
, "%-*s %-*s %-*s %s\n",
2172 length
, "start", length
, "end", length
, "size", "prot");
2173 walk_memory_regions(f
, dump_region
);
2176 int page_get_flags(target_ulong address
)
2180 p
= page_find(address
>> TARGET_PAGE_BITS
);
2188 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
2189 * By default, they are not kept.
2191 #ifndef PAGE_TARGET_STICKY
2192 #define PAGE_TARGET_STICKY 0
2194 #define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
2196 /* Modify the flags of a page and invalidate the code if necessary.
2197 The flag PAGE_WRITE_ORG is positioned automatically depending
2198 on PAGE_WRITE. The mmap_lock should already be held. */
2199 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2201 target_ulong addr
, len
;
2202 bool reset_target_data
;
2204 /* This function should never be called with addresses outside the
2205 guest address space. If this assert fires, it probably indicates
2206 a missing call to h2g_valid. */
2207 assert(end
- 1 <= GUEST_ADDR_MAX
);
2208 assert(start
< end
);
2209 /* Only set PAGE_ANON with new mappings. */
2210 assert(!(flags
& PAGE_ANON
) || (flags
& PAGE_RESET
));
2211 assert_memory_lock();
2213 start
= start
& TARGET_PAGE_MASK
;
2214 end
= TARGET_PAGE_ALIGN(end
);
2216 if (flags
& PAGE_WRITE
) {
2217 flags
|= PAGE_WRITE_ORG
;
2219 reset_target_data
= !(flags
& PAGE_VALID
) || (flags
& PAGE_RESET
);
2220 flags
&= ~PAGE_RESET
;
2222 for (addr
= start
, len
= end
- start
;
2224 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2225 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, true);
2227 /* If the write protection bit is set, then we invalidate
2229 if (!(p
->flags
& PAGE_WRITE
) &&
2230 (flags
& PAGE_WRITE
) &&
2232 tb_invalidate_phys_page(addr
, 0);
2234 if (reset_target_data
) {
2235 g_free(p
->target_data
);
2236 p
->target_data
= NULL
;
2239 /* Using mprotect on a page does not change sticky bits. */
2240 p
->flags
= (p
->flags
& PAGE_STICKY
) | flags
;
2245 void page_reset_target_data(target_ulong start
, target_ulong end
)
2247 target_ulong addr
, len
;
2250 * This function should never be called with addresses outside the
2251 * guest address space. If this assert fires, it probably indicates
2252 * a missing call to h2g_valid.
2254 assert(end
- 1 <= GUEST_ADDR_MAX
);
2255 assert(start
< end
);
2256 assert_memory_lock();
2258 start
= start
& TARGET_PAGE_MASK
;
2259 end
= TARGET_PAGE_ALIGN(end
);
2261 for (addr
= start
, len
= end
- start
;
2263 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2264 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2266 g_free(p
->target_data
);
2267 p
->target_data
= NULL
;
2271 void *page_get_target_data(target_ulong address
)
2273 PageDesc
*p
= page_find(address
>> TARGET_PAGE_BITS
);
2274 return p
? p
->target_data
: NULL
;
2277 void *page_alloc_target_data(target_ulong address
, size_t size
)
2279 PageDesc
*p
= page_find(address
>> TARGET_PAGE_BITS
);
2282 if (p
->flags
& PAGE_VALID
) {
2283 ret
= p
->target_data
;
2285 p
->target_data
= ret
= g_malloc0(size
);
2291 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2297 /* This function should never be called with addresses outside the
2298 guest address space. If this assert fires, it probably indicates
2299 a missing call to h2g_valid. */
2300 if (TARGET_ABI_BITS
> L1_MAP_ADDR_SPACE_BITS
) {
2301 assert(start
< ((target_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2307 if (start
+ len
- 1 < start
) {
2308 /* We've wrapped around. */
    /* must do before we lose bits in the next step */
2313 end
= TARGET_PAGE_ALIGN(start
+ len
);
2314 start
= start
& TARGET_PAGE_MASK
;
2316 for (addr
= start
, len
= end
- start
;
2318 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2319 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2323 if (!(p
->flags
& PAGE_VALID
)) {
2327 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
)) {
2330 if (flags
& PAGE_WRITE
) {
2331 if (!(p
->flags
& PAGE_WRITE_ORG
)) {
2334 /* unprotect the page if it was put read-only because it
2335 contains translated code */
2336 if (!(p
->flags
& PAGE_WRITE
)) {
2337 if (!page_unprotect(addr
, 0)) {
2346 void page_protect(tb_page_addr_t page_addr
)
2352 p
= page_find(page_addr
>> TARGET_PAGE_BITS
);
2353 if (p
&& (p
->flags
& PAGE_WRITE
)) {
2355 * Force the host page as non writable (writes will have a page fault +
2356 * mprotect overhead).
2358 page_addr
&= qemu_host_page_mask
;
2360 for (addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
2361 addr
+= TARGET_PAGE_SIZE
) {
2363 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2368 p
->flags
&= ~PAGE_WRITE
;
2370 mprotect(g2h_untagged(page_addr
), qemu_host_page_size
,
2371 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
2372 if (DEBUG_TB_INVALIDATE_GATE
) {
2373 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT
"\n", page_addr
);
2378 /* called from signal handler: invalidate the code and unprotect the
2379 * page. Return 0 if the fault was not handled, 1 if it was handled,
2380 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
2384 int page_unprotect(target_ulong address
, uintptr_t pc
)
2387 bool current_tb_invalidated
;
2389 target_ulong host_start
, host_end
, addr
;
2391 /* Technically this isn't safe inside a signal handler. However we
2392 know this only ever happens in a synchronous SEGV handler, so in
2393 practice it seems to be ok. */
2396 p
= page_find(address
>> TARGET_PAGE_BITS
);
2402 /* if the page was really writable, then we change its
2403 protection back to writable */
2404 if (p
->flags
& PAGE_WRITE_ORG
) {
2405 current_tb_invalidated
= false;
2406 if (p
->flags
& PAGE_WRITE
) {
2407 /* If the page is actually marked WRITE then assume this is because
2408 * this thread raced with another one which got here first and
2409 * set the page to PAGE_WRITE and did the TB invalidate for us.
2411 #ifdef TARGET_HAS_PRECISE_SMC
2412 TranslationBlock
*current_tb
= tcg_tb_lookup(pc
);
2414 current_tb_invalidated
= tb_cflags(current_tb
) & CF_INVALID
;
2418 host_start
= address
& qemu_host_page_mask
;
2419 host_end
= host_start
+ qemu_host_page_size
;
2422 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2423 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2424 p
->flags
|= PAGE_WRITE
;
2427 /* and since the content will be modified, we must invalidate
2428 the corresponding translated code. */
2429 current_tb_invalidated
|= tb_invalidate_phys_page(addr
, pc
);
2430 #ifdef CONFIG_USER_ONLY
2431 if (DEBUG_TB_CHECK_GATE
) {
2432 tb_invalidate_check(addr
);
2436 mprotect((void *)g2h_untagged(host_start
), qemu_host_page_size
,
2440 /* If current TB was invalidated return to main loop */
2441 return current_tb_invalidated
? 2 : 1;
2446 #endif /* CONFIG_USER_ONLY */
2449 * Called by generic code at e.g. cpu reset after cpu creation,
2450 * therefore we must be prepared to allocate the jump cache.
2452 void tcg_flush_jmp_cache(CPUState
*cpu
)
2454 CPUJumpCache
*jc
= cpu
->tb_jmp_cache
;
2457 for (int i
= 0; i
< TB_JMP_CACHE_SIZE
; i
++) {
2458 qatomic_set(&jc
->array
[i
].tb
, NULL
);
2461 /* This should happen once during realize, and thus never race. */
2462 jc
= g_new0(CPUJumpCache
, 1);
2463 jc
= qatomic_xchg(&cpu
->tb_jmp_cache
, jc
);
2468 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
2469 void tcg_flush_softmmu_tlb(CPUState
*cs
)
2471 #ifdef CONFIG_SOFTMMU