/*
 * Internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_H
#define ACCEL_TCG_INTERNAL_H

#include "exec/exec-all.h"
#include "exec/translate-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency. In user-mode emulation, access to the
 * memory-related structures is protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
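
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): code
 * that mutates the translation structures is expected to hold the
 * appropriate lock, which assert_memory_lock() checks in debug builds.
 */
#if 0
static void example_update_tb_maps(void)
{
    /* In user mode this asserts have_mmap_lock(); otherwise a no-op. */
    assert_memory_lock();
    /* ... walk or mutate the memory-related structures here ... */
}
#endif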

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
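
/*
 * Hedged sketch (hypothetical caller, not part of QEMU): the intended
 * pairing is to lock the guest page(s) a TB covers before publishing
 * it, then release them. tb_page_addr0()/tb_page_addr1() are assumed
 * here to come from exec-all.h, with -1 marking an absent second page.
 */
#if 0
static void example_lock_tb_pages(TranslationBlock *tb)
{
    tb_page_addr_t p0 = tb_page_addr0(tb);
    tb_page_addr_t p1 = tb_page_addr1(tb);  /* -1 if TB fits in one page */

    tb_lock_page0(p0);
    if (p1 != -1) {
        tb_lock_page1(p0, p1);
    }
    /* ... link the TB into the page lists ... */
    tb_unlock_pages(tb);
}
#endif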

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                              uint64_t cs_base, uint32_t flags,
                              int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}

/*
 * Return true if CS is not running in parallel with other cpus, either
 * because there are no other cpus or we are within an exclusive context.
 */
static inline bool cpu_in_serial_context(CPUState *cs)
{
    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
}
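
/*
 * Hedged usage sketch (do_flush/do_flush_work are hypothetical helpers,
 * not part of QEMU): work that is unsafe while other vCPUs run can
 * execute inline in a serial context, and otherwise be deferred to an
 * exclusive context via async_safe_run_on_cpu().
 */
#if 0
static void example_flush(CPUState *cs)
{
    if (cpu_in_serial_context(cs)) {
        do_flush(cs);                       /* hypothetical helper */
    } else {
        async_safe_run_on_cpu(cs, do_flush_work, RUN_ON_CPU_NULL);
    }
}
#endif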

extern int64_t max_delay;
extern int64_t max_advance;

extern bool one_insn_per_tb;

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering. A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
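
/*
 * Worked example (illustrative values only): for a strongly ordered
 * guest with TCG_GUEST_DEFAULT_MO == TCG_MO_ALL on a weakly ordered
 * host with TCG_TARGET_DEFAULT_MO == 0,
 *
 *     tcg_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD)
 *         == (TCG_MO_LD_LD | TCG_MO_ST_LD) & TCG_MO_ALL & ~0
 *         == TCG_MO_LD_LD | TCG_MO_ST_LD        -> barrier required
 *
 * while on a host whose TCG_TARGET_DEFAULT_MO already covers all of
 * TCG_MO_ALL, the same expression folds to 0 and no barrier is issued.
 */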

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
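
/*
 * Usage sketch (assumed call site, in the spirit of the softmmu memory
 * helpers): a load path would issue the ordering check before touching
 * guest memory, e.g.
 *
 *     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
 *
 * so a host barrier is only emitted when the guest model requires one.
 */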

#endif /* ACCEL_TCG_INTERNAL_H */