/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow the debugger to see translation results; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
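
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a host-fault handler might unwind guest state like this, where
 * "retaddr" is the faulting host PC it captured earlier:
 *
 *     if (cpu_restore_state(cpu, retaddr, true)) {
 *         // guest CPU state now reflects the faulting guest insn
 *     }
 */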

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

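/*
 * Illustrative sketch (hypothetical helper, exception number, and
 * env-to-CPUState accessor): a target helper that detects a guest fault
 * typically raises it and unwinds the translated frame in one step:
 *
 *     void helper_check_align(CPUArchState *env, target_ulong addr)
 *     {
 *         if (addr & 3) {
 *             CPUState *cs = ENV_GET_CPU(env);  // target-provided macro
 *             cs->exception_index = EXCP_FAULT; // hypothetical constant
 *             cpu_loop_exit_restore(cs, GETPC());
 *         }
 *     }
 */
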
#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as the name of the address space
 * @mr: the root memory region of the address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif
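
/*
 * Illustrative sketch (made-up region names): a target realize hook
 * exposing a secure and a non-secure address space might do:
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", system_mr);
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */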

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
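
/*
 * Illustrative sketch: a guest TLB-invalidate instruction for a single
 * virtual address is typically implemented as
 *
 *     tlb_flush_page(cs, vaddr & TARGET_PAGE_MASK);
 *
 * (cs and vaddr are assumed locals in the hypothetical caller.)
 */
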
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
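
/*
 * Illustrative sketch (hypothetical MMU index names): @idxmap is a
 * bitmap, so flushing a page from two specific indexes looks like
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */
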
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
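
/*
 * Illustrative sketch (values assumed to come from a successful page
 * table walk): a target's tlb_fill() usually ends with something like
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */
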
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr,
                                           MemTxAttrs attrs)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Setters need tb_lock */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};

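/*
 * Illustrative sketch of the jmp_list encoding described above: given a
 * tagged pointer "ptr" taken from jmp_list_first or jmp_list_next, the
 * TB and the tag can be recovered with
 *
 *     TranslationBlock *next = (TranslationBlock *)(ptr & ~(uintptr_t)3);
 *     int which = ptr & 3;  // 0/1: continue via next->jmp_list_next[which];
 *                           // 2:   ptr points back at the list's target TB
 */
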
extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

void tb_remove(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

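/*
 * Illustrative sketch: a lookup keyed on the current execution mode
 * combines curr_cflags() with the hash mask, e.g.
 *
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags,
 *                           curr_cflags() & CF_HASH_MASK);
 *
 * (pc, cs_base and flags are assumed to come from the target's
 * cpu_get_tb_cpu_state().)
 */
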
/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ 2
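
/*
 * Illustrative sketch (hypothetical helper): GETPC() must be evaluated
 * in the helper called directly from generated code, then passed down:
 *
 *     uint64_t helper_ld64(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();  // capture before any further calls
 *         return do_load64(env, addr, ra);  // hypothetical internal fn
 *     }
 */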

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes of the access
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

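/*
 * Illustrative sketch: user-mode code that modifies guest page state
 * brackets the work with the mmap lock:
 *
 *     mmap_lock();
 *     // ... walk or update page flags / translation state ...
 *     mmap_unlock();
 */
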
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif