/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpu-timers.h"

/* Allow the translation results to be inspected; the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an
   appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
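
/*
 * Example (illustrative sketch): TB_PAGE_ADDR_FMT allows diagnostics to
 * print a tb_page_addr_t portably in both configurations, e.g.:
 *
 *     qemu_log("tb page at " TB_PAGE_ADDR_FMT "\n", page_addr);
 */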

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
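
/*
 * Example (illustrative sketch, assuming a typical target fault path):
 * a memory fault handler unwinds guest state from the host return
 * address before raising the guest exception; "excp" is a hypothetical
 * exception number:
 *
 *     if (retaddr) {
 *         cpu_restore_state(cs, retaddr, true);
 *     }
 *     cs->exception_index = excp;
 *     cpu_loop_exit(cs);
 */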

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
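
/*
 * Example (illustrative sketch): a helper implementing an interruptible
 * string-type instruction might, after writing back per-element state,
 * check whether to return to the main loop. "env->index" and
 * "copy_one_element" are hypothetical; "ra" is the helper's GETPC() value:
 *
 *     for (i = env->index; i < len; i++) {
 *         copy_one_element(env, i);
 *         env->index = i + 1;
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, ra);
 *         }
 *     }
 */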

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
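
/*
 * Example (illustrative sketch): a target with a normal and a secure
 * address space might register them at realize time; "sysmem" and
 * "secure_mr" are hypothetical MemoryRegion roots:
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", sysmem);
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */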
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
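
/*
 * Example (illustrative sketch): @idxmap is a bitmap keyed by MMU index,
 * so a page can be flushed from several indexes at once; MMU_KERNEL_IDX
 * and MMU_USER_IDX are hypothetical target-defined indexes:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */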
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but only the low @bits of the
 * virtual address are significant for the match.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
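
/*
 * Example (illustrative sketch): a target's tlb_fill path typically ends
 * a successful page table walk by installing the translation:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */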
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

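/*
 * Example (illustrative sketch): a helper that performs a multi-word
 * store can probe the whole destination once, so that the individual
 * stores below cannot fault halfway through; "lo" and "hi" are
 * hypothetical values:
 *
 *     probe_write(env, addr, 16, mmu_idx, GETPC());
 *     cpu_stq_data_ra(env, addr, lo, GETPC());
 *     cpu_stq_data_ra(env, addr + 8, hi, GETPC());
 */
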
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK bits
 * for the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);

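/*
 * Example (illustrative sketch): non-faulting probes let an instruction
 * such as a first-fault vector load test a page and suppress the element
 * instead of raising the exception itself:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         ... suppress this element rather than faulting ...
 *     }
 */
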
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
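
/*
 * Example (illustrative sketch): a lookup in the binary search tree can
 * decide whether a host PC falls inside a given TB's code by comparing
 * against the half-open interval [ptr, ptr + size); "tb_tc_cmp" is
 * hypothetical:
 *
 *     static int tb_tc_cmp(const struct tb_tc *tc, uintptr_t host_pc)
 *     {
 *         if (host_pc < (uintptr_t)tc->ptr) {
 *             return -1;
 *         }
 *         return host_pc < (uintptr_t)tc->ptr + tc->size ? 0 : 1;
 *     }
 */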

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

extern bool parallel_cpus;

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (icount_enabled() ? CF_USE_ICOUNT : 0);
}
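
/*
 * Example (illustrative sketch): lookups for an existing translation
 * must match on the hashed subset of cflags, so callers combine
 * curr_cflags() with CF_HASH_MASK:
 *
 *     uint32_t cf_mask = curr_cflags() & CF_HASH_MASK;
 *     TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags,
 *                                             cf_mask);
 */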

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
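
/*
 * Example (illustrative sketch): a target helper captures its own return
 * address with GETPC() and threads it through calls that may need to
 * unwind guest state; the common unwinding code applies GETPC_ADJ before
 * searching for the containing TB. "helper_example_store" is hypothetical:
 *
 *     void helper_example_store(CPUArchState *env, target_ulong addr,
 *                               uint64_t val)
 *     {
 *         cpu_stq_data_ra(env, addr, val, GETPC());
 *     }
 */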

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif