/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpu-timers.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an
   appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
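
/*
 * A sketch of typical use (the helper below is hypothetical): a
 * target's memory-fault path restores guest state from the host
 * return address before raising the guest exception.
 *
 *     static void hypothetical_raise_fault(CPUState *cs, uintptr_t retaddr)
 *     {
 *         cpu_restore_state(cs, retaddr, true);
 *         cpu_loop_exit(cs);
 *     }
 */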

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
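
/*
 * A sketch of typical use in an interruptible instruction (names are
 * illustrative): after each iteration has been written back to guest
 * state, check whether to bail out to the main loop.
 *
 *     while (elements_remain(env)) {
 *         copy_one_element(env);
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, ra);
 *         }
 *     }
 */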

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
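
/*
 * A sketch of typical use (names and indexes are illustrative): a
 * target registering two address spaces sets cpu->num_ases first.
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-secure-memory", secure_mr);
 *     cpu_address_space_init(cs, 1, "cpu-memory", mr);
 */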
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
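
/*
 * A sketch of typical use: a target implementing a TLB
 * invalidate-by-address instruction can simply forward the guest
 * address (variables are illustrative).
 *
 *     tlb_flush_page(cs, addr & TARGET_PAGE_MASK);
 */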
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
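
/*
 * A sketch of typical use: since @idxmap is a bitmap, one call can
 * flush a page from several MMU indexes at once (the index names
 * below are hypothetical; each target defines its own).
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */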
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes, like tlb_flush_by_mmuidx_all_cpus, except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits);
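
/*
 * A sketch of typical use (all values are illustrative): invalidate a
 * 1 MiB region for one MMU index, comparing all 64 address bits.
 *
 *     tlb_flush_range_by_mmuidx(cs, base, 1 * MiB, 1 << mmu_idx, 64);
 */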

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                        target_ulong len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
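
/*
 * A sketch of typical use (variables are illustrative): after a
 * successful page table walk in the target's tlb_fill hook, install
 * the translation.
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                             mmu_idx, TARGET_PAGE_SIZE);
 */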
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                             target_ulong len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      target_ulong addr,
                                                      target_ulong len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             target_ulong addr,
                                                             target_ulong len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
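
/*
 * A sketch of typical use (simplified): a helper that must take any
 * fault before modifying guest state can probe the whole access up
 * front, then use the host pointer if the page is backed by RAM.
 *
 *     void *host = probe_access(env, addr, size, MMU_DATA_STORE,
 *                               mmu_idx, GETPC());
 *     if (host) {
 *         memset(host, 0, size);
 *     } else {
 *         ... fall back to byte-wise cpu_stb_mmuidx_ra() accesses ...
 *     }
 */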

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
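
/*
 * A sketch of typical use (simplified): a non-faulting "first fault"
 * load probes with @nonfault set and inspects the returned flags.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         ... no usable translation: stop without raising a fault ...
 *     }
 */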

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* First and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s). */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);
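
/*
 * A sketch of typical use: translators read the hashed flags via
 * tb_cflags() to choose a code path, e.g. whether to emit
 * atomic-aware code for a parallel (MTTCG) context.
 *
 *     if (tb_cflags(tb) & CF_PARALLEL) {
 *         ... emit code safe against concurrent vCPUs ...
 *     }
 */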

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ 2
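
/*
 * A sketch of typical use (the helper is hypothetical): out-of-line
 * helpers capture the host return address once on entry and pass it
 * down so that faults can be unwound back to guest state.
 *
 *     uint64_t helper_hypothetical_ld(CPUArchState *env, uint64_t addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         return cpu_ldq_data_ra(env, addr, ra);
 *     }
 */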

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                         MMUAccessType access_type,
                                         bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                        MMUAccessType access_type,
                                        uintptr_t ra);

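/*
 * A sketch (simplified and illustrative) of how a user-mode host
 * SIGSEGV handler might tie the hooks above together:
 *
 *     MMUAccessType t = adjust_signal_pc(&host_pc, is_write);
 *     if (t == MMU_DATA_STORE && !maperr &&
 *         handle_sigsegv_accerr_write(cpu, &old_set, host_pc, guest_addr)) {
 *         return;
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, t, maperr, host_pc);
 */
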
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif