/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/**
 * cpu_unwind_state_data:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @data: output data
 *
 * Attempt to load the unwind state for a host pc occurring in
 * translated code. If @host_pc is not in translated code, the
 * function returns false; otherwise @data is loaded.
 * This is the same unwind info as given to restore_state_to_opc.
 */
bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);

/**
 * cpu_restore_state:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @host_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);

G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit(CPUState *cpu);
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}

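/*
 * Illustrative sketch (not part of this header): a target implementing an
 * interruptible, iterative instruction might poll cpu_loop_exit_requested()
 * after each unit of work has been written back, e.g.:
 *
 *     while (env->count) {                    // hypothetical per-target state
 *         copy_one_element(env);              // hypothetical helper
 *         env->count--;
 *         if (cpu_loop_exit_requested(cs)) {
 *             // exit to the main loop; the instruction is restarted later
 *             cpu_loop_exit_restore(cs, ra);
 *         }
 *     }
 */
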
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
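/*
 * Illustrative sketch (not part of this header): a target helper emulating a
 * single-page TLB invalidation instruction could simply forward the request
 * to the softmmu TLB (the helper name is hypothetical):
 *
 *     void helper_invalidate_page(CPUArchState *env, vaddr addr)
 *     {
 *         tlb_flush_page(env_cpu(env), addr);
 *     }
 */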
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
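/*
 * Illustrative sketch (not part of this header): @idxmap is a bitmap keyed by
 * MMU index, so dropping only the entries for two particular translation
 * regimes might look like this (the MMU index macros are hypothetical,
 * per-target names):
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_by_mmuidx(env_cpu(env), idxmap);
 */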
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete.
 * This will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of the virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                        vaddr len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
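/*
 * Illustrative sketch (not part of this header): a target's tlb_fill hook,
 * after a successful page table walk, typically ends by installing the
 * translation; the walker and the target name below are hypothetical:
 *
 *     bool xyz_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                           MMUAccessType access_type, int mmu_idx,
 *                           bool probe, uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *
 *         if (walk_page_table(cs, addr, access_type, mmu_idx,
 *                             &paddr, &prot)) {          // hypothetical walker
 *             tlb_set_page(cs, addr & TARGET_PAGE_MASK,
 *                          paddr & TARGET_PAGE_MASK, prot,
 *                          mmu_idx, TARGET_PAGE_SIZE);
 *             return true;
 *         }
 *         // otherwise: return false if probing, else record the fault and
 *         // raise the guest exception, unwinding via @retaddr
 *     }
 */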
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     vaddr addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          vaddr addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      vaddr addr,
                                                      vaddr len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

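/*
 * Illustrative sketch (not part of this header): a helper that writes a
 * multi-byte block can probe the whole destination first, so any fault is
 * raised before partial state is committed. The helper name is hypothetical
 * and @len is assumed not to cross a page boundary:
 *
 *     void helper_zero_block(CPUArchState *env, vaddr addr, uint32_t len)
 *     {
 *         int mmu_idx = cpu_mmu_index(env, false);
 *         void *host = probe_write(env, addr, len, mmu_idx, GETPC());
 *
 *         if (host) {
 *             memset(host, 0, len);     // fast path: page is backed by RAM
 *         } else {
 *             // slow path: store byte by byte via cpu_stb_* for I/O pages
 *         }
 *     }
 */
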
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);

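/*
 * Illustrative sketch (not part of this header): with @nonfault set, a target
 * can test a translation without risking a guest exception, e.g. when
 * emulating an instruction that only queries access rights:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // not mapped for this access; report that to the guest instead
 *     }
 */
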
#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);
#endif

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2

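/*
 * Illustrative sketch (not part of this header): GETPC() must be evaluated in
 * the outermost helper called directly from generated code, and the value is
 * then passed down as the retaddr used for unwinding. The helper and its
 * validity check are hypothetical:
 *
 *     uint64_t helper_load_checked(CPUArchState *env, vaddr addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         uint64_t val = cpu_ldq_data_ra(env, addr, ra);
 *
 *         if (!value_is_valid(val)) {          // hypothetical check
 *             cpu_loop_exit_restore(env_cpu(env), ra);
 *         }
 *         return val;
 *     }
 */
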
#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, vaddr addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif