/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/cpu_ldst.h"
#endif
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}

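/*
 * Usage sketch (illustrative only): a helper implementing an interruptible
 * string/block instruction might test this between iterations, after all
 * guest state has been written back. helper_block_step() below is a
 * hypothetical target helper; env_cpu(), cpu_loop_exit_restore() and
 * GETPC() are the usual QEMU facilities.
 *
 *     void helper_block_step(CPUArchState *env)
 *     {
 *         // ... perform one iteration and write back all guest state ...
 *         if (cpu_loop_exit_requested(env_cpu(env))) {
 *             cpu_loop_exit_restore(env_cpu(env), GETPC());
 *         }
 *     }
 */
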
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
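/*
 * Usage sketch (illustrative only): a helper emulating a guest
 * "invalidate TLB entry by address" instruction would typically just
 * forward the request:
 *
 *     void helper_tlb_inv_page(CPUArchState *env, target_ulong addr)
 *     {
 *         tlb_flush_page(env_cpu(env), addr);
 *     }
 *
 * helper_tlb_inv_page() is a hypothetical name; real targets register
 * such helpers through their own helper.h lists.
 */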
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
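/*
 * Usage sketch (illustrative only): a helper emulating a broadcast
 * "invalidate everything" operation would normally use the _synced
 * variant, with the translator arranging for the TB to end after the
 * instruction so the queued safe work can run:
 *
 *     void helper_tlb_inv_all_broadcast(CPUArchState *env)
 *     {
 *         tlb_flush_all_cpus_synced(env_cpu(env));
 *     }
 */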
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
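/*
 * Usage sketch (illustrative only; the MMU index names are hypothetical,
 * as each target defines its own): @idxmap is a bitmap of MMU indexes,
 * so flushing one page from two indexes at once looks like
 *
 *     tlb_flush_page_by_mmuidx(env_cpu(env), addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */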
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * originating vCPU's work is scheduled as safe work, meaning all
 * flushes will be complete once that safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * originating vCPU's work is scheduled as safe work, meaning all
 * flushes will be complete once that safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but only the low @bits of the
 * virtual address take part in the comparison.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

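/*
 * Usage sketch (illustrative only): on a target whose MMU ignores the
 * top byte of virtual addresses, only 56 bits are significant, so a
 * page flush restricted that way could be issued as
 *
 *     tlb_flush_page_bits_by_mmuidx(env_cpu(env), addr, idxmap, 56);
 */
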
/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                        vaddr len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);

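/*
 * Usage sketch (illustrative only): invalidating a 64KiB region in a
 * single MMU index, comparing the full virtual address width:
 *
 *     tlb_flush_range_by_mmuidx(env_cpu(env), base, 64 * 1024,
 *                               1 << mmu_idx, TARGET_LONG_BITS);
 */
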
/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);

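/*
 * Usage sketch (illustrative only, under the assumption that the page
 * table walk produced paddr and prot; see CPUTLBEntryFull for the full
 * set of fields):
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr & TARGET_PAGE_MASK,
 *         .attrs = MEMTXATTRS_UNSPECIFIED,
 *         .prot = prot,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
 */
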
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
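/*
 * Usage sketch (illustrative only): the common pattern at the end of a
 * target's tlb_fill implementation, once the walk has produced a
 * physical address and permissions:
 *
 *     tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */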
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     vaddr addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          vaddr addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      vaddr addr,
                                                      vaddr len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

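/*
 * Usage sketch (illustrative only): a helper that stores a multi-byte
 * object and wants the whole access to fault up front, before any byte
 * is written:
 *
 *     void *host = probe_write(env, addr, size, mmu_idx, GETPC());
 *     if (host) {
 *         memcpy(host, buf, size);          // page is backed by RAM
 *     } else {
 *         // I/O memory: fall back to per-byte cpu_stb_mmuidx_ra() stores
 *     }
 */
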
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);

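/*
 * Usage sketch (illustrative only): a non-faulting probe, e.g. for a
 * prefetch-like operation that must not trap on an unmapped page:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         return;                           // not mapped: silently ignore
 *     }
 */
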
#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu() - Like probe_access_full except cannot fault and
 * doesn't trigger instrumentation.
 *
 * @env: CPUArchState
 * @addr: virtual address to probe
 * @size: size of the probe
 * @access_type: read, write or execute permission
 * @mmu_idx: softmmu index
 * @phost: ptr to return value host address or NULL
 * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * Returns: TLB flags as per probe_access_flags()
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);

#endif

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2

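/*
 * Usage sketch (illustrative only): GETPC() must be captured in the
 * outermost helper called from generated code, and then threaded down
 * to anything that might raise an exception and unwind:
 *
 *     uint64_t helper_load_thing(CPUArchState *env, uint64_t addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         return cpu_ldq_data_ra(env, addr, ra);
 *     }
 *
 * helper_load_thing() is a hypothetical helper name.
 */
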
#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

static inline void mmap_unlock_guard(void *unused)
{
    mmap_unlock();
}

#define WITH_MMAP_LOCK_GUARD() \
    for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \
         = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)

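/*
 * Usage sketch (illustrative only): the guard form keeps lock/unlock
 * balanced even across early returns from the block, since the unlock
 * runs via the cleanup attribute:
 *
 *     WITH_MMAP_LOCK_GUARD() {
 *         if (!lookup_something(addr)) {    // hypothetical helper
 *             return false;                 // mmap_unlock() still runs
 *         }
 *         // ... code that must run with the mmap lock held ...
 *     }
 */
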
/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

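/*
 * Usage sketch (illustrative only): the rough shape of the user-only
 * host SIGSEGV handler that ties these hooks together:
 *
 *     MMUAccessType t = adjust_signal_pc(&pc, is_write);
 *     if (!maperr && t == MMU_DATA_STORE &&
 *         handle_sigsegv_accerr_write(cpu, old_set, pc, guest_addr)) {
 *         return;    // dirty-page write fault handled, re-execute
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, t, maperr, pc);
 */
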
/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
#define WITH_MMAP_LOCK_GUARD()

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, vaddr addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif