/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpu-timers.h"

/* allow one to see translation results - the slowdown should be negligible,
   so we leave it enabled */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode. Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
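
/*
 * Illustrative sketch (not part of the original header): a target's
 * fault path typically restores state from the host return address
 * captured with GETPC() before raising the guest exception, e.g.:
 *
 *     if (retaddr) {
 *         cpu_restore_state(cs, retaddr, true);
 *     }
 *     cpu_loop_exit(cs);
 */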

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
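
/*
 * Illustrative sketch (not part of the original header): a helper
 * implementing an interruptible instruction might write back its
 * partial progress, then bail out to the main loop when requested
 * (copy_one_unit and env->partial_len are hypothetical):
 *
 *     while (env->partial_len) {
 *         copy_one_unit(env);
 *         env->partial_len--;
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, ra);
 *         }
 *     }
 */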

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
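
/*
 * Illustrative sketch (not part of the original header): a target with
 * a secure and a non-secure address space might register both from its
 * realize hook roughly as follows (secure_mr is hypothetical):
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */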
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
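
/*
 * Illustrative sketch (not part of the original header): @idxmap is a
 * bitmap of MMU indexes, so flushing a page for two hypothetical
 * indexes MMU_KERNEL_IDX and MMU_USER_IDX would look like:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */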
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of the virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr, @addr + @len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits);
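
/*
 * Illustrative sketch (not part of the original header): emulating a
 * hypothetical range-invalidate instruction covering 16 pages for MMU
 * index 0, comparing the full virtual address:
 *
 *     tlb_flush_range_by_mmuidx(cs, addr, 16 * TARGET_PAGE_SIZE,
 *                               1 << 0, TARGET_LONG_BITS);
 */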

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                        target_ulong len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
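
/*
 * Illustrative sketch (not part of the original header): after a
 * successful page table walk in its tlb_fill path, a target might
 * install the translation like so (vaddr, paddr, attrs, prot and
 * mmu_idx are the hypothetical results of that walk):
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */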
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                             target_ulong len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      target_ulong addr,
                                                      target_ulong len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             target_ulong addr,
                                                             target_ulong len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
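
/*
 * Illustrative sketch (not part of the original header): a non-faulting
 * probe, as a first-fault vector load might issue:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         ... page is unmapped; the fault was suppressed, handle it here ...
 *     }
 */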

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TB's in a binary
 * search tree, and the only fields needed to compare TB's in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags;  /* flags defining in which context the code was generated */
    uint32_t cflags; /* compile flags */

    /* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access. */
#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
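
/*
 * Illustrative sketch (not part of the original header): entries in the
 * jump lists above are tagged pointers, so a traversal decodes the TB
 * pointer and the list index roughly as follows:
 *
 *     uintptr_t ptr = tb->jmp_list_head;
 *     TranslationBlock *next = (TranslationBlock *)(ptr & ~(uintptr_t)1);
 *     int n = ptr & 1;
 *
 * where n selects which of next's two jmp_list_next[] entries to follow.
 */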

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
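
/*
 * Illustrative sketch (not part of the original header): a helper that
 * may fault captures its return address on entry and passes it down so
 * the unwinder can find the guest state (helper_load_quad is
 * hypothetical):
 *
 *     uint64_t helper_load_quad(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         return cpu_ldq_data_ra(env, addr, ra);
 *     }
 */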

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif