/*
 * Internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_H
#define ACCEL_TCG_INTERNAL_H

#include "exec/exec-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency. In user-mode emulation, access to the
 * memory-related structures is protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
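
/*
 * Typical usage (an assumption for illustration, not part of this header):
 * user-mode code paths that modify the translation/page structures take
 * mmap_lock in the caller and assert the discipline inside the helper,
 * roughly:
 *
 *     mmap_lock();
 *     ...
 *     assert_memory_lock();    // inside the helper that edits page state
 *     ...
 *     mmap_unlock();
 *
 * so a caller that forgot mmap_lock() trips the assert in debug builds.
 */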

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                              uint64_t cs_base, uint32_t flags,
                              int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2);
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}
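
/*
 * Sketch of intended use (an assumption, not taken from this header):
 * callers that want to log the guest PC for a block, e.g. CPU_LOG_EXEC
 * tracing in the execution loop, do something like
 *
 *     vaddr pc = log_pc(cpu, last_tb);
 *     qemu_log_mask(CPU_LOG_EXEC, "Trace: pc=0x%016" PRIx64 "\n",
 *                   (uint64_t)pc);
 *
 * so that CF_PCREL blocks, which do not record an absolute pc, still log
 * the correct value.
 */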

/*
 * Return true if CS is not running in parallel with other cpus, either
 * because there are no other cpus or we are within an exclusive context.
 */
static inline bool cpu_in_serial_context(CPUState *cs)
{
    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
}
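
/*
 * Illustrative caller (an assumption, not part of this header): TB
 * maintenance code can use this predicate to decide whether work on the
 * shared translation state may run immediately or must be deferred to an
 * exclusive work item, roughly:
 *
 *     if (cpu_in_serial_context(cpu)) {
 *         do_flush(cpu);                    // hypothetical helper
 *     } else {
 *         async_safe_run_on_cpu(cpu, do_flush_work, RUN_ON_CPU_NULL);
 *     }
 */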

extern int64_t max_delay;
extern int64_t max_advance;

extern bool one_insn_per_tb;

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering. A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
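
/*
 * Illustrative use (assumed, not part of this header): memory-access
 * helpers issue the barrier before touching guest memory, e.g.
 *
 *     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);   // before a guest load
 *     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);   // before a guest store
 *
 * On hosts whose TCG_TARGET_DEFAULT_MO already provides the ordering the
 * guest needs, tcg_req_mo() may evaluate to 0 and the smp_mb() is skipped.
 */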

#endif /* ACCEL_TCG_INTERNAL_H */