/*
 * CPU interfaces that are target independent.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1+
 */
#ifndef CPU_COMMON_H
#define CPU_COMMON_H

#include "exec/vaddr.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "hw/core/cpu.h"
#include "tcg/debug-assert.h"
#include "exec/page-protection.h"

65b074da PMD |
19 | #define EXCP_INTERRUPT 0x10000 /* async interruption */ |
20 | #define EXCP_HLT 0x10001 /* hlt instruction reached */ | |
21 | #define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ | |
22 | #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ | |
23 | #define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ | |
24 | #define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */ | |
25 | ||
1f269c14 MAL |
26 | void cpu_exec_init_all(void); |
27 | void cpu_exec_step_atomic(CPUState *cpu); | |
28 | ||
8e3b0cbb | 29 | #define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size()) |
b269a708 | 30 | |
0ac20318 | 31 | /* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */ |
370ed600 | 32 | extern QemuMutex qemu_cpu_list_lock; |
267f685b PB |
33 | void qemu_init_cpu_list(void); |
34 | void cpu_list_lock(void); | |
35 | void cpu_list_unlock(void); | |
ab1a161f | 36 | unsigned int cpu_list_generation_id_get(void); |
267f685b | 37 | |
d9f24bf5 PB |
38 | void tcg_iommu_init_notifier_list(CPUState *cpu); |
39 | void tcg_iommu_free_notifier_list(CPUState *cpu); | |
40 | ||
b3755a91 PB |
41 | #if !defined(CONFIG_USER_ONLY) |
42 | ||
dd310534 AG |
43 | enum device_endian { |
44 | DEVICE_NATIVE_ENDIAN, | |
45 | DEVICE_BIG_ENDIAN, | |
46 | DEVICE_LITTLE_ENDIAN, | |
47 | }; | |
48 | ||
e03b5686 | 49 | #if HOST_BIG_ENDIAN |
c99a29e7 YX |
50 | #define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN |
51 | #else | |
52 | #define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN | |
53 | #endif | |
54 | ||
1ad2134f | 55 | /* address in the RAM (different from a physical address) */ |
4be403c8 | 56 | #if defined(CONFIG_XEN_BACKEND) |
f15fbc4b AP |
57 | typedef uint64_t ram_addr_t; |
58 | # define RAM_ADDR_MAX UINT64_MAX | |
59 | # define RAM_ADDR_FMT "%" PRIx64 | |
60 | #else | |
53576999 SW |
61 | typedef uintptr_t ram_addr_t; |
62 | # define RAM_ADDR_MAX UINTPTR_MAX | |
63 | # define RAM_ADDR_FMT "%" PRIxPTR | |
f15fbc4b | 64 | #endif |
1ad2134f PB |
65 | |
66 | /* memory API */ | |
67 | ||
cd19cfa2 | 68 | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); |
1ad2134f | 69 | /* This should not be used by devices. */ |
07bdaa41 | 70 | ram_addr_t qemu_ram_addr_from_host(void *ptr); |
97e03465 | 71 | ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr); |
e3dd7493 | 72 | RAMBlock *qemu_ram_block_by_name(const char *name); |
022f033b DH |
73 | |
74 | /* | |
75 | * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock. | |
76 | * | |
77 | * @ptr: The host pointer to translate. | |
78 | * @round_offset: Whether to round the result offset down to a target page | |
79 | * @offset: Will be set to the offset within the returned RAMBlock. | |
80 | * | |
81 | * Returns: RAMBlock (or NULL if not found) | |
82 | * | |
83 | * By the time this function returns, the returned pointer is not protected | |
84 | * by RCU anymore. If the caller is not within an RCU critical section and | |
a4a411fb | 85 | * does not hold the BQL, it must have other means of protecting the |
022f033b DH |
86 | * pointer, such as a reference to the memory region that owns the RAMBlock. |
87 | */ | |
422148d3 | 88 | RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, |
f615f396 | 89 | ram_addr_t *offset); |
f90bb71b | 90 | ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host); |
fa53a0e5 GA |
91 | void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev); |
92 | void qemu_ram_unset_idstr(RAMBlock *block); | |
422148d3 | 93 | const char *qemu_ram_get_idstr(RAMBlock *rb); |
754cb9c0 YK |
94 | void *qemu_ram_get_host_addr(RAMBlock *rb); |
95 | ram_addr_t qemu_ram_get_offset(RAMBlock *rb); | |
96 | ram_addr_t qemu_ram_get_used_length(RAMBlock *rb); | |
082851a3 | 97 | ram_addr_t qemu_ram_get_max_length(RAMBlock *rb); |
463a4ac2 | 98 | bool qemu_ram_is_shared(RAMBlock *rb); |
8dbe22c6 | 99 | bool qemu_ram_is_noreserve(RAMBlock *rb); |
2ce16640 DDAG |
100 | bool qemu_ram_is_uf_zeroable(RAMBlock *rb); |
101 | void qemu_ram_set_uf_zeroable(RAMBlock *rb); | |
b895de50 CLG |
102 | bool qemu_ram_is_migratable(RAMBlock *rb); |
103 | void qemu_ram_set_migratable(RAMBlock *rb); | |
104 | void qemu_ram_unset_migratable(RAMBlock *rb); | |
b0182e53 | 105 | bool qemu_ram_is_named_file(RAMBlock *rb); |
6d998f3c | 106 | int qemu_ram_get_fd(RAMBlock *rb); |
2ce16640 | 107 | |
863e9621 | 108 | size_t qemu_ram_pagesize(RAMBlock *block); |
67f11b5c | 109 | size_t qemu_ram_pagesize_largest(void); |
1ad2134f | 110 | |
1f649fe0 PMD |
111 | /** |
112 | * cpu_address_space_init: | |
113 | * @cpu: CPU to add this address space to | |
114 | * @asidx: integer index of this address space | |
115 | * @prefix: prefix to be used as name of address space | |
116 | * @mr: the root memory region of address space | |
117 | * | |
118 | * Add the specified address space to the CPU's cpu_ases list. | |
119 | * The address space added with @asidx 0 is the one used for the | |
120 | * convenience pointer cpu->as. | |
121 | * The target-specific code which registers ASes is responsible | |
122 | * for defining what semantics address space 0, 1, 2, etc have. | |
123 | * | |
124 | * Before the first call to this function, the caller must set | |
125 | * cpu->num_ases to the total number of address spaces it needs | |
126 | * to support. | |
127 | * | |
128 | * Note that with KVM only one address space is supported. | |
129 | */ | |
130 | void cpu_address_space_init(CPUState *cpu, int asidx, | |
131 | const char *prefix, MemoryRegion *mr); | |
132 | ||
d7ef71ef | 133 | void cpu_physical_memory_rw(hwaddr addr, void *buf, |
28c80bfe | 134 | hwaddr len, bool is_write); |
a8170e5e | 135 | static inline void cpu_physical_memory_read(hwaddr addr, |
0c249ff7 | 136 | void *buf, hwaddr len) |
1ad2134f | 137 | { |
85eb7c18 | 138 | cpu_physical_memory_rw(addr, buf, len, false); |
1ad2134f | 139 | } |
a8170e5e | 140 | static inline void cpu_physical_memory_write(hwaddr addr, |
0c249ff7 | 141 | const void *buf, hwaddr len) |
1ad2134f | 142 | { |
85eb7c18 | 143 | cpu_physical_memory_rw(addr, (void *)buf, len, true); |
1ad2134f | 144 | } |
a8170e5e AK |
145 | void *cpu_physical_memory_map(hwaddr addr, |
146 | hwaddr *plen, | |
28c80bfe | 147 | bool is_write); |
a8170e5e | 148 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
28c80bfe | 149 | bool is_write, hwaddr access_len); |
1ad2134f | 150 | |
a8170e5e | 151 | bool cpu_physical_memory_is_io(hwaddr phys_addr); |
76f35538 | 152 | |
6842a08e BS |
153 | /* Coalesced MMIO regions are areas where write operations can be reordered. |
154 | * This usually implies that write operations are side-effect free. This allows | |
155 | * batching which can make a major impact on performance when using | |
156 | * virtualization. | |
157 | */ | |
6842a08e BS |
158 | void qemu_flush_coalesced_mmio_buffer(void); |
159 | ||
0c249ff7 | 160 | void cpu_flush_icache_range(hwaddr start, hwaddr len); |
1ad2134f | 161 | |
754cb9c0 | 162 | typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque); |
bd2fa51f | 163 | |
e3807054 | 164 | int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque); |
d3a5038c | 165 | int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length); |
b2e9426c XL |
166 | int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, |
167 | size_t length); | |
bd2fa51f | 168 | |
b3755a91 PB |
169 | #endif |
170 | ||
73842ef0 PMD |
171 | /* Returns: 0 on success, -1 on error */ |
172 | int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, | |
173 | void *ptr, size_t len, bool is_write); | |
174 | ||
c5e3c918 | 175 | /* vl.c */ |
c138c3b8 | 176 | void list_cpus(void); |
377bf6f3 | 177 | |
3549118b | 178 | #ifdef CONFIG_TCG |
b254c342 PMD |
179 | |
180 | bool tcg_cflags_has(CPUState *cpu, uint32_t flags); | |
181 | void tcg_cflags_set(CPUState *cpu, uint32_t flags); | |
182 | ||
183 | /* current cflags for hashing/comparison */ | |
184 | uint32_t curr_cflags(CPUState *cpu); | |
185 | ||
3549118b PMD |
186 | /** |
187 | * cpu_unwind_state_data: | |
188 | * @cpu: the cpu context | |
189 | * @host_pc: the host pc within the translation | |
190 | * @data: output data | |
191 | * | |
192 | * Attempt to load the the unwind state for a host pc occurring in | |
193 | * translated code. If @host_pc is not in translated code, the | |
194 | * function returns false; otherwise @data is loaded. | |
195 | * This is the same unwind info as given to restore_state_to_opc. | |
196 | */ | |
197 | bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data); | |
198 | ||
199 | /** | |
200 | * cpu_restore_state: | |
201 | * @cpu: the cpu context | |
202 | * @host_pc: the host pc within the translation | |
203 | * @return: true if state was restored, false otherwise | |
204 | * | |
205 | * Attempt to restore the state for a fault occurring in translated | |
206 | * code. If @host_pc is not in translated code no state is | |
207 | * restored and the function returns false. | |
208 | */ | |
209 | bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc); | |
210 | ||
211 | G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu); | |
212 | G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); | |
213 | #endif /* CONFIG_TCG */ | |
214 | G_NORETURN void cpu_loop_exit(CPUState *cpu); | |
215 | G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); | |
216 | ||
a7f6f4f5 AJ |
217 | /* accel/tcg/cpu-exec.c */ |
218 | int cpu_exec(CPUState *cpu); | |
219 | ||
220 | /** | |
221 | * env_archcpu(env) | |
222 | * @env: The architecture environment | |
223 | * | |
224 | * Return the ArchCPU associated with the environment. | |
225 | */ | |
226 | static inline ArchCPU *env_archcpu(CPUArchState *env) | |
227 | { | |
228 | return (void *)env - sizeof(CPUState); | |
229 | } | |
230 | ||
231 | /** | |
232 | * env_cpu(env) | |
233 | * @env: The architecture environment | |
234 | * | |
235 | * Return the CPUState associated with the environment. | |
236 | */ | |
237 | static inline CPUState *env_cpu(CPUArchState *env) | |
238 | { | |
239 | return (void *)env - sizeof(CPUState); | |
240 | } | |
241 | ||
a120d320 RH |
242 | #ifndef CONFIG_USER_ONLY |
243 | /** | |
244 | * cpu_mmu_index: | |
245 | * @env: The cpu environment | |
246 | * @ifetch: True for code access, false for data access. | |
247 | * | |
248 | * Return the core mmu index for the current translation regime. | |
249 | * This function is used by generic TCG code paths. | |
250 | * | |
251 | * The user-only version of this function is inline in cpu-all.h, | |
252 | * where it always returns MMU_USER_IDX. | |
253 | */ | |
3b916140 | 254 | static inline int cpu_mmu_index(CPUState *cs, bool ifetch) |
a120d320 | 255 | { |
a120d320 RH |
256 | int ret = cs->cc->mmu_index(cs, ifetch); |
257 | tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES); | |
258 | return ret; | |
259 | } | |
260 | #endif /* !CONFIG_USER_ONLY */ | |
261 | ||
#endif /* CPU_COMMON_H */