/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/**
 * cpu_unwind_state_data:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @data: output data
 *
 * Attempt to load the unwind state for a host pc occurring in
 * translated code. If @host_pc is not in translated code, the
 * function returns false; otherwise @data is loaded.
 * This is the same unwind info as given to restore_state_to_opc.
 */
bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);

/**
 * cpu_restore_state:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @host_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);

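/*
 * Usage sketch (illustrative only; the helper below is hypothetical):
 * a softmmu fault path typically restores the guest state for the
 * faulting instruction before raising the guest exception.
 *
 *     static void raise_mmu_fault(CPUState *cs, uintptr_t host_pc)
 *     {
 *         if (cpu_restore_state(cs, host_pc)) {
 *             // Guest pc/flags now reflect the faulting instruction.
 *         }
 *         cpu_loop_exit(cs);
 *     }
 */
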
G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit(CPUState *cpu);
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}

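/*
 * Usage sketch (hypothetical target code): a long-running, interruptible
 * instruction can write back its partial results and poll for a pending
 * exit between units of work.
 *
 *     while (env->units_left) {
 *         copy_one_unit(env);                  // updates architectural state
 *         env->units_left--;
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, ra);   // resume this insn later
 *         }
 *     }
 */
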
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, so all flushes are complete once the source
 * vCPU's safe work has run. When that happens depends on where the
 * guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
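/*
 * Usage sketch: a target implementing a guest "invalidate one TLB entry"
 * operation would forward it to tlb_flush_page, and a full guest TLB
 * invalidation (e.g. on context switch) to tlb_flush. The helper name
 * below is hypothetical.
 *
 *     void helper_invalidate_page(CPUArchState *env, vaddr addr)
 *     {
 *         tlb_flush_page(env_cpu(env), addr);
 *     }
 */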
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, so all flushes are complete once the source
 * vCPU's safe work has run. When that happens depends on where the
 * guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, so all flushes are
 * complete once the source vCPU's safe work has run. When that
 * happens depends on where the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes, like tlb_flush_by_mmuidx_all_cpus, except that the source
 * vCPU's work is scheduled as safe work, so all flushes are complete
 * once the source vCPU's safe work has run. When that happens depends
 * on where the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
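/*
 * The @idxmap arguments above are bitmaps with one bit per MMU index.
 * Illustrative sketch, assuming hypothetical target indexes
 * MMU_KERNEL_IDX and MMU_USER_IDX:
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_by_mmuidx(cs, idxmap);                  // this vCPU only
 *     tlb_flush_by_mmuidx_all_cpus_synced(cs, idxmap);  // broadcast
 */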

/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of the address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                        vaddr len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);

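/*
 * Usage sketch: a guest operation that invalidates a block of pages maps
 * naturally onto the range flush. The variables below are hypothetical;
 * passing @bits == TARGET_LONG_BITS compares the full address.
 *
 *     tlb_flush_range_by_mmuidx(cs, start & TARGET_PAGE_MASK,
 *                               npages * TARGET_PAGE_SIZE,
 *                               idxmap, TARGET_LONG_BITS);
 */
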
/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);

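/*
 * Usage sketch: a target tlb_fill might complete a successful page
 * table walk like this (field values are illustrative, not any real
 * target's policy):
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr & TARGET_PAGE_MASK,
 *         .attrs = MEMTXATTRS_UNSPECIFIED,
 *         .prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, addr, &full);
 */
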
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     vaddr addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          vaddr addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      vaddr addr,
                                                      vaddr len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}

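/*
 * Usage sketch: before emulating a multi-byte store that must not fault
 * part-way through, a helper can validate the whole destination up front
 * (the surrounding helper is hypothetical):
 *
 *     void *host = probe_write(env, addr, len, mmu_idx, GETPC());
 *     if (host) {
 *         memset(host, 0, len);  // RAM-backed: direct host access is safe
 *     } else {
 *         // NULL means I/O: fall back to per-byte guest store accesses
 *     }
 */
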
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);

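/*
 * Usage sketch of the no-fault pattern, e.g. for first-fault vector
 * loads (control flow hypothetical):
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         // Page is unmapped: suppress the element instead of faulting.
 *     }
 */
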
#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu() - Like probe_access_full, except it cannot
 * fault and doesn't trigger instrumentation.
 *
 * @env: CPUArchState
 * @addr: virtual address to probe
 * @size: size of the probe
 * @access_type: read, write or execute permission
 * @mmu_idx: softmmu index
 * @phost: ptr to return value host address or NULL
 * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * Returns: TLB flags as per probe_access_flags()
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);

#endif

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(hwaddr addr);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/*
 * The true return address will often point to a host insn that is part of
 * the next translated guest insn. Adjust the address backward to point to
 * the middle of the call insn. Subtracting one would do the job except for
 * several compressed mode architectures (arm, mips) which set the low bit
 * to indicate the compressed mode; subtracting two works around that. It
 * is also the case that there are no host isas that contain a call insn
 * smaller than 4 bytes, so we don't worry about special-casing this.
 */
#define GETPC_ADJ 2

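/*
 * Usage sketch: GETPC() must be used in the outermost helper called
 * from generated code, and its value passed down to any function that
 * may need to unwind (the helper name below is hypothetical):
 *
 *     uint64_t helper_ldq_wrapped(CPUArchState *env, vaddr addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 */
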
#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding. Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, vaddr addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif