/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
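/* For example, expanding the arithmetic above: MAX_OPC_PARAM_ARGS is
 * 5 + 1 = 6, so a call op needs at most 4 + 1 * 6 = 10 parameters on a
 * 64-bit host (MAX_OPC_PARAM_PER_ARG == 1) and 4 + 2 * 6 = 16 on a
 * 32-bit host (MAX_OPC_PARAM_PER_ARG == 2). */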
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
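/* Illustrative sketch only (the AddressSpace variables and the two-space
 * layout are hypothetical): a target with a secure and a non-secure
 * address space could register them as:
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, as_nonsecure, 0);  // becomes cpu->as
 *     cpu_address_space_init(cpu, as_secure, 1);
 */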
/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU. @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 * @flush_global: ignored
 *
 * Flush the entire TLB for the specified CPU.
 * The flush_global flag is in theory an indicator of whether the whole
 * TLB should be flushed, or only those entries not marked global.
 * In practice QEMU does not implement any global/not global flag for
 * TLB entries, and the argument is ignored.
 */
void tlb_flush(CPUState *cpu, int flush_global);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
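/* Illustrative calls (the index values are made up): both variadic
 * flush functions take a list of MMU indexes that must end with a
 * negative sentinel, e.g.:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 1, -1);  // one page, idx 0 and 1
 *     tlb_flush_by_mmuidx(cpu, 2, -1);                // all entries, idx 2
 */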
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
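/* Sketch of a typical call site (variable names are hypothetical): after
 * a successful page table walk in a target's tlb_fill(), the result is
 * installed with something like:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */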
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;      /* compile flags */
#define CF_COUNT_MASK    0x7fff
#define CF_LAST_IO       0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE       0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT    0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    void *tc_ptr;         /* pointer to the translated code */
    uint8_t *tc_search;   /* pointer to search data */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of jump instruction */
#else
    uintptr_t tb_next[2];       /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
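/* Illustrative sketch: decoding one link while walking the list of TBs
 * that jump into this one.  The low two bits select which jmp_next slot
 * continues the chain (0 or 1); the value 2 marks the jmp_first entry
 * that terminates the circular list:
 *
 *     TranslationBlock *ptb = tb->jmp_first;
 *     unsigned n = (uintptr_t)ptb & 3;
 *     ptb = (TranslationBlock *)((uintptr_t)ptb & ~3);
 *     // if n < 2, the walk continues at ptb->jmp_next[n]
 */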

#include "qemu/thread.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    QemuMutex tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                               "Linking TBs %p [" TARGET_FMT_lx
                               "] index %d -> %p [" TARGET_FMT_lx "]\n",
                               tb->tc_ptr, tb->pc, n,
                               tb_next->tc_ptr, tb_next->pc);
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#define GETPC()  (GETRA() - GETPC_ADJ)
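
/* Usage sketch (illustrative, not a definition from this file): a helper
 * that may fault passes GETPC() down so guest CPU state can first be
 * restored to the faulting instruction, e.g.:
 *
 *     if (error) {
 *         cpu_loop_exit_restore(cs, GETPC());
 *     }
 */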

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
extern CPUState *tcg_current_cpu;
extern bool exit_request;
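
/* Access sketch (illustrative): readers and writers pair up through the
 * helpers from "qemu/atomic.h" named above, e.g.:
 *
 *     CPUState *cpu = atomic_mb_read(&tcg_current_cpu);
 *     atomic_mb_set(&exit_request, true);
 */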

#endif