/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow viewing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
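/* For example, with MAX_OPC_PARAM_IARGS = 5 and MAX_OPC_PARAM_OARGS = 1 as
   above, MAX_OPC_PARAM works out to 4 + 2 * 6 = 16 on 32-bit hosts and
   4 + 1 * 6 = 10 on 64-bit hosts. */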
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];

#include "qemu-log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUArchState *env, uintptr_t searched_pc);
void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(target_phys_addr_t addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use an average code fragment size per target and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access. */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of jump instruction */
#else
    uintptr_t tb_next[2];       /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
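/* Illustrative sketch, not part of the original header: jmp_first and
   jmp_next above form the tagged circular list of TBs that jump into this
   one.  The two least significant bits of each link select the next pointer
   (0 = jmp_next[0], 1 = jmp_next[1], 2 = back at the owning TB's jmp_first).
   A hypothetical visitor over that list could look like this. */
static inline void tb_visit_jmp_list_example(TranslationBlock *tb,
                                             void (*visit)(TranslationBlock *))
{
    TranslationBlock *ptb = tb->jmp_first;

    /* stop once the tag says we are back at the owning TB's jmp_first */
    while (ptb != NULL && ((uintptr_t)ptb & 3) != 2) {
        unsigned int n = (uintptr_t)ptb & 3;   /* jump slot used by the jumper */
        TranslationBlock *src = (TranslationBlock *)((uintptr_t)ptb & ~3);

        visit(src);                 /* src contains a direct jump into tb */
        ptb = src->jmp_next[n];     /* follow the chain through that slot */
    }
}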

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
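/* Illustrative note (assumption; the cache itself is not declared in this
   header): the hash above is used to index the per-CPU direct-mapped jump
   cache provided via CPU_COMMON, e.g.

       tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
*/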

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
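/* Illustrative sketch, not part of the original header: how a lookup such as
   the one in tb_find_slow() might probe the physical hash chain declared
   above.  The function name and the reduced set of checks (pc and first page
   only, no cs_base/flags comparison) are hypothetical simplifications. */
static inline TranslationBlock *tb_phys_hash_lookup_example(tb_page_addr_t phys_pc,
                                                            target_ulong pc)
{
    TranslationBlock *tb;

    /* hash on the physical address of the first code byte, then walk the chain */
    for (tb = tb_phys_hash[tb_phys_hash_func(phys_pc)]; tb != NULL;
         tb = tb->phys_hash_next) {
        if (tb->pc == pc && tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK)) {
            return tb;
        }
    }
    return NULL;
}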

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
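/* Illustrative usage note (assumption about the caller, which lives outside
   this header): the execution loop in cpu-exec.c chains the TB that just ran
   to the one about to run, passing the jump slot in the low bits of next_tb:

       tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
*/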

TranslationBlock *tb_find_pc(uintptr_t pc_ptr);

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself. */
#if defined(CONFIG_TCG_INTERPRETER)
/* Alpha and SH4 user mode emulations and Softmmu call GETPC().
   For all others, GETPC remains undefined (which makes TCI a little faster). */
# if defined(CONFIG_SOFTMMU) || defined(TARGET_ALPHA) || defined(TARGET_SH4) \
    || defined(TARGET_SPARC)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
# endif
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes. */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(target_phys_addr_t index);
uint64_t io_mem_read(struct MemoryRegion *mr, target_phys_addr_t addr,
                     unsigned size);
void io_mem_write(struct MemoryRegion *mr, target_phys_addr_t addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
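/* Explanatory note (assumption about the template header, which is outside
   this file): each inclusion of "softmmu_header.h" above instantiates the
   code-fetch accessors for one access size with the _code suffix (e.g.
   ldub_code(), lduw_code(), ldl_code(), ldq_code()), which translators use
   to read guest instructions through the softmmu TLB. */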

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately. */
static inline int can_do_io(CPUArchState *env)
{
    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok. */
    if (!env->current_tb) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif