/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
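/* With the values above (MAX_OPC_PARAM_ARGS = 5 + 1 = 6), MAX_OPC_PARAM works
   out to 4 + 2 * 6 = 16 parameter slots on a 32-bit host and 4 + 1 * 6 = 10
   slots on a 64-bit host. */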
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access. */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of jump instruction */
#else
    uintptr_t tb_next[2];       /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
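
/* Illustrative sketch (not an API used elsewhere in this header): walking the
   circular list of TBs that jump into 'tb', decoding the tag bits described
   above.  A tag of 0 or 1 selects jmp_next[0]/jmp_next[1] of the pointed-to
   TB; a tag of 2 marks the jmp_first terminator.

       TranslationBlock *ptr = tb->jmp_first;
       while (((uintptr_t)ptr & 3) != 2) {
           TranslationBlock *src = (TranslationBlock *)((uintptr_t)ptr & ~3);
           int n = (uintptr_t)ptr & 3;  // which outgoing slot of 'src' points here
           // ... visit 'src' ...
           ptr = src->jmp_next[n];
       }
*/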

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
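
/* Illustrative sketch (simplified; the real lookup in cpu-exec.c also matches
   cs_base and flags): tb_phys_hash_func() selects a bucket in
   TBContext.tb_phys_hash and candidates are chained through phys_hash_next.
   Assuming a TBContext *ctx:

       TranslationBlock *tb = ctx->tb_phys_hash[tb_phys_hash_func(phys_pc)];
       while (tb && !(tb->pc == pc
                      && tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK))) {
           tb = tb->phys_hash_next;
       }
*/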

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

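/* tb_set_jmp_target1() patches a direct jump inside already generated host
   code so that it branches to 'addr'.  The TCI, i386 and x86_64 variants below
   rewrite a 32-bit relative displacement in place; PPC, AArch64 and SPARC
   defer to out-of-line helpers; the ARM variant rewrites the 24-bit immediate
   of a B instruction and then flushes the instruction cache. */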
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

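/* Rough sketch of how the main loop in cpu-exec.c uses this: the value
   returned from an executed TB encodes the previous TB pointer with the taken
   jump slot in its two low bits, and that slot is patched to chain directly
   to the newly found TB when it does not cross a page boundary:

       if (next_tb != 0 && tb->page_addr[1] == -1) {
           tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
       }
*/
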
/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC()  (GETRA() - GETPC_ADJ)

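/* Typical (hypothetical) use of GETPC() in a softmmu helper: capture the
   return address on entry so guest CPU state can be restored to the faulting
   instruction before raising an exception.  helper_example_store() and
   address_is_valid() are made up for illustration; cpu_restore_state() and
   cpu_loop_exit() are declared above.

       void helper_example_store(CPUArchState *env, target_ulong addr, uint32_t val)
       {
           uintptr_t ra = GETPC();
           if (!address_is_valid(addr)) {
               cpu_restore_state(env, ra);
               cpu_loop_exit(env);
           }
           ...
       }
*/
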
/* The LDST optimization splits code generation into fast and slow paths.
   In some implementations, we pass the "logical" return address manually;
   in others, we must infer the logical return from the true return. */
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
# if defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
#  define GETRA_LDST(RA)   (*(int32_t *)((RA) - 4))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
   straight-line.  Find and decode that branch insn. */
#  define GETRA_LDST(RA)   tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 8;                    /* skip the two insns */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 8) >> (8 - 2);    /* extract the displacement */
    ra += 8;                    /* branches are relative to pc+8 */
    ra += b;                    /* apply the displacement */
    return ra;
}
# elif defined(__aarch64__)
#  define GETRA_LDST(RA)  tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 4;                    /* skip one instruction */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 6) >> (6 - 2);    /* extract the displacement */
    ra += b;                    /* apply the displacement */
    return ra;
}
# endif
#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */

/* ??? Delete these once they are no longer used. */
bool is_tcg_gen_code(uintptr_t pc_ptr);
#ifdef GETRA_LDST
# define GETRA_EXT()  tcg_getra_ext(GETRA())
static inline uintptr_t tcg_getra_ext(uintptr_t ra)
{
    return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
}
#else
# define GETRA_EXT()  GETRA()
#endif

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

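/* Each inclusion of softmmu_header.h below instantiates the inline guest
   memory accessors for one access size (1, 2, 4 and 8 bytes), using the
   _code suffix and the code-fetch access type defined above. */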
#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately. */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok. */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}
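
/* Usage note (sketch of the check made on the softmmu I/O path): when icount
   is enabled, callers performing memory-mapped I/O are expected to test
   can_do_io() and force a recompile so the access becomes the last
   instruction of its TB:

       if (!can_do_io(env)) {
           cpu_io_recompile(env, retaddr);
       }
*/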
440 | ||
875cdcf6 | 441 | #endif |