/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow translation results to be inspected; the slowdown should be
   negligible, so it is left enabled.  */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

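/* Usage note (an assumption about the per-target translators, not stated
   in this header): gen_intermediate_code() keeps translating guest
   instructions while is_jmp stays DISAS_NEXT, and closes the TB once a
   branch or CPU state change sets one of the other values.  */
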
struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

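/* Worked example (illustrative, using the values above): with
   MAX_OPC_PARAM_ARGS = 6, a 32-bit host (MAX_OPC_PARAM_PER_ARG = 2) gets
   MAX_OPC_PARAM = 4 + 2 * 6 = 16 parameter slots per op, while a 64-bit
   host gets 4 + 1 * 6 = 10.  */
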
/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];

#include "qemu-log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUArchState *env, uintptr_t searched_pc);
void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per-code average fragment size and modulate it according to
   the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
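
/* Worked example of the jump-list encoding above (illustrative, not part
 * of the original header): if TB "a" branches to TB "b" through its jump
 * slot 1, chaining (see tb_add_jump() below) records
 *
 *     a->jmp_next[1] = b->jmp_first;
 *     b->jmp_first   = (TranslationBlock *)((uintptr_t)a | 1);
 *
 * so the low two bits of each pointer say which field continues the list,
 * and the tag value 2 marks the head stored in jmp_first.  */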

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
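
/* Usage sketch (an assumption, mirroring tb_find_fast() in cpu-exec.c):
 * the jmp-cache hash indexes a per-CPU direct-mapped cache of recently
 * executed TBs, e.g.
 *
 *     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
 */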

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
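
/* Lookup sketch (hypothetical helper, simplified from tb_find_slow() in
   exec.c; it ignores blocks whose code spans two guest pages): */
static inline TranslationBlock *
tb_example_phys_find(target_ulong pc, target_ulong cs_base, uint64_t flags,
                     tb_page_addr_t phys_pc)
{
    TranslationBlock *tb;

    for (tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];
         tb != NULL; tb = tb->phys_hash_next) {
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags
            && tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK)) {
            return tb;
        }
    }
    return NULL;    /* not translated yet; tb_gen_code() would be next */
}
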
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination: jmp_addr points at the 4-byte
       displacement of a jmp rel32, which is relative to the end of
       the instruction (jmp_addr + 4) */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    /* patch the 24-bit word-offset field of the B/BL instruction; the +8
       accounts for the ARM pipeline, where PC reads as the instruction
       address plus 8 */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
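
/* Illustrative sketch (hypothetical helper, not part of the original
   header): walking the circular list built by tb_add_jump(), using the
   same tag decoding as the unchaining code in exec.c.  */
static inline void tb_example_visit_jumpers(TranslationBlock *tb)
{
    TranslationBlock *ptb = tb->jmp_first;

    while (ptb != NULL) {
        unsigned n = (uintptr_t)ptb & 3;    /* which link continues the list */
        ptb = (TranslationBlock *)((uintptr_t)ptb & ~(uintptr_t)3);
        if (n == 2) {
            break;                          /* tag 2: back at the list head */
        }
        /* here "ptb" is a TB that jumps into "tb" through slot "n" */
        ptb = ptb->jmp_next[n];
    }
}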

TranslationBlock *tb_find_pc(uintptr_t pc_ptr);

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself.  */
#if defined(CONFIG_TCG_INTERPRETER)
/* Alpha and SH4 user mode emulations and Softmmu call GETPC().
   For all others, GETPC remains undefined (which makes TCI a little
   faster).  */
# if defined(CONFIG_SOFTMMU) || defined(TARGET_ALPHA) || defined(TARGET_SH4) \
    || defined(TARGET_SPARC)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
# endif
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes.  */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif
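
/* Usage sketch (hypothetical helper, kept under #if 0 because GETPC() is
   not defined for every TCI configuration): helpers called from generated
   code capture GETPC() on entry, so a fault can later be mapped back to
   guest state via cpu_restore_state().  */
#if 0
void helper_example(CPUArchState *env)
{
    uintptr_t retaddr = GETPC();    /* host PC inside the calling TB */
    /* ... on fault: cpu_restore_state(tb_find_pc(retaddr), env, retaddr); */
}
#endif
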
#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
uint64_t io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                     unsigned size);
void io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
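
/* Each DATA_SIZE/include pair below expands softmmu_header.h once per
   access width.  Sketch of the resulting guest code-fetch accessors (an
   assumption about softmmu_header.h's MEMSUFFIX-based naming):  */
#if 0
uint32_t ldub_code(target_ulong ptr);   /* from DATA_SIZE 1 */
uint32_t lduw_code(target_ulong ptr);   /* from DATA_SIZE 2 */
uint32_t ldl_code(target_ulong ptr);    /* from DATA_SIZE 4 */
uint64_t ldq_code(target_ulong ptr);    /* from DATA_SIZE 8 */
#endif
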
#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (!env->current_tb) {
        return 1;
    }
    return env->can_do_io != 0;
}
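
/* Usage note (an assumption, not stated in this header): when icount is
   active and can_do_io() returns 0, an IO access arriving before the last
   instruction is typically redone via cpu_io_recompile(), which
   retranslates the TB so the access becomes its final instruction.  */
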
#endif