/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
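
/* Illustrative note (not part of the original header): a target front end
   typically records one of these values in its DisasContext while
   translating, e.g. setting is_jmp to DISAS_UPDATE after an instruction
   that changed CPU state, so the main translation loop knows it must end
   the current TB. */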

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
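/* Worked example (illustrative, not in the original header): with
   MAX_OPC_PARAM_IARGS = 5 and MAX_OPC_PARAM_OARGS = 1, N = 6, so
   MAX_OPC_PARAM evaluates to 4 + 2 * 6 = 16 on 32-bit hosts and
   4 + 1 * 6 = 10 on 64-bit hosts. */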
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per-code average fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical pages containing code.  The lower bit
       of the pointer tells the index in page_next[]. */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one.  This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
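
/* Illustrative note (not part of the original header): with the encoding
   described above, a pointer p taken from the jmp list decodes as
   tb = (TranslationBlock *)(p & ~3) and n = p & 3, where n selects
   jmp_next[0], jmp_next[1] or jmp_first; tb_add_jump() below shows how an
   entry is linked in. */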

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
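
/* Illustrative note (not part of the original header): tb_jmp_cache_hash_func()
   indexes the per-CPU tb_jmp_cache used to look a TB up by guest pc, while
   tb_phys_hash_func() indexes TBContext.tb_phys_hash declared above. */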

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

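/* Illustrative note (not part of the original header): this is how TB
   chaining typically happens - once the execution loop has run tb and
   located the next block tb_next, it calls tb_add_jump(tb, n, tb_next) so
   that the generated code of tb jumps directly into tb_next->tc_ptr on the
   next pass, bypassing the lookup. */
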
/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes. */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif
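
/* Illustrative note (not part of the original header): MMU and other
   helpers use GETPC()/GETPC_EXT() to hand the host return address back to
   the translator, e.g. cpu_restore_state(env, GETPC()), so the guest state
   of the faulting instruction can be reconstructed from the translated
   code. */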

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* The qemu_ld/st optimization splits code generation into a fast and a slow
   path, so an MMU helper called from the slow path needs special handling to
   obtain the fast path's pc without any additional argument.  The trick is
   to embed the fast path pc into the slow path itself.

   Code flow in the slow path:
   (1) pre-process
   (2) call MMU helper
   (3) jump to (5)
   (4) fast path information (implementation specific)
   (5) post-process (e.g. stack adjust)
   (6) jump to the code following the fast path
 */
# if defined(__i386__) || defined(__x86_64__)
/* To avoid broken disassembly, a long jmp is used to embed the fast path pc;
   its destination is the code following the fast path, although this jmp is
   never executed.

   call MMU helper
   jmp POST_PROC (2byte)    <- GETRA()
   jmp NEXT_CODE (5byte)
   POST_PROCESS ...         <- GETRA() + 7
 */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
                       *(int32_t *)((void *)GETRA() + 3) - 1))
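/* Illustrative reading of the macro above (not part of the original header):
   GETRA() + 7 is the address just past the 5-byte jmp and GETRA() + 3 is the
   location of its 32-bit displacement, so their sum is the code following
   the fast path; subtracting 1 keeps the usual GETPC convention of pointing
   into the preceding guest instruction's translation. */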
# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
   straight-line.  Find and decode that branch insn. */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 8;                  /* skip the two insns */
    b = *(int32_t *)ra;       /* load the branch insn */
    b = (b << 8) >> (8 - 2);  /* extract the displacement */
    ra += 8;                  /* branches are relative to pc+8 */
    ra += b;                  /* apply the displacement */
    ra -= 4;                  /* return a pointer into the current opcode,
                                 not the start of the next opcode */
    return ra;
}
# else
#  error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
bool is_tcg_gen_code(uintptr_t pc_ptr);
# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
#else
# define GETPC_EXT() GETPC()
#endif

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#include "exec/softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately. */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok. */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif