/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow inspection of translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
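/* Worked example (illustrative only): with MAX_OPC_PARAM_IARGS = 5 and
   MAX_OPC_PARAM_OARGS = 1, MAX_OPC_PARAM_ARGS is 6, so MAX_OPC_PARAM comes
   out to 4 + 2 * 6 = 16 parameters on 32-bit hosts and 4 + 1 * 6 = 10 on
   64-bit hosts. */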
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a measured average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
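
/* Illustrative sketch (not part of the API): the jmp_first / jmp_next pointers
   above carry a tag in their two least significant bits.  A consumer of the
   circular list decodes an entry roughly as follows, where a tag of 0 or 1
   selects jmp_next[n] and a tag of 2 marks jmp_first itself:

       uintptr_t entry = (uintptr_t)tb->jmp_first;
       int n = entry & 3;
       TranslationBlock *next_tb = (TranslationBlock *)(entry & ~3);
*/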

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
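
/* Illustrative lookups (a sketch, not definitions from this header): the
   per-CPU tb_jmp_cache array and the tb_phys_hash table in TBContext are
   declared elsewhere; assuming pointers to them, typical uses of the hashes
   would look like:

       TranslationBlock *tb  = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
       TranslationBlock *ptb = tb_ctx->tb_phys_hash[tb_phys_hash_func(phys_pc)];

   tb_jmp_cache_hash_page() hashes only the page-selecting bits of the PC and
   is used to drop every cache entry belonging to one guest page. */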

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
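/* For the i386/x86_64 case above, jmp_addr points at the 32-bit displacement
   field of a jmp rel32 instruction, and the displacement is taken relative to
   the end of that field, hence "addr - (jmp_addr + 4)".  For example (purely
   illustrative values), a displacement field at 0x1000 targeting 0x1020 is
   patched to 0x1020 - (0x1000 + 4) = 0x1c.  x86 keeps the instruction cache
   coherent, so no explicit flush is needed. */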
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
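
/* Illustrative sketch, assuming the main execution loop: the value returned by
   an exiting TB encodes, in its two low bits, which of the two jump slots was
   taken, so the loop can chain the previously executed block to the one it is
   about to run roughly like this:

       tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
*/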

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC() (GETRA() - GETPC_ADJ)

#if !defined(CONFIG_USER_ONLY)

void phys_mem_set_alloc(void *(*alloc)(size_t));

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
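
/* The repeated inclusions above expand exec/softmmu_header.h once per access
   size with MEMSUFFIX set to _code, producing the code-fetch accessors
   (e.g. ldub_code and ldl_code) used by the target translators. */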

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}

#endif