/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"
/* Allow the translation results to be inspected - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef target_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 96
/* A Call op needs up to 6 + 2N parameters (N = number of arguments). */
#define MAX_OPC_PARAM 10
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
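/* Note (rationale inferred from the definitions above): OPC_MAX_SIZE leaves
   MAX_OP_PER_INSTR slots of headroom, so translation can stop as soon as the
   buffer index passes OPC_MAX_SIZE and the last guest instruction can still
   expand fully without overflowing OPC_BUF_SIZE. */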

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "qemu-log.h"

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc);

unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *env);
void QEMU_NORETURN cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
#if !defined(CONFIG_USER_ONLY)
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu);
static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-target average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access. */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
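
/* Note: a TB whose code crosses a page boundary records both pages in
   page_addr[] and is linked into both pages' lists via page_next[]
   (see tb_link_page() below), so invalidating either page flushes
   the TB. */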

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
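
/* The two hash functions above split the jump cache index into a "page"
   part (the high TB_JMP_PAGE_BITS, derived from the page number) and an
   "address" part (the low bits of pc).  All TBs of one guest page thus
   land in one contiguous row of tb_jmp_cache, which lets per-page
   invalidation clear the whole row at once via tb_jmp_cache_hash_page(). */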

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;

#if defined(USE_DIRECT_JUMP)

#if defined(_ARCH_PPC)
extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
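/* On i386/x86_64, jmp_addr points at the 32-bit displacement field of a
   direct JMP, which the CPU interprets relative to the end of that field;
   hence the subtraction of (jmp_addr + 4).  x86 keeps instruction fetch
   coherent with data stores, so no explicit icache flush is required. */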
#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
#if QEMU_GNUC_PREREQ(4, 1)
    void __clear_cache(char *beg, char *end);
#else
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush the icache via the ARM Linux cacheflush syscall
       (swi 0x9f0002 = __ARM_NR_cacheflush) */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
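
/* tb_jmp_offset[] holds up to two patch sites per jump slot (entries n
   and n + 2); the sentinel value 0xffff marks the second site as unused,
   in which case only a single host instruction is patched. */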

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
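
/* Illustrative sketch (not part of this header's API): a walker of the
   circular jmp list built above recovers the tag and pointer like so,
   mirroring the traversal done by the TB unlink code in exec.c:

       long val = (long)ptb;
       int nn = val & 3;                              // 0/1 = jmp_next[nn], 2 = jmp_first
       TranslationBlock *tb1 = (TranslationBlock *)(val & ~3);
       ptb = (nn == 2) ? tb1->jmp_first : tb1->jmp_next[nn];
*/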

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env
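
/* The repeated inclusions below instantiate the code-fetch accessors
   ldub_code(), lduw_code(), ldl_code() and ldq_code(): softmmu_header.h
   expands once per DATA_SIZE and pastes MEMSUFFIX onto the generated
   function names. */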

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        do_unassigned_access(addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)(unsigned long)addr
        + env1->tlb_table[mmu_idx][page_index].addend;
    return qemu_ram_addr_from_host(p);
}
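
/* The ldub_code() call above doubles as a TLB probe: if the code TLB
   entry does not match, it invokes tlb_fill() and may raise a guest
   exception.  Once the entry is valid, adding the per-entry addend turns
   the guest address into a host pointer, which qemu_ram_addr_from_host()
   then maps back to a ram_addr_t offset. */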

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately. */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok. */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

typedef void (CPUDebugExcpHandler)(CPUState *env);

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

#endif