/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allows translation results to be seen - the slowdown should be
   negligible, so we leave it enabled */
#define DEBUG_DISAS
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
/* fallback for compilers that lack __builtin_expect */
#define __builtin_expect(x, n) (x)
#define REGPARM(n) __attribute((regparm(n)))
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
struct TranslationBlock;
/* XXX: make a safe guess about the sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
#if defined(TARGET_I386)
void optimize_flags_init(void);
#endif
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, uint32_t addr);
void tlb_flush_page_write(CPUState *env, uint32_t addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu);
#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_HASH_BITS     15
#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* maximum total translated code allocated */
/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/
#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (8 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
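/* Worked example (illustrative): with the softmmu figure above and the
   default 8 MB code buffer, CODE_GEN_MAX_BLOCKS comes out to
   (8 * 1024 * 1024) / 128 = 65536 TranslationBlock slots. */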
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__)
#define USE_DIRECT_JUMP
#endif
typedef struct TranslationBlock {
    unsigned long pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    unsigned long cs_base; /* CS base for this block */
    unsigned int flags;    /* flags defining in which context the code was generated */
    uint16_t size;         /* size of target code for this block (1 <=
                              size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;       /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */

    uint8_t *tc_ptr;       /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
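/* Illustrative sketch of the jmp list encoding described above (it assumes
   TranslationBlock pointers are at least 4-byte aligned, so the two low bits
   are free for tagging):

       TranslationBlock *ptr = some_tb->jmp_first;
       int n = (long)ptr & 3;                              0, 1 or 2
       TranslationBlock *prev = (TranslationBlock *)((long)ptr & ~3);

   n == 0 or n == 1: keep walking through prev->jmp_next[n];
   n == 2: the walk is back at the list head (jmp_first itself). */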
static inline unsigned int tb_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_HASH_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
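/* Example (illustrative): with CODE_GEN_HASH_BITS = 15 the virtual hash is
   simply the low 15 bits of the PC, e.g.

       tb_hash_func(0x08048123) == (0x08048123 & 0x7fff) == 0x0123

   so both tables rely on their sizes staying powers of two for the mask to
   be correct. */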
TranslationBlock *tb_alloc(unsigned long pc);
void tb_flush(CPUState *env);
void tb_link(TranslationBlock *tb);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;
/* find a translation block in the translation cache. If not found,
   return NULL and the pointer to the last element of the list in pptb */
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
                                        unsigned long pc,
                                        unsigned long cs_base,
                                        unsigned int flags)
{
    TranslationBlock **ptb, *tb;
    unsigned int h;

    h = tb_hash_func(pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb = *ptb;
        if (!tb)
            break;
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            goto found;
        ptb = &tb->hash_next;
    }
    *pptb = ptb;
    return NULL;
 found:
    return tb;
}
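/* Usage sketch (illustrative): a caller typically keeps the chain pointer
   returned through pptb so that a newly translated block can be appended on
   a miss:

       TranslationBlock **ptb, *tb;

       tb = tb_find(&ptb, pc, cs_base, flags);
       if (!tb) {
           tb = tb_alloc(pc);
           ... translate the code for tb ...
           *ptb = tb;    append at the end of the virtual hash chain
       }
*/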
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
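/* Illustration of the patches above: on i386 a near jmp stores its target as
   a signed 32-bit displacement relative to the end of the instruction, so
   with jmp_addr pointing at the 4 displacement bytes the value written is
   addr - (jmp_addr + 4); patching the displacement field at jmp_addr = 0x1000
   so that it reaches 0x1234 therefore stores 0x1234 - 0x1004 = 0x230. The
   PowerPC version instead rewrites the displacement field of a "b"
   instruction (mask 0x03fffffc) and must push the patched word out of the
   data cache and invalidate the icache line by hand. */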
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
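/* Note on the direct-jump variant above: each outgoing edge n can have up to
   two patch sites, recorded in tb_jmp_offset[n] and tb_jmp_offset[n + 2];
   an offset of 0xffff marks the second slot as unused, in which case only
   the first site is rewritten. */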
#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
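/* Example of TB chaining with the helpers above (illustrative): once tb_next
   exists, exit n of tb can be made to jump straight into its generated code,
   and the edge is remembered in tb_next's circular jmp list so that the
   blocks jumping into tb_next can be found again later:

       tb_add_jump(tb, 0, tb_next);    exit 0 of tb now continues directly
                                       at tb_next->tc_ptr
*/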
TranslationBlock *tb_find_pc(unsigned long pc_ptr);
#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section \".data\"\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ".previous\n"\
                  "b __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("b __op_jmp" #n "\n");\
} while (0)
#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section \".data\"\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ".previous\n"\
                  "jmp __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("jmp __op_jmp" #n "\n");\
} while (0)
#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n:\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
dummy_label ## n:\
    EXIT_TB();\
} while (0)

/* second jump to same destination 'n' */
#define JUMP_TB2(opname, tbparam, n)\
do {\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n - 2]);\
} while (0)

#endif
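/* How the portable variant above works (illustrative reading): the op ends in
   a gcc computed goto through tb_next[n]. For an unchained block tb_next[n]
   points at label ## n inside the op itself (its address is exported through
   the __op_label ## n variable), so execution falls through, loads T0 with
   the TB pointer plus the exit number and leaves the TB. Once
   tb_set_jmp_target() stores the address of another block's code in
   tb_next[n], the very same goto becomes a direct chain to that block, with
   no instruction-cache flushing needed. */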
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
#if defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory");
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc", "memory");
    return ret;
}
#endif
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
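/* Usage sketch (illustrative): callers bracket updates to shared translator
   state with the helpers above, e.g. with the tb_lock declared just below:

       spin_lock(&tb_lock);
       ... look up or patch TBs ...
       spin_unlock(&tb_lock);

   Only the CONFIG_USER_ONLY variants actually spin on testandset(); the
   others compile to no-ops. */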
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;
#if (defined(TARGET_I386) || defined(TARGET_PPC)) && \
    !defined(CONFIG_USER_ONLY)

void tlb_fill(unsigned long addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
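/* Note (illustrative): each inclusion of softmmu_header.h above is
   parameterized by DATA_SIZE and MEMSUFFIX and expands to the code-fetch
   accessors of that width; the ldub_code() call used by get_phys_addr_code()
   below comes from the 1-byte pass. */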
#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#else
#error "Unimplemented !"
#endif
    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code((void *)addr);
    }
    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
}
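/* Reading of the softmmu variant above (illustrative): the code TLB entry for
   addr is checked first; on a mismatch the dummy ldub_code() load forces a
   TLB fill (and may raise a guest exception), after which the entry's addend
   turns the guest address into a host pointer, and subtracting phys_ram_base
   yields the offset relative to phys_ram_base that the NOTE2 above
   describes. */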