/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s)	tostring(s)
#define tostring(s)	#s
typedef uint32_t target_ulong;
#define __builtin_expect(x, n) (x)

#define REGPARM(n) __attribute((regparm(n)))
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
struct TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern uint32_t gen_opc_pc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, uint32_t addr);
void tlb_flush_page_write(CPUState *env, uint32_t addr);
void tlb_flush(CPUState *env);
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu);
#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_HASH_BITS     15
#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (8 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
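
/* For illustration (not in the original source): with the default 8 MB
   code_gen_buffer and the softmmu average block size of 128 bytes, the
   limit above works out to (8 * 1024 * 1024) / 128 = 65536 translation
   blocks before the whole cache has to be flushed and refilled. */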
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__)
#define USE_DIRECT_JUMP
#endif
typedef struct TranslationBlock {
    unsigned long pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    unsigned long cs_base; /* CS base for this block */
    unsigned int flags;    /* flags defining in which context the code was generated */
    uint16_t size;         /* size of target code for this block (1 <=
                              size <= TARGET_PAGE_SIZE) */
    uint8_t *tc_ptr;       /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
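
/* Illustrative sketch (not part of the original header): how the circular
   jmp_first/jmp_next list can be walked.  The low two bits of each stored
   pointer encode which jmp_next[] slot of the pointed-to TB continues the
   chain; the value 2 marks the head entry (the TB itself) and ends the
   walk.  The helper name is hypothetical.

   static inline void tb_for_each_jumping_tb(TranslationBlock *tb)
   {
       TranslationBlock *ptb = tb->jmp_first;
       unsigned int n;
       while ((n = (long)ptb & 3) != 2) {
           TranslationBlock *src = (TranslationBlock *)((long)ptb & ~3);
           // 'src' jumps into 'tb' through its branch slot 'n'
           ptb = src->jmp_next[n];
       }
   }
*/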
static inline unsigned int tb_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_HASH_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
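
/* Example (illustrative, not in the original source): with
   CODE_GEN_HASH_BITS = 15 the hash is simply the low 15 bits of the PC,
   e.g. tb_hash_func(0x080483f2) == (0x080483f2 & 0x7fff) == 0x03f2. */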
TranslationBlock *tb_alloc(unsigned long pc);
void tb_flush(CPUState *env);
void tb_link(TranslationBlock *tb);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);
extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;
/* find a translation block in the translation cache. If not found,
   return NULL and the pointer to the last element of the list in pptb */
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
                                        unsigned long pc,
                                        unsigned long cs_base,
                                        unsigned int flags)
{
    TranslationBlock **ptb, *tb;
    unsigned int h;

    h = tb_hash_func(pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb = *ptb;
        if (!tb)
            break;
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            return tb;
        ptb = &tb->hash_next;
    }
    *pptb = ptb;
    return NULL;
}
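
/* Illustrative sketch (assumption about the caller, not from this header):
   the typical use of tb_find().  When the lookup misses, *pptb points at
   the tail of the hash chain, so a freshly translated block can be linked
   in without searching again.  The translation step itself is elided.

   TranslationBlock **ptb, *tb;
   tb = tb_find(&ptb, pc, cs_base, flags);
   if (!tb) {
       tb = tb_alloc(pc);          // declared above
       // ... generate code for the block ...
       tb->hash_next = NULL;
       *ptb = tb;                  // append to the virtual-address chain
   }
*/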
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
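
/* Worked example (illustrative): in the __i386__ branch above, the patched
   word is the rel32 displacement of a jmp instruction, measured from the
   end of the 4-byte field.  If the displacement lives at jmp_addr = 0x1000
   and the new target is addr = 0x1100, the stored value is
   0x1100 - (0x1000 + 4) = 0xfc. */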
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else
/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
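
/* Illustrative sketch (assumption about the caller, not from this header):
   the execution loop can chain blocks by remembering which TB and branch
   slot it just left.  Using the T0 encoding produced by JUMP_TB below
   (pointer to the previous TB plus the branch index in the low bits), a
   caller might do:

   if (T0 != 0) {
       tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
   }
*/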
TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section \".data\"\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ".previous\n"\
                  "b __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("b __op_jmp" #n "\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    asm volatile (".section \".data\"\n"\
                  "__op_label" #n "." stringify(opname) ":\n"\
                  ".long 1f\n"\
                  ".previous\n"\
                  "jmp __op_jmp" #n "\n"\
                  "1:\n");\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
    EXIT_TB();\
} while (0)

#define JUMP_TB2(opname, tbparam, n)\
do {\
    asm volatile ("jmp __op_jmp" #n "\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n:\
    T0 = (long)(tbparam) + (n);\
    EIP = eip;\
dummy_label ## n:\
    EXIT_TB();\
} while (0)

/* second jump to same destination 'n' */
#define JUMP_TB2(opname, tbparam, n)\
do {\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n - 2]);\
} while (0)

#endif
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
#if defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                      "0:    lwarx %0,0,%1\n"
                      "      xor. %0,%3,%0\n"
                      "      bne 1f\n"
                      "      stwcx. %2,0,%1\n"
                      "      bne- 0b\n"
                      "1:    "
                      : "=&r" (ret)
                      : "r" (p), "r" (1), "r" (0)
                      : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory");
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc", "memory");
    return ret;
}
#endif
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
extern spinlock_t tb_lock;

extern int tb_invalidated_flag;
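
/* Illustrative sketch (assumption, not from this header): tb_lock
   serializes writers of the translation cache and of the hash chains
   declared above, e.g.

   spin_lock(&tb_lock);
   // ... allocate, generate and link a TranslationBlock ...
   spin_unlock(&tb_lock);
*/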
#if (defined(TARGET_I386) || defined(TARGET_PPC)) && \
    !defined(CONFIG_USER_ONLY)

void tlb_fill(unsigned long addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
#if defined(CONFIG_USER_ONLY)

static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* NOTE: this function can trigger an exception */
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#else
#error "Unimplemented !"
#endif
    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code((void *)addr);
    }
    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
}

#endif
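
/* For illustration (not in the original source): the addend stored in a
   TLB entry is the host address of the mapped page minus the guest
   virtual page address, so addr + addend yields the host pointer to the
   byte, and subtracting phys_ram_base turns that into an offset into
   guest RAM, which is what the translator uses as the "physical" code
   address. */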