/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* allow seeing translation results - the slowdown should be negligible, so
   we leave it */
#define DEBUG_DISAS
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
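
/* Illustration (not part of the original build): glue(op_, addl) pastes the
   single token op_addl, and stringify(op_addl) yields the string "op_addl".
   The xglue/tostring indirection forces the preprocessor to expand macro
   arguments before pasting or stringizing them. */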
#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
struct TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
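
/* Illustration: with the values above, OPC_MAX_SIZE = 512 - 32 = 480.
   A translator that stops emitting once it has produced OPC_MAX_SIZE
   micro-ops therefore always has room in gen_opc_buf for the up-to-32
   micro-ops of one final target instruction. */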
extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif
void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b);
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}
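
/* Usage sketch (illustration only; the function and helper names here are
   hypothetical): a target's MMU fault handler translates the virtual
   address, picks the protection bits and then registers the mapping:

       int my_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                               int is_user, int is_softmmu)
       {
           target_phys_addr_t paddr = my_translate(env, vaddr);
           int prot = PAGE_READ | PAGE_WRITE;
           return tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                               paddr & TARGET_PAGE_MASK,
                               prot, is_user, is_softmmu);
       }
*/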
#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : limited to 24 bits
   ppc   : +/- 32 MB
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE     (4 * 1024 * 1024) /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
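
/* Illustration: with the defaults above (softmmu host, 16 MB buffer),
   CODE_GEN_MAX_BLOCKS = (16 * 1024 * 1024) / 128 = 131072 simultaneously
   resident TBs. */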
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif
typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4];  /* offset of jump instruction */
#else
    uint32_t tb_next[2];        /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
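
/* Illustration (sketch only): page_next[], like the jmp list above, is a
   tagged-pointer chain; the low bits of a link carry the slot index
   through which the chain continues in the next TB:

       tb->page_next[n] = first_tb_of_page;
       first_tb_of_page = (TranslationBlock *)((long)tb | n);

   A reader recovers the TB with ((long)p & ~3) and the slot with
   ((long)p & 3). */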
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}
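
/* Illustration (assuming the usual split TB_JMP_CACHE_BITS = 12,
   TB_JMP_PAGE_BITS = 6, TARGET_PAGE_BITS = 12): the low 6 bits of the
   hash come from the in-page offset of pc and the upper 6 bits from the
   folded page number, so blocks at the same offset in different pages,
   or at different offsets in the same page, do not all collide in the
   jump cache. */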
static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;
#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination; the rel32 operand of a jmp is
       relative to the end of the 4-byte displacement */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else
/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
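
/* Sketch (illustration only) of how the circular list built above can be
   walked, masking the tag out of each link; tag value 2 marks the return
   to jmp_first of the starting TB:

       TranslationBlock *ptb = tb->jmp_first;
       for (;;) {
           int nn = (long)ptb & 3;
           TranslationBlock *tb1 = (TranslationBlock *)((long)ptb & ~3);
           if (nn == 2)
               break;
           ptb = tb1->jmp_next[nn];
       }
*/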
TranslationBlock *tb_find_pc(unsigned long pc_ptr);
#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#define ASM_NAME(x) "_" #x
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#define ASM_NAME(x) #x
#endif
#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)
#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)
#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)
#elif defined(__s390__)
/* GCC spills R13, so we have to restore it before branching away */

#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((used)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    __asm__ __volatile__ ( \
        "l %%r13,52(%%r15)\n" \
        "br %0\n" \
        : : "r" (((TranslationBlock*)tbparam)->tb_next[n]));\
    for(;*((int*)0);); /* just to keep GCC busy */ \
label ## n: ;\
dummy_label ## n: ;\
} while (0)
#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((used)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((used)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
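
/* Note (illustration): the second index selects the handler by access
   size; index i handles accesses of 1 << i bytes (0 = byte, 1 = 16 bit,
   2 = 32 bit, 3 = 64 bit). */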
#if defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory");
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:   mov 1,%2\n"
                          "     ldl_l %0,%1\n"
                          "     stl_c %2,%1\n"
                          "     beq %2,1f\n"
                          ".subsection 2\n"
                          "1:   br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc", "memory");
    return ret;
}
#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "       .set push               \n"
        "       .set noat               \n"
        "       .set mips2              \n"
        "1:     li      $1, 1           \n"
        "       ll      %0, %1          \n"
        "       sc      $1, %1          \n"
        "       beqz    $1, 1b          \n"
        "       .set pop                "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
extern spinlock_t tb_lock;
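
/* Usage sketch (illustration only): code that modifies the TB graph
   takes this global translation lock:

       spin_lock(&tb_lock);
       ... add, chain or invalidate TBs ...
       spin_unlock(&tb_lock);
*/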
extern int tb_invalidated_flag;
#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif
#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#elif defined (TARGET_ALPHA)
    is_user = ((env->ps >> 3) & 3);
#elif defined (TARGET_M68K)
    is_user = ((env->sr & SR_S) == 0);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#ifdef TARGET_SPARC
        do_unassigned_access(addr, 0, 1, 0);
#else
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
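
/* Usage sketch (illustration only): the translator locates the TB for the
   current pc through the physical hash, along these lines:

       target_ulong phys_pc = get_phys_addr_code(env, pc);
       TranslationBlock *tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];

   and then follows the phys_hash_next chain, comparing pc, cs_base and
   flags until the matching block is found. */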
#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);
static inline int kqemu_is_ok(CPUState *env)
{
    return(env->kqemu_enabled &&
           (env->cr[0] & CR0_PE_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
           (env->eflags & IF_MASK) &&
           !(env->eflags & VM_MASK) &&
           (env->kqemu_enabled == 2 ||
            ((env->hflags & HF_CPL_MASK) == 3 &&
             (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif
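
/* Usage sketch (illustration only): callers gate entry into the kqemu
   accelerator on this predicate, e.g.

       if (kqemu_is_ok(env)) {
           ret = kqemu_cpu_exec(env);
       }
*/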