/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* allow the translation results to be seen - the slowdown should be
   negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
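
/* Usage sketch (illustrative, not part of the original header): glue()
   pastes tokens after macro expansion, and stringify() expands its
   argument before stringizing it. With a hypothetical SUFFIX macro:

     #define SUFFIX _raw
     int glue(helper, SUFFIX)(void);        declares helper_raw()
     const char *name = stringify(SUFFIX);  yields "_raw"
*/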

#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif
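
/* Usage sketch: __builtin_expect(cond, 0) marks 'cond' as unlikely so the
   compiler can lay out the fast path first:

     if (__builtin_expect(err != 0, 0))
         handle_error();

   ('err' and 'handle_error' are hypothetical.) The fallback above simply
   discards the hint on pre-3 GCC. */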

#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif
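
/* Usage sketch (hypothetical declaration): on i386, regparm(n) passes the
   first n integer arguments in registers (EAX, EDX, ECX) instead of on the
   stack:

     int REGPARM(2) add2(int a, int b);

   On other hosts the macro expands to nothing, so declarations stay
   portable. */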

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make a safe guess about the sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)
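
/* Sizing sketch (illustrative): translators stop emitting once the op
   count passes OPC_MAX_SIZE, so even a worst-case final instruction can
   still append MAX_OP_PER_INSTR micro-ops (512 - 32 = 480 usable slots).
   Each op carries at most three parameters (see GenOpFunc3 below), hence
   the factor of 3 in OPPARAM_BUF_SIZE. */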

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

void muls64(int64_t *phigh, int64_t *plow, int64_t a, int64_t b);
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
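
/* Semantics sketch (illustrative): both helpers compute the full
   64x64 -> 128-bit product, split across two result words. On a host with
   a 128-bit integer type, the unsigned variant would be equivalent to:

     unsigned __int128 r = (unsigned __int128)a * b;
     *plow  = (uint64_t)r;
     *phigh = (uint64_t)(r >> 64);
*/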

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(CPUState *env);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu);
static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int is_user, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
}
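
/* Usage sketch (hypothetical target code): a target's tlb_fill() resolves
   the guest virtual address through its MMU model and then installs the
   mapping; the wrapper above grants PAGE_EXEC alongside PAGE_READ:

     tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                  PAGE_READ | PAGE_WRITE, is_user, is_softmmu);
*/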

#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total size of the translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE (2 * 1024 * 1024)
#elif defined(__ia64)
#define CODE_GEN_BUFFER_SIZE (4 * 1024 * 1024) /* range of addl */
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE (16 * 1024 * 1024)
#endif
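
/* Worked example (derived from the table above): a signed 24-bit
   displacement reaches +/- 2^23 = 8 MB, so the 6 MB ppc buffer keeps
   every intra-buffer call within direct-branch range. */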

//#define CODE_GEN_BUFFER_SIZE (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use an average code fragment size per target and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)
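
/* Worked example (illustrative): with the default 16 MB buffer and the
   softmmu average of 128 bytes per TB, CODE_GEN_MAX_BLOCKS comes to
   16 * 1024 * 1024 / 128 = 131072 translation blocks. */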

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags;   /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;
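
/* Decoding sketch (illustrative, not part of the original header): the
   lower bit of a page_next[] link records which of the two page slots it
   came from, so a walker can recover both fields:

     long tagged = (long)tb->page_next[0];
     int slot = tagged & 1;
     TranslationBlock *next = (TranslationBlock *)(tagged & ~1L);
*/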

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> TB_JMP_PAGE_BITS) & TB_JMP_PAGE_MASK) |
            (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}

TranslationBlock *tb_alloc(target_ulong pc);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush the icache explicitly */
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
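
/* Note (illustrative): each of the two jump slots can have a second patch
   site; entries [n] and [n + 2] of tb_jmp_offset are both rewritten unless
   the second holds the 0xffff "unused" sentinel. */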

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
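
/* Walker sketch (illustrative, not part of the original header): the two
   low bits of each list pointer name the field that holds the next link
   (0/1 = jmp_next[n], 2 = back at jmp_first, i.e. the list head):

     TranslationBlock *ptb = tb->jmp_first;
     while (ptb != NULL) {
         int n = (long)ptb & 3;
         ptb = (TranslationBlock *)((long)ptb & ~3L);
         if (n == 2)
             break;
         ptb = ptb->jmp_next[n];
     }
*/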

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif

#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to the next block (more portable code; needs no cache flushing,
   but slower because of the indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)
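
/* Mechanism sketch: this variant relies on GCC's computed goto. '&&label'
   takes the address of a local label; tb_next[n] initially points at the
   fallthrough label and is later overwritten with the target TB's tc_ptr,
   so 'goto *...' either falls through to the next op or enters the
   chained block directly. */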

#endif

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#if defined(__powerpc__)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0: lwarx %0,0,%1\n"
        "   xor. %0,%3,%0\n"
        "   bne 1f\n"
        "   stwcx. %2,0,%1\n"
        "   bne- 0b\n"
        "1: "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
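/* Semantics sketch: 'lock; cmpxchgl' atomically compares *p with the value
   in EAX (0 here); if they match it stores 1 into *p and leaves readval at
   0, otherwise the old nonzero value of *p is loaded into readval. The
   function therefore returns 0 exactly when the lock was acquired. */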
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc","memory");
    return ret;
}
#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "   .set push              \n"
        "   .set noat              \n"
        "   .set mips2             \n"
        "1: li   $1, 1             \n"
        "   ll   %0, %1            \n"
        "   sc   $1, %1            \n"
        "   beqz $1, 1b            \n"
        "   .set pop               "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

extern spinlock_t tb_lock;
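
/* Usage sketch (illustrative): code that mutates the TB structures
   brackets the update with the global lock; in the non-user build the
   lock ops above compile to nothing since there is a single thread:

     spin_lock(&tb_lock);
     ...update tb_phys_hash / jump lists...
     spin_unlock(&tb_lock);
*/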

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env
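
/* Pattern sketch: softmmu_header.h is a template expanded once per
   DATA_SIZE; with MEMSUFFIX set to _code, each inclusion generates the
   code-fetch accessors (ldub_code, lduw_code, ldl_code, ldq_code) used
   below to touch a page and fill its TLB entry. */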

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_MIPS)
    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#elif defined (TARGET_ARM)
    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
#elif defined (TARGET_SH4)
    is_user = ((env->sr & SR_MD) == 0);
#elif defined (TARGET_ALPHA)
    is_user = ((env->ps >> 3) & 3);
#else
#error unimplemented CPU
#endif
    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    }
    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
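
/* Call-sequence sketch (illustrative): the TB lookup path resolves the
   guest PC once and then indexes the physical hash table:

     target_ulong phys_pc = get_phys_addr_code(env, pc);
     TranslationBlock *tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];
*/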

#ifdef USE_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

static inline int kqemu_is_ok(CPUState *env)
{
    return (env->kqemu_enabled &&
            (env->cr[0] & CR0_PE_MASK) &&
            !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
            (env->eflags & IF_MASK) &&
            !(env->eflags & VM_MASK) &&
            (env->kqemu_enabled == 2 ||
             ((env->hflags & HF_CPL_MASK) == 3 &&
              (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif