1 | /* | |
2 | * i386 emulator main execution loop | |
3 | * | |
4 | * Copyright (c) 2003 Fabrice Bellard | |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
17 | * License along with this library; if not, write to the Free Software | |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
19 | */ | |
20 | #include "config.h" | |
21 | #include "exec.h" | |
22 | #include "disas.h" | |
23 | ||
24 | int tb_invalidated_flag; | |
25 | ||
26 | //#define DEBUG_EXEC | |
27 | //#define DEBUG_SIGNAL | |
28 | ||
#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
/* Abort execution of the current translated block: jump back to the
   setjmp point in cpu_exec() (value 1 selects the non-zero-return
   path, after which any pending env->exception_index is serviced). */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
36 | ||
37 | /* main execution loop */ | |
38 | ||
/* Main CPU execution loop: looks up (or translates) and runs
 * translation blocks for 'env1' until an exception or an external
 * interrupt request stops execution.
 *
 * Returns the exception index that terminated the loop (>=
 * EXCP_INTERRUPT for exit requests, or a target exception number when
 * env->user_mode_only is set).
 *
 * The global register variables T0/T1/T2 and, on i386 hosts where
 * guest registers are mapped to host registers (reg_Exx macros), the
 * mapped registers are saved on entry and restored on exit, so this
 * function can be called from ordinary C code. */
int cpu_exec(CPUState *env1)
{
    int saved_T0, saved_T1, saved_T2;
    CPUState *saved_env;
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
#ifdef __sparc__
    int saved_i7, tmp_T0;
#endif
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    uint8_t *tc_ptr, *cs_base, *pc;
    unsigned int flags;

    /* first we save global registers */
    saved_T0 = T0;
    saved_T1 = T1;
    saved_T2 = T2;
    saved_env = env;
    env = env1;
#ifdef __sparc__
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
    /* load the guest register file into the host registers selected
       by the reg_Exx macros */
#ifdef reg_EAX
    saved_EAX = EAX;
    EAX = env->regs[R_EAX];
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
    ECX = env->regs[R_ECX];
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
    EDX = env->regs[R_EDX];
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
    EBX = env->regs[R_EBX];
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
    ESP = env->regs[R_ESP];
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
    EBP = env->regs[R_EBP];
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
    ESI = env->regs[R_ESI];
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
    EDI = env->regs[R_EDI];
#endif

    /* put eflags in CPU temporary format: condition codes go to
       CC_SRC/CC_OP, the direction flag becomes DF = +1/-1 */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
    {
        /* split the CPSR flag bits into the separate CF/NZF/VF
           working variables used by translated code */
        unsigned int psr;
        psr = env->cpsr;
        env->CF = (psr >> 29) & 1;
        env->NZF = (psr & 0xc0000000) ^ 0x40000000;
        env->VF = (psr << 3) & 0x80000000;
        env->cpsr = psr & ~0xf0000000;
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling.  NOTE: 'ret' is
       only assigned on the two 'break' paths below, which are the
       only way out of this loop. */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#endif
                }
                env->exception_index = -1;
            }
            /* T0 holds the (tagged) pointer of the previously executed
               TB so the next TB can be chained to it; 0 disables
               chaining */
            T0 = 0; /* force lookup of first TB */
            for(;;) {
#ifdef __sparc__
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                    /* if hardware interrupt pending, we execute it */
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        intno = cpu_x86_get_pic_interrupt(env);
                        if (loglevel) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#ifdef __sparc__
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if (loglevel) {
#if defined(TARGET_I386)
                    /* restore flags in standard format for the dump,
                       then strip them again for translated code */
                    env->regs[R_EAX] = EAX;
                    env->regs[R_EBX] = EBX;
                    env->regs[R_ECX] = ECX;
                    env->regs[R_EDX] = EDX;
                    env->regs[R_ESI] = ESI;
                    env->regs[R_EDI] = EDI;
                    env->regs[R_EBP] = EBP;
                    env->regs[R_ESP] = ESP;
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    env->cpsr = compute_cpsr();
                    cpu_arm_dump_state(env, logfile, 0);
                    env->cpsr &= ~0xf0000000;
#elif defined(TARGET_SPARC)
                    cpu_sparc_dump_state (env, logfile, 0);
#elif defined(TARGET_PPC)
                    cpu_ppc_dump_state(env, logfile, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                /* we record a subset of the CPU state. It will
                   always be the same before a given translated block
                   is executed. */
#if defined(TARGET_I386)
                flags = env->hflags;
                flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                cs_base = env->segs[R_CS].base;
                pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->regs[15];
#elif defined(TARGET_SPARC)
                flags = 0;
                cs_base = env->npc;
                pc = (uint8_t *) env->pc;
#elif defined(TARGET_PPC)
                flags = 0;
                cs_base = 0;
                pc = (uint8_t *)env->nip;
#else
#error unsupported CPU
#endif
                /* fast lookup in the virtual pc hash table */
                tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                             flags);
                if (!tb) {
                    TranslationBlock **ptb1;
                    unsigned int h;
                    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;


                    spin_lock(&tb_lock);

                    tb_invalidated_flag = 0;

                    /* find translated block using physical mappings */
                    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
                    phys_page1 = phys_pc & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    h = tb_phys_hash_func(phys_pc);
                    ptb1 = &tb_phys_hash[h];
                    for(;;) {
                        tb = *ptb1;
                        if (!tb)
                            goto not_found;
                        if (tb->pc == (unsigned long)pc &&
                            tb->page_addr[0] == phys_page1 &&
                            tb->cs_base == (unsigned long)cs_base &&
                            tb->flags == flags) {
                            /* check next page if needed (TB may span
                               two guest pages) */
                            virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
                            if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
                                phys_page2 = get_phys_addr_code(env, virt_page2);
                                if (tb->page_addr[1] == phys_page2)
                                    goto found;
                            } else {
                                goto found;
                            }
                        }
                        ptb1 = &tb->phys_hash_next;
                    }
                not_found:
                    /* if no translated code available, then translate it now */
                    tb = tb_alloc((unsigned long)pc);
                    if (!tb) {
                        /* flush must be done */
                        tb_flush(env);
                        /* cannot fail at this point */
                        tb = tb_alloc((unsigned long)pc);
                        /* don't forget to invalidate previous TB info */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        T0 = 0;
                    }
                    tc_ptr = code_gen_ptr;
                    tb->tc_ptr = tc_ptr;
                    tb->cs_base = (unsigned long)cs_base;
                    tb->flags = flags;
                    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                    /* advance the code buffer pointer, keeping it
                       CODE_GEN_ALIGN-aligned */
                    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

                    /* check next page if needed */
                    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
                    phys_page2 = -1;
                    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                    }
                    tb_link_phys(tb, phys_pc, phys_page2);

                found:
                    if (tb_invalidated_flag) {
                        /* as some TB could have been invalidated because
                           of memory exceptions while generating the code, we
                           must recompute the hash index here */
                        ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                        while (*ptb != NULL)
                            ptb = &(*ptb)->hash_next;
                        T0 = 0;
                    }
                    /* we add the TB in the virtual pc hash table */
                    *ptb = tb;
                    tb->hash_next = NULL;
                    tb_link(tb);
                    spin_unlock(&tb_lock);
                }
#ifdef DEBUG_EXEC
                if (loglevel) {
                    fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
                            (long)tb->tc_ptr, (long)tb->pc,
                            lookup_symbol((void *)tb->pc));
                }
#endif
#ifdef __sparc__
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB.  T0 encodes the
                   previous TB pointer in its upper bits and the jump
                   slot index in its low 2 bits. */
                if (T0 != 0) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                /* hand-written call so that i0-i5 are visibly
                   clobbered and o7 is preserved across the call */
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            /* the longjmp from cpu_loop_exit() lands here; loop back
               to the setjmp above so the pending exception_index is
               serviced */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#else
#error unsupported target CPU
#endif
#ifdef __sparc__
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    T2 = saved_T2;
    env = saved_env;
    return ret;
}
440 | ||
441 | #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY) | |
442 | ||
443 | void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector) | |
444 | { | |
445 | CPUX86State *saved_env; | |
446 | ||
447 | saved_env = env; | |
448 | env = s; | |
449 | if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { | |
450 | selector &= 0xffff; | |
451 | cpu_x86_load_seg_cache(env, seg_reg, selector, | |
452 | (uint8_t *)(selector << 4), 0xffff, 0); | |
453 | } else { | |
454 | load_seg(seg_reg, selector); | |
455 | } | |
456 | env = saved_env; | |
457 | } | |
458 | ||
459 | void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32) | |
460 | { | |
461 | CPUX86State *saved_env; | |
462 | ||
463 | saved_env = env; | |
464 | env = s; | |
465 | ||
466 | helper_fsave(ptr, data32); | |
467 | ||
468 | env = saved_env; | |
469 | } | |
470 | ||
471 | void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32) | |
472 | { | |
473 | CPUX86State *saved_env; | |
474 | ||
475 | saved_env = env; | |
476 | env = s; | |
477 | ||
478 | helper_frstor(ptr, data32); | |
479 | ||
480 | env = saved_env; | |
481 | } | |
482 | ||
483 | #endif /* TARGET_I386 */ | |
484 | ||
485 | #undef EAX | |
486 | #undef ECX | |
487 | #undef EDX | |
488 | #undef EBX | |
489 | #undef ESP | |
490 | #undef EBP | |
491 | #undef ESI | |
492 | #undef EDI | |
493 | #undef EIP | |
494 | #include <signal.h> | |
495 | #include <sys/ucontext.h> | |
496 | ||
497 | #if defined(TARGET_I386) | |
498 | ||
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored.
   Returns 1 if the fault was consumed (page unprotected, MMU fault
   handled, or a guest exception is raised via longjmp — in the last
   case this function does not actually return), and 0 if the signal
   is a genuine host fault the caller must deal with. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write to a page we protected for TB invalidation tracking:
       unprotect it and retry the access */
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* longjmps back into cpu_exec() — does not return */
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    }
    /* never comes here */
    return 1;
}
551 | ||
552 | #elif defined(TARGET_ARM) | |
553 | static inline int handle_cpu_signal(unsigned long pc, unsigned long address, | |
554 | int is_write, sigset_t *old_set) | |
555 | { | |
556 | /* XXX: do more */ | |
557 | return 0; | |
558 | } | |
559 | #elif defined(TARGET_SPARC) | |
560 | static inline int handle_cpu_signal(unsigned long pc, unsigned long address, | |
561 | int is_write, sigset_t *old_set) | |
562 | { | |
563 | /* XXX: locking issue */ | |
564 | if (is_write && page_unprotect(address)) { | |
565 | return 1; | |
566 | } | |
567 | return 0; | |
568 | } | |
569 | #elif defined (TARGET_PPC) | |
/* PPC target fault handler.  'pc' is the host PC at which the signal
   was raised, 'address' the faulting address, 'is_write' 1 for a
   write access, 'old_set' the signal mask to restore.
   Returns 1 when the fault is consumed (page unprotected, or a guest
   EXCP_PROGRAM exception is raised via longjmp and this function
   never actually returns); returns 0 is not reachable here. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
    TranslationBlock *tb;

#if 0
    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#endif
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* write to a TB-invalidation-protected page: unprotect and retry */
    if (is_write && page_unprotect(address)) {
        return 1;
    }

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }
#if 0
    printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
           env->eip, env->cr[2], env->error_code);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* longjmps back into cpu_exec() */
    raise_exception_err(EXCP_PROGRAM, env->error_code);
    /* never comes here */
    return 1;
}
606 | #else | |
607 | #error unsupported target CPU | |
608 | #endif | |
609 | ||
610 | #if defined(__i386__) | |
611 | ||
/* Host signal handler for x86 hosts: extracts the faulting PC from
   the ucontext and, for page faults (trapno == 0xe), derives the
   write flag from bit 1 of the page-fault error code, then delegates
   to handle_cpu_signal. */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask);
}
630 | ||
631 | #elif defined(__powerpc) | |
632 | ||
/* Host signal handler for PowerPC hosts: the faulting PC comes from
   regs->nip; a store access is detected from DSISR bit 0x02000000,
   except for trap 0x400 (instruction access) where DSISR is not a
   data fault status. */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    struct pt_regs *regs = uc->uc_mcontext.regs;
    unsigned long pc;
    int is_write;

    pc = regs->nip;
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (regs->dsisr & 0x00800000)
        is_write = 1;
#else
    if (regs->trap != 0x400 && (regs->dsisr & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask);
}
654 | ||
655 | #elif defined(__alpha__) | |
656 | ||
657 | int cpu_signal_handler(int host_signum, struct siginfo *info, | |
658 | void *puc) | |
659 | { | |
660 | struct ucontext *uc = puc; | |
661 | uint32_t *pc = uc->uc_mcontext.sc_pc; | |
662 | uint32_t insn = *pc; | |
663 | int is_write = 0; | |
664 | ||
665 | /* XXX: need kernel patch to get write flag faster */ | |
666 | switch (insn >> 26) { | |
667 | case 0x0d: // stw | |
668 | case 0x0e: // stb | |
669 | case 0x0f: // stq_u | |
670 | case 0x24: // stf | |
671 | case 0x25: // stg | |
672 | case 0x26: // sts | |
673 | case 0x27: // stt | |
674 | case 0x2c: // stl | |
675 | case 0x2d: // stq | |
676 | case 0x2e: // stl_c | |
677 | case 0x2f: // stq_c | |
678 | is_write = 1; | |
679 | } | |
680 | ||
681 | return handle_cpu_signal(pc, (unsigned long)info->si_addr, | |
682 | is_write, &uc->uc_sigmask); | |
683 | } | |
684 | #elif defined(__sparc__) | |
685 | ||
/* Host signal handler for SPARC hosts.  The register array is located
   right after the siginfo structure (hand-decoded layout — no
   standard glibc define; TODO confirm against the glibc/kernel
   sigframe layout in use).  The faulting instruction is decoded to
   detect store opcodes since the kernel provides no write flag. */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* format 3 (op == 3): load/store instructions; check op3 field */
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask);
}
716 | ||
717 | #elif defined(__arm__) | |
718 | ||
/* Host signal handler for ARM hosts: extracts the faulting PC (R15)
   from the ucontext; write detection is not implemented yet, so every
   fault is reported as a read. */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask);
}
733 | ||
734 | #elif defined(__mc68000) | |
735 | ||
/* Host signal handler for m68k hosts: gregs[16] holds the faulting
   PC; write detection is not implemented yet, so every fault is
   reported as a read. */
int cpu_signal_handler(int host_signum, struct siginfo *info,
                       void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask);
}
750 | ||
751 | #else | |
752 | ||
753 | #error host CPU specific signal handler needed | |
754 | ||
755 | #endif |