/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <sys/ucontext.h>

#ifdef __ia64__
#undef uc_mcontext
#undef uc_sigmask
#undef uc_stack
#undef uc_link
#endif

#include "qemu.h"

//#define DEBUG_SIGNAL

#define MAX_SIGQUEUE_SIZE 1024

struct sigqueue {
    struct sigqueue *next;
    target_siginfo_t info;
};

struct emulated_sigaction {
    struct target_sigaction sa;
    int pending; /* true if signal is pending */
    struct sigqueue *first;
    struct sigqueue info; /* in order to always have memory for the
                             first signal, we put it here */
};

static struct emulated_sigaction sigact_table[TARGET_NSIG];
static struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */
static struct sigqueue *first_free; /* first free siginfo queue entry */
static int signal_pending; /* non-zero if a signal may be pending */

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* XXX: do it properly */
static inline int host_to_target_signal(int sig)
{
    return sig;
}

static inline int target_to_host_signal(int sig)
{
    return sig;
}

void host_to_target_sigset(target_sigset_t *d, sigset_t *s)
{
    int i;
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapl(((unsigned long *)s)[i]);
    }
}

void target_to_host_sigset(sigset_t *d, target_sigset_t *s)
{
    int i;
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        ((unsigned long *)d)[i] = tswapl(s->sig[i]);
    }
}

void host_to_target_old_sigset(target_ulong *old_sigset,
                               const sigset_t *sigset)
{
    *old_sigset = tswap32(*(unsigned long *)sigset & 0xffffffff);
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const target_ulong *old_sigset)
{
    sigemptyset(sigset);
    *(unsigned long *)sigset = tswapl(*old_sigset);
}
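
/* The four helpers above convert signal masks between the host sigset_t and
   the target's target_sigset_t word by word; tswapl()/tswap32() byte-swap
   each word when host and target endianness differ and are no-ops otherwise.
   Illustrative example (assuming a big-endian host running this little-endian
   target): a host mask word of 0x00000400 is stored as 0x00040000 on the
   target side. */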

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig;
    sig = host_to_target_signal(info->si_signo);
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = 0;
    if (sig == SIGILL || sig == SIGFPE || sig == SIGSEGV || sig == SIGBUS) {
        /* should never come here, but who knows. The information for
           the target is irrelevant */
        tinfo->_sifields._sigfault._addr = 0;
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = info->si_pid;
        tinfo->_sifields._rt._uid = info->si_uid;
        /* XXX: potential problem if 64 bit */
        tinfo->_sifields._rt._sigval.sival_ptr =
            (target_ulong)info->si_value.sival_ptr;
    }
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int sig;
    sig = info->si_signo;
    tinfo->si_signo = tswap32(sig);
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code = tswap32(info->si_code);
    if (sig == SIGILL || sig == SIGFPE || sig == SIGSEGV || sig == SIGBUS) {
        tinfo->_sifields._sigfault._addr =
            tswapl(info->_sifields._sigfault._addr);
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
        tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
        tinfo->_sifields._rt._sigval.sival_ptr =
            tswapl(info->_sifields._rt._sigval.sival_ptr);
    }
}


void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloc'ed data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    info->si_signo = tswap32(tinfo->si_signo);
    info->si_errno = tswap32(tinfo->si_errno);
    info->si_code = tswap32(tinfo->si_code);
    info->si_pid = tswap32(tinfo->_sifields._rt._pid);
    info->si_uid = tswap32(tinfo->_sifields._rt._uid);
    info->si_value.sival_ptr =
        (void *)tswapl(tinfo->_sifields._rt._sigval.sival_ptr);
}

void signal_init(void)
{
    struct sigaction act;
    int i;

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i < NSIG; i++) {
        sigaction(i, &act, NULL);
    }

    memset(sigact_table, 0, sizeof(sigact_table));

    first_free = &sigqueue_table[0];
    for(i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++)
        sigqueue_table[i].next = &sigqueue_table[i + 1];
    sigqueue_table[MAX_SIGQUEUE_SIZE - 1].next = NULL;
}
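
/* Note: the loop above also tries to install a handler for SIGKILL and
   SIGSTOP; those sigaction() calls simply fail and the error is ignored,
   which is harmless here. */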

/* signal queue handling */

static inline struct sigqueue *alloc_sigqueue(void)
{
    struct sigqueue *q = first_free;
    if (!q)
        return NULL;
    first_free = q->next;
    return q;
}

static inline void free_sigqueue(struct sigqueue *q)
{
    q->next = first_free;
    first_free = q;
}

/* abort execution with signal */
void __attribute((noreturn)) force_sig(int sig)
{
    int host_sig;
    host_sig = target_to_host_signal(sig);
    fprintf(stderr, "qemu: uncaught target signal %d (%s) - exiting\n",
            sig, strsignal(host_sig));
#if 1
    _exit(-host_sig);
#else
    {
        struct sigaction act;
        sigemptyset(&act.sa_mask);
        act.sa_flags = SA_SIGINFO;
        act.sa_sigaction = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
        abort();
    }
#endif
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(int sig, target_siginfo_t *info)
{
    struct emulated_sigaction *k;
    struct sigqueue *q, **pq;
    target_ulong handler;

#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "queue_signal: sig=%d\n",
            sig);
#endif
    k = &sigact_table[sig - 1];
    handler = k->sa._sa_handler;
    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals, the others are fatal */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH) {
            force_sig(sig);
        } else {
            return 0; /* indicate ignored */
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore signal */
        return 0;
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        pq = &k->first;
        if (sig < TARGET_SIGRTMIN) {
            /* for a non-real-time signal, we queue exactly one signal */
            if (!k->pending)
                q = &k->info;
            else
                return 0;
        } else {
            if (!k->pending) {
                /* first signal */
                q = &k->info;
            } else {
                q = alloc_sigqueue();
                if (!q)
                    return -EAGAIN;
                while (*pq != NULL)
                    pq = &(*pq)->next;
            }
        }
        *pq = q;
        q->info = *info;
        q->next = NULL;
        k->pending = 1;
        /* signal that a new signal is pending */
        signal_pending = 1;
        return 1; /* indicates that the signal was queued */
    }
}
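
/* Return convention of queue_signal(): 1 means the signal was queued and the
   caller should interrupt the virtual CPU, 0 means it was ignored (default
   or explicit SIG_IGN disposition), -EAGAIN means the RT signal queue is
   exhausted; fatal dispositions never return because force_sig() exits. */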

#if defined(DEBUG_SIGNAL)
#ifdef __i386__
static void dump_regs(struct ucontext *uc)
{
    fprintf(stderr,
            "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
            "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
            "EFL=%08x EIP=%08x\n",
            uc->uc_mcontext.gregs[EAX],
            uc->uc_mcontext.gregs[EBX],
            uc->uc_mcontext.gregs[ECX],
            uc->uc_mcontext.gregs[EDX],
            uc->uc_mcontext.gregs[ESI],
            uc->uc_mcontext.gregs[EDI],
            uc->uc_mcontext.gregs[EBP],
            uc->uc_mcontext.gregs[ESP],
            uc->uc_mcontext.gregs[EFL],
            uc->uc_mcontext.gregs[EIP]);
}
#else
static void dump_regs(struct ucontext *uc)
{
}
#endif

#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    int sig;
    target_siginfo_t tinfo;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it first */
    if (host_signum == SIGSEGV || host_signum == SIGBUS) {
        if (cpu_x86_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
#if defined(DEBUG_SIGNAL)
    fprintf(stderr, "qemu: got signal %d\n", sig);
    dump_regs(puc);
#endif
    host_to_target_siginfo_noswap(&tinfo, info);
    if (queue_signal(sig, &tinfo) == 1) {
        /* interrupt the virtual CPU as soon as possible */
        cpu_x86_interrupt(global_env);
    }
}
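
/* host_signal_handler() runs with every host signal blocked (signal_init()
   fills sa_mask before installing it), so handler invocations are serialized
   and cannot corrupt the sigqueue free list used by queue_signal(). */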

int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct emulated_sigaction *k;

    if (sig < 1 || sig > TARGET_NSIG)
        return -EINVAL;
    k = &sigact_table[sig - 1];
#if defined(DEBUG_SIGNAL) && 0
    fprintf(stderr, "sigaction sig=%d act=0x%08x, oact=0x%08x\n",
            sig, (int)act, (int)oact);
#endif
    if (oact) {
        oact->_sa_handler = tswapl(k->sa._sa_handler);
        oact->sa_flags = tswapl(k->sa.sa_flags);
        oact->sa_restorer = tswapl(k->sa.sa_restorer);
        oact->sa_mask = k->sa.sa_mask;
    }
    if (act) {
        k->sa._sa_handler = tswapl(act->_sa_handler);
        k->sa.sa_flags = tswapl(act->sa_flags);
        k->sa.sa_restorer = tswapl(act->sa_restorer);
        k->sa.sa_mask = act->sa_mask;
    }
    return 0;
}

#ifdef TARGET_I386

/* from the Linux kernel */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    target_ulong element[4];
};

struct target_fpstate {
    /* Regular FPU environment */
    target_ulong cw;
    target_ulong sw;
    target_ulong tag;
    target_ulong ipoff;
    target_ulong cssel;
    target_ulong dataoff;
    target_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t status;
    uint16_t magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    target_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    target_ulong mxcsr;
    target_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    target_ulong padding[56];
};

#define X86_FXSR_MAGIC 0x0000

struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    target_ulong edi;
    target_ulong esi;
    target_ulong ebp;
    target_ulong esp;
    target_ulong ebx;
    target_ulong edx;
    target_ulong ecx;
    target_ulong eax;
    target_ulong trapno;
    target_ulong err;
    target_ulong eip;
    uint16_t cs, __csh;
    target_ulong eflags;
    target_ulong esp_at_signal;
    uint16_t ss, __ssh;
    target_ulong fpstate; /* pointer */
    target_ulong oldmask;
    target_ulong cr2;
};

typedef struct target_sigaltstack {
    target_ulong ss_sp;
    int ss_flags;
    target_ulong ss_size;
} target_stack_t;

struct target_ucontext {
    target_ulong uc_flags;
    target_ulong uc_link;
    target_stack_t uc_stack;
    struct target_sigcontext uc_mcontext;
    target_sigset_t uc_sigmask;  /* mask last for extensibility */
};

struct sigframe
{
    target_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    target_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe
{
    target_ulong pretcode;
    int sig;
    target_ulong pinfo;
    target_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

/*
 * Set up a signal frame.
 */

#define __put_user(x,ptr)\
({\
    int size = sizeof(*ptr);\
    switch(size) {\
    case 1:\
        stb(ptr, (typeof(*ptr))(x));\
        break;\
    case 2:\
        stw(ptr, (typeof(*ptr))(x));\
        break;\
    case 4:\
        stl(ptr, (typeof(*ptr))(x));\
        break;\
    case 8:\
        stq(ptr, (typeof(*ptr))(x));\
        break;\
    default:\
        abort();\
    }\
    0;\
})

#define get_user(val, ptr) (typeof(*ptr))(*(ptr))


#define __copy_to_user(dst, src, size)\
({\
    memcpy(dst, src, size);\
    0;\
})
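
/* __put_user() and __copy_to_user() above mimic the kernel's user-access
   helpers on top of QEMU's memory access functions (stb/stw/stl/stq, memcpy).
   They always "succeed" and evaluate to 0, so the error-accumulation pattern
   used below, e.g.

       err |= __put_user(env->regs[R_EAX], &sc->eax);

   is kept mainly to stay close to the kernel code this was derived from. */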

static inline int copy_siginfo_to_user(target_siginfo_t *tinfo,
                                       const target_siginfo_t *info)
{
    tswap_siginfo(tinfo, info);
    return 0;
}

/* XXX: save x87 state */
static int
setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
                 CPUX86State *env, unsigned long mask)
{
    int err = 0;

    err |= __put_user(env->segs[R_GS], (unsigned int *)&sc->gs);
    err |= __put_user(env->segs[R_FS], (unsigned int *)&sc->fs);
    err |= __put_user(env->segs[R_ES], (unsigned int *)&sc->es);
    err |= __put_user(env->segs[R_DS], (unsigned int *)&sc->ds);
    err |= __put_user(env->regs[R_EDI], &sc->edi);
    err |= __put_user(env->regs[R_ESI], &sc->esi);
    err |= __put_user(env->regs[R_EBP], &sc->ebp);
    err |= __put_user(env->regs[R_ESP], &sc->esp);
    err |= __put_user(env->regs[R_EBX], &sc->ebx);
    err |= __put_user(env->regs[R_EDX], &sc->edx);
    err |= __put_user(env->regs[R_ECX], &sc->ecx);
    err |= __put_user(env->regs[R_EAX], &sc->eax);
    err |= __put_user(/*current->thread.trap_no*/ 0, &sc->trapno);
    err |= __put_user(/*current->thread.error_code*/ 0, &sc->err);
    err |= __put_user(env->eip, &sc->eip);
    err |= __put_user(env->segs[R_CS], (unsigned int *)&sc->cs);
    err |= __put_user(env->eflags, &sc->eflags);
    err |= __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    err |= __put_user(env->segs[R_SS], (unsigned int *)&sc->ss);
#if 0
    tmp = save_i387(fpstate);
    if (tmp < 0)
        err = 1;
    else
        err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
#else
    err |= __put_user(0, &sc->fpstate);
#endif
    /* non-iBCS2 extensions.. */
    err |= __put_user(mask, &sc->oldmask);
    err |= __put_user(/*current->thread.cr2*/ 0, &sc->cr2);
    return err;
}

/*
 * Determine which stack to use..
 */

static inline void *
get_sigframe(struct emulated_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
#if 0
    /* This is the X/Open sanctioned signal stack switching. */
    if (ka->sa.sa_flags & SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0)
            esp = current->sas_ss_sp + current->sas_ss_size;
    }

    /* This is the legacy signal stack switching. */
    else if ((regs->xss & 0xffff) != __USER_DS &&
             !(ka->sa.sa_flags & SA_RESTORER) &&
             ka->sa.sa_restorer) {
        esp = (unsigned long) ka->sa.sa_restorer;
    }
#endif
    return (void *)((esp - frame_size) & -8ul);
}
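
/* The "& -8ul" above rounds the frame address down to an 8-byte boundary
   after reserving frame_size bytes below the current stack pointer.
   Illustrative example (made-up numbers): with esp = 0xbffff41c and
   frame_size = 0x2a0, esp - frame_size = 0xbffff17c, which rounds down to
   0xbffff178. */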

static void setup_frame(int sig, struct emulated_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    struct sigframe *frame;
    int err = 0;

    frame = get_sigframe(ka, env, sizeof(*frame));

#if 0
    if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
        goto give_sigsegv;
#endif
    err |= __put_user((/*current->exec_domain
                         && current->exec_domain->signal_invmap
                         && sig < 32
                         ? current->exec_domain->signal_invmap[sig]
                         : */ sig),
                      &frame->sig);
    if (err)
        goto give_sigsegv;

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0]);
    if (err)
        goto give_sigsegv;

    if (TARGET_NSIG_WORDS > 1) {
        err |= __copy_to_user(frame->extramask, &set->sig[1],
                              sizeof(frame->extramask));
    }
    if (err)
        goto give_sigsegv;

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace. */
    if (ka->sa.sa_flags & TARGET_SA_RESTORER) {
        err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
    } else {
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        err |= __put_user(0xb858, (short *)(frame->retcode+0));
        err |= __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        err |= __put_user(0x80cd, (short *)(frame->retcode+6));
    }

    if (err)
        goto give_sigsegv;

    /* Set up registers for signal handler */
    env->regs[R_ESP] = (unsigned long) frame;
    env->eip = (unsigned long) ka->sa._sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV)
        ka->sa._sa_handler = TARGET_SIG_DFL;
    force_sig(TARGET_SIGSEGV /* , current */);
}

static void setup_rt_frame(int sig, struct emulated_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    struct rt_sigframe *frame;
    int err = 0;

    frame = get_sigframe(ka, env, sizeof(*frame));

#if 0
    if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
        goto give_sigsegv;
#endif

    err |= __put_user((/*current->exec_domain
                         && current->exec_domain->signal_invmap
                         && sig < 32
                         ? current->exec_domain->signal_invmap[sig]
                         : */sig),
                      &frame->sig);
    err |= __put_user((target_ulong)&frame->info, &frame->pinfo);
    err |= __put_user((target_ulong)&frame->uc, &frame->puc);
    err |= copy_siginfo_to_user(&frame->info, info);
    if (err)
        goto give_sigsegv;

    /* Create the ucontext. */
    err |= __put_user(0, &frame->uc.uc_flags);
    err |= __put_user(0, &frame->uc.uc_link);
    err |= __put_user(/*current->sas_ss_sp*/ 0, &frame->uc.uc_stack.ss_sp);
    err |= __put_user(/* sas_ss_flags(regs->esp) */ 0,
                      &frame->uc.uc_stack.ss_flags);
    err |= __put_user(/* current->sas_ss_size */ 0, &frame->uc.uc_stack.ss_size);
    err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
                            env, set->sig[0]);
    err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
    if (err)
        goto give_sigsegv;

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace. */
    if (ka->sa.sa_flags & TARGET_SA_RESTORER) {
        err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
    } else {
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        err |= __put_user(0xb8, (char *)(frame->retcode+0));
        err |= __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        err |= __put_user(0x80cd, (short *)(frame->retcode+5));
    }

    if (err)
        goto give_sigsegv;

    /* Set up registers for signal handler */
    env->regs[R_ESP] = (unsigned long) frame;
    env->eip = (unsigned long) ka->sa._sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV)
        ka->sa._sa_handler = TARGET_SIG_DFL;
    force_sig(TARGET_SIGSEGV /* , current */);
}
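
/* The retcode buffers written by setup_frame() and setup_rt_frame() above are
   tiny trampolines, stored in target (little-endian x86) byte order:

       sigframe:     58                     popl %eax
                     b8 <NR_sigreturn>      movl $TARGET_NR_sigreturn,%eax
                     cd 80                  int  $0x80

       rt_sigframe:  b8 <NR_rt_sigreturn>   movl $TARGET_NR_rt_sigreturn,%eax
                     cd 80                  int  $0x80

   They are only executed when the target program did not register its own
   SA_RESTORER stub. */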

static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
{
    unsigned int err = 0;

#define COPY(x)         err |= __get_user(regs->x, &sc->x)

#define COPY_SEG(seg)                                   \
        { unsigned short tmp;                           \
          err |= __get_user(tmp, &sc->seg);             \
          regs->x##seg = tmp; }

#define COPY_SEG_STRICT(seg)                            \
        { unsigned short tmp;                           \
          err |= __get_user(tmp, &sc->seg);             \
          regs->x##seg = tmp|3; }

#define GET_SEG(seg)                                    \
        { unsigned short tmp;                           \
          err |= __get_user(tmp, &sc->seg);             \
          loadsegment(seg,tmp); }

    cpu_x86_load_seg(env, R_GS, lduw(&sc->gs));
    cpu_x86_load_seg(env, R_FS, lduw(&sc->fs));
    cpu_x86_load_seg(env, R_ES, lduw(&sc->es));
    cpu_x86_load_seg(env, R_DS, lduw(&sc->ds));

    env->regs[R_EDI] = ldl(&sc->edi);
    env->regs[R_ESI] = ldl(&sc->esi);
    env->regs[R_EBP] = ldl(&sc->ebp);
    env->regs[R_ESP] = ldl(&sc->esp);
    env->regs[R_EBX] = ldl(&sc->ebx);
    env->regs[R_EDX] = ldl(&sc->edx);
    env->regs[R_ECX] = ldl(&sc->ecx);
    env->eip = ldl(&sc->eip);

    cpu_x86_load_seg(env, R_CS, lduw(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw(&sc->ss) | 3);

    {
        unsigned int tmpflags;
        tmpflags = ldl(&sc->eflags);
        env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
        // regs->orig_eax = -1; /* disable syscall checks */
    }

#if 0
    {
        struct _fpstate * buf;
        err |= __get_user(buf, &sc->fpstate);
        if (buf) {
            if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
                goto badframe;
            err |= restore_i387(buf);
        }
    }
#endif
    *peax = ldl(&sc->eax);
    return err;
#if 0
badframe:
    return 1;
#endif
}
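
/* The eflags mask 0x40DD5 used above selects CF|PF|AF|ZF|SF|TF|DF|OF|AC:
   only those bits are taken from the saved sigcontext, while privileged or
   system bits (IF, IOPL, VM, ...) keep their current values, mirroring the
   Linux kernel's restore_sigcontext(). */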

long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame = (struct sigframe *)(env->regs[R_ESP] - 8);
    target_sigset_t target_set;
    sigset_t set;
    int eax, i;

    /* set blocked signals */
    target_set.sig[0] = frame->sc.oldmask;
    for(i = 1; i < TARGET_NSIG_WORDS; i++)
        target_set.sig[i] = frame->extramask[i - 1];

    target_to_host_sigset(&set, &target_set);
    sigprocmask(SIG_SETMASK, &set, NULL);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc, &eax))
        goto badframe;
    return eax;

badframe:
    force_sig(TARGET_SIGSEGV);
    return 0;
}
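
/* The "- 8" above recovers the start of struct sigframe: by the time the
   target executes sigreturn, the handler's "ret" has popped pretcode and the
   trampoline's "popl %eax" has popped the signal number, so ESP points 8
   bytes past the beginning of the frame.  do_rt_sigreturn() below uses "- 4"
   because its trampoline pops only pretcode. */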

long do_rt_sigreturn(CPUX86State *env)
{
    struct rt_sigframe *frame = (struct rt_sigframe *)(env->regs[R_ESP] - 4);
    target_sigset_t target_set;
    sigset_t set;
    //  stack_t st;
    int eax;

#if 0
    if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
        goto badframe;
#endif
    memcpy(&target_set, &frame->uc.uc_sigmask, sizeof(target_sigset_t));

    target_to_host_sigset(&set, &target_set);
    sigprocmask(SIG_SETMASK, &set, NULL);

    if (restore_sigcontext(env, &frame->uc.uc_mcontext, &eax))
        goto badframe;

#if 0
    if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
        goto badframe;
    /* It is more difficult to avoid calling this function than to
       call it and ignore errors. */
    do_sigaltstack(&st, NULL, regs->esp);
#endif
    return eax;

badframe:
    force_sig(TARGET_SIGSEGV);
    return 0;
}

#endif

void process_pending_signals(void *cpu_env)
{
    int sig;
    target_ulong handler;
    sigset_t set, old_set;
    target_sigset_t target_old_set;
    struct emulated_sigaction *k;
    struct sigqueue *q;

    if (!signal_pending)
        return;

    k = sigact_table;
    for(sig = 1; sig <= TARGET_NSIG; sig++) {
        if (k->pending)
            goto handle_signal;
        k++;
    }
    /* if no signal is pending, just return */
    signal_pending = 0;
    return;

handle_signal:
#ifdef DEBUG_SIGNAL
    fprintf(stderr, "qemu: process signal %d\n", sig);
#endif
    /* dequeue signal */
    q = k->first;
    k->first = q->next;
    if (!k->first)
        k->pending = 0;

    handler = k->sa._sa_handler;
    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals, the others are fatal */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH) {
            force_sig(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        target_to_host_sigset(&set, &k->sa.sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(k->sa.sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* block signals in the handler using Linux */
        sigprocmask(SIG_BLOCK, &set, &old_set);
        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset(&target_old_set, &old_set);

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#ifdef TARGET_I386
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
        if (k->sa.sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, k, &q->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, k, &target_old_set, cpu_env);
        if (k->sa.sa_flags & TARGET_SA_RESETHAND)
            k->sa._sa_handler = TARGET_SIG_DFL;
    }
    if (q != &k->info)
        free_sigqueue(q);
}
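
/* process_pending_signals() delivers at most one pending signal per call; it
   is expected to be invoked repeatedly from the main CPU emulation loop
   (cpu_loop() in linux-user/main.c in this tree) whenever signal_pending may
   have been set by host_signal_handler(). */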