/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"

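/* Guest signal dispositions, indexed by target signal number minus one. */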
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

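/* Inverse table, filled in at startup by signal_table_init(). */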
static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

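/*
 * Guest sigset helpers.  As in the kernel, signal N occupies bit
 * (N - 1) % TARGET_NSIG_BPW of word (N - 1) / TARGET_NSIG_BPW.
 */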
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

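/*
 * Block all host signals and mark that a guest signal is pending.
 * Returns the previous value of signal_pending, i.e. non-zero if a
 * signal was already pending, in which case callers report
 * -TARGET_ERESTARTSYS (see do_sigprocmask() and do_sigaction()).
 */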
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest.  Note that set and
 * oldset are host signal sets, not guest ones.  Returns -TARGET_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

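/*
 * Note the unsigned comparison in on_sig_stack(): if sp is below ss_sp,
 * the subtraction wraps around to a huge value, so the test is
 * equivalent to ss_sp <= sp < ss_sp + ss_size.
 */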
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

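/*
 * Install a new sigaltstack from guest memory, validating flags and the
 * minimum stack size much as the kernel's do_sigaltstack() does.
 * Returns 0 or a target errno.
 */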
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

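    /*
     * deposit32(si_code, 16, 16, si_type) keeps the low 16 bits of
     * si_code and places si_type in bits 16..31; e.g.
     * deposit32(0, 16, 16, t) == t << 16.
     */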
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

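/* Returns 1 if the default action of this target signal is fatal (so we
 * must install a host handler for it), 0 if it is ignored by default or
 * is a job control signal.
 */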
static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc itself consumes
     * the lowest couple of rt signals, which is why SIGRTMIN (34) is
     * generally greater than __SIGRTMIN (32), and probably nobody is
     * using the uppermost ones.  To fix this properly we would need to
     * do manual signal delivery multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

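    /*
     * On a typical glibc host (SIGRTMIN == 34, SIGRTMAX == 64) the loops
     * above leave the highest couple of target rt signals unmapped; the
     * trace below counts such poisoned entries.
     */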
    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals. */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
 * as possible.
 */
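/* Note that only one synchronous signal can be pending here: queueing a
 * second one before the first is delivered overwrites ts->sync_signal.
 */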
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
     * so forward those to it first. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered.
     * We can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for every possible rt signal even if they
             * don't need it.  An error here could abort them, whereas there
             * is no harm in the signal simply not being available later.
             * This is the case for Go, see
             * https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

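/* Deliver one pending guest signal: either perform the default action
 * or set up a signal frame so the guest handler runs when the virtual
 * CPU resumes guest code.
 */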
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: some signals are ignored, the others are
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced;
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal, since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal
                 * (e.g. SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}