/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "trace.h"
#include "signal-common.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

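/*
 * Host-to-target signal number mapping.  Entries left as zero here are
 * filled in with the identity mapping by signal_init().
 */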
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal. */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

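/*
 * Guest sigset helpers.  Guest signal numbers are 1-based, so bit 0 of
 * the first word corresponds to signal 1; each word holds TARGET_NSIG_BPW
 * bits, mirroring the kernel's sigset_t layout.
 */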
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

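/*
 * Block all host signals for this thread and mark signal_pending.
 * Returns the previous value of signal_pending: non-zero means a guest
 * signal was already pending, in which case callers typically restart
 * the syscall with -TARGET_ERESTARTSYS.
 */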
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

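/*
 * Byte-swap a target_siginfo_t filled in by host_to_target_siginfo_noswap(),
 * using the QEMU_SI_* marker in the top 16 bits of si_code to select the
 * valid union members and stripping that marker before it reaches the guest.
 */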
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

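/*
 * Returns 1 if the given target signal is fatal by default, i.e. it is
 * neither ignored by default nor a job-control signal.
 */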
static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

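/*
 * Build the host<->target signal number conversion tables, record the
 * host's current signal mask and dispositions, and install
 * host_signal_handler() for every default-fatal signal.
 */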
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We cannot just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

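/*
 * Handler installed for host signals: SIGSEGV/SIGBUS with a positive
 * si_code are first offered to the CPU emulator via cpu_signal_handler();
 * anything else is recorded as a pending guest signal in ts->sigtab,
 * host signals are blocked until process_pending_signals() runs, and the
 * virtual CPU is kicked out of the translated-code loop with cpu_exit().
 */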
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions;
       we forward some of them to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(ts->sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(ts->sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

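/*
 * Deliver one pending guest signal: apply the default action (ignore,
 * stop, or dump core and abort), or set up a signal frame on the guest
 * stack and adjust the guest signal mask for the registered handler.
 */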
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

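/*
 * Main signal delivery loop, run while signal_pending is set: handle the
 * synchronous signal first, then any unblocked asynchronous signals, and
 * finally unblock host signals and recheck, since unblocking may
 * immediately raise another host signal.
 */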
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}