]> git.proxmox.com Git - mirror_qemu.git/blame - linux-user/signal.c
Merge remote-tracking branch 'remotes/vivier2/tags/linux-user-for-6.0-pull-request...
[mirror_qemu.git] / linux-user / signal.c
CommitLineData
31e31b8a 1/*
66fb9763 2 * Emulation of Linux signals
5fafdf24 3 *
31e31b8a
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
8167ee88 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
31e31b8a 18 */
d39594e9 19#include "qemu/osdep.h"
a70dadc7 20#include "qemu/bitops.h"
31e31b8a 21#include <sys/ucontext.h>
edf8e2af 22#include <sys/resource.h>
31e31b8a 23
3ef693a0 24#include "qemu.h"
c8ee0a44 25#include "trace.h"
befb7447 26#include "signal-common.h"
66fb9763 27
624f7979 28static struct target_sigaction sigact_table[TARGET_NSIG];
31e31b8a 29
5fafdf24 30static void host_signal_handler(int host_signum, siginfo_t *info,
66fb9763
FB
31 void *puc);
32
9fcff3a6
LV
33
34/*
35 * System includes define _NSIG as SIGRTMAX + 1,
36 * but qemu (like the kernel) defines TARGET_NSIG as TARGET_SIGRTMAX
37 * and the first signal is SIGHUP defined as 1
38 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
39 * a process exists without sending it a signal.
40 */
41QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
3ca05588 42static uint8_t host_to_target_signal_table[_NSIG] = {
9e5f5284
FB
43 [SIGHUP] = TARGET_SIGHUP,
44 [SIGINT] = TARGET_SIGINT,
45 [SIGQUIT] = TARGET_SIGQUIT,
46 [SIGILL] = TARGET_SIGILL,
47 [SIGTRAP] = TARGET_SIGTRAP,
48 [SIGABRT] = TARGET_SIGABRT,
01e3b763 49/* [SIGIOT] = TARGET_SIGIOT,*/
9e5f5284
FB
50 [SIGBUS] = TARGET_SIGBUS,
51 [SIGFPE] = TARGET_SIGFPE,
52 [SIGKILL] = TARGET_SIGKILL,
53 [SIGUSR1] = TARGET_SIGUSR1,
54 [SIGSEGV] = TARGET_SIGSEGV,
55 [SIGUSR2] = TARGET_SIGUSR2,
56 [SIGPIPE] = TARGET_SIGPIPE,
57 [SIGALRM] = TARGET_SIGALRM,
58 [SIGTERM] = TARGET_SIGTERM,
59#ifdef SIGSTKFLT
60 [SIGSTKFLT] = TARGET_SIGSTKFLT,
61#endif
62 [SIGCHLD] = TARGET_SIGCHLD,
63 [SIGCONT] = TARGET_SIGCONT,
64 [SIGSTOP] = TARGET_SIGSTOP,
65 [SIGTSTP] = TARGET_SIGTSTP,
66 [SIGTTIN] = TARGET_SIGTTIN,
67 [SIGTTOU] = TARGET_SIGTTOU,
68 [SIGURG] = TARGET_SIGURG,
69 [SIGXCPU] = TARGET_SIGXCPU,
70 [SIGXFSZ] = TARGET_SIGXFSZ,
71 [SIGVTALRM] = TARGET_SIGVTALRM,
72 [SIGPROF] = TARGET_SIGPROF,
73 [SIGWINCH] = TARGET_SIGWINCH,
74 [SIGIO] = TARGET_SIGIO,
75 [SIGPWR] = TARGET_SIGPWR,
76 [SIGSYS] = TARGET_SIGSYS,
77 /* next signals stay the same */
78};
9e5f5284 79
9fcff3a6
LV
80static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
81
82/* valid sig is between 1 and _NSIG - 1 */
1d9d8b55 83int host_to_target_signal(int sig)
31e31b8a 84{
9fcff3a6 85 if (sig < 1 || sig >= _NSIG) {
4cb05961 86 return sig;
9fcff3a6 87 }
9e5f5284 88 return host_to_target_signal_table[sig];
31e31b8a
FB
89}
90
9fcff3a6 91/* valid sig is between 1 and TARGET_NSIG */
4cb05961 92int target_to_host_signal(int sig)
31e31b8a 93{
9fcff3a6 94 if (sig < 1 || sig > TARGET_NSIG) {
4cb05961 95 return sig;
9fcff3a6 96 }
9e5f5284 97 return target_to_host_signal_table[sig];
31e31b8a
FB
98}
99
c227f099 100static inline void target_sigaddset(target_sigset_t *set, int signum)
f5545b5c
PB
101{
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105}
106
c227f099 107static inline int target_sigismember(const target_sigset_t *set, int signum)
f5545b5c
PB
108{
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112}
113
befb7447
LV
114void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
66fb9763 116{
9fcff3a6 117 int host_sig, target_sig;
f5545b5c 118 target_sigemptyset(d);
9fcff3a6
LV
119 for (host_sig = 1; host_sig < _NSIG; host_sig++) {
120 target_sig = host_to_target_signal(host_sig);
121 if (target_sig < 1 || target_sig > TARGET_NSIG) {
122 continue;
123 }
124 if (sigismember(s, host_sig)) {
125 target_sigaddset(d, target_sig);
f5545b5c 126 }
66fb9763
FB
127 }
128}
129
c227f099 130void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
9231944d 131{
c227f099 132 target_sigset_t d1;
9231944d
FB
133 int i;
134
135 host_to_target_sigset_internal(&d1, s);
136 for(i = 0;i < TARGET_NSIG_WORDS; i++)
cbb21eed 137 d->sig[i] = tswapal(d1.sig[i]);
9231944d
FB
138}
139
befb7447
LV
140void target_to_host_sigset_internal(sigset_t *d,
141 const target_sigset_t *s)
66fb9763 142{
9fcff3a6 143 int host_sig, target_sig;
f5545b5c 144 sigemptyset(d);
9fcff3a6
LV
145 for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
146 host_sig = target_to_host_signal(target_sig);
147 if (host_sig < 1 || host_sig >= _NSIG) {
148 continue;
149 }
150 if (target_sigismember(s, target_sig)) {
151 sigaddset(d, host_sig);
f5545b5c 152 }
da7c8647 153 }
66fb9763
FB
154}
155
c227f099 156void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
9231944d 157{
c227f099 158 target_sigset_t s1;
9231944d
FB
159 int i;
160
161 for(i = 0;i < TARGET_NSIG_WORDS; i++)
cbb21eed 162 s1.sig[i] = tswapal(s->sig[i]);
9231944d
FB
163 target_to_host_sigset_internal(d, &s1);
164}
3b46e624 165
992f48a0 166void host_to_target_old_sigset(abi_ulong *old_sigset,
66fb9763
FB
167 const sigset_t *sigset)
168{
c227f099 169 target_sigset_t d;
9e5f5284
FB
170 host_to_target_sigset(&d, sigset);
171 *old_sigset = d.sig[0];
66fb9763
FB
172}
173
5fafdf24 174void target_to_host_old_sigset(sigset_t *sigset,
992f48a0 175 const abi_ulong *old_sigset)
66fb9763 176{
c227f099 177 target_sigset_t d;
9e5f5284
FB
178 int i;
179
180 d.sig[0] = *old_sigset;
181 for(i = 1;i < TARGET_NSIG_WORDS; i++)
182 d.sig[i] = 0;
183 target_to_host_sigset(sigset, &d);
66fb9763
FB
184}
185
3d3efba0
PM
186int block_signals(void)
187{
188 TaskState *ts = (TaskState *)thread_cpu->opaque;
189 sigset_t set;
3d3efba0
PM
190
191 /* It's OK to block everything including SIGSEGV, because we won't
192 * run any further guest code before unblocking signals in
193 * process_pending_signals().
194 */
195 sigfillset(&set);
196 sigprocmask(SIG_SETMASK, &set, 0);
197
d73415a3 198 return qatomic_xchg(&ts->signal_pending, 1);
3d3efba0
PM
199}
200
1c275925
AB
201/* Wrapper for sigprocmask function
202 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
3d3efba0
PM
203 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
204 * a signal was already pending and the syscall must be restarted, or
205 * 0 on success.
206 * If set is NULL, this is guaranteed not to fail.
1c275925
AB
207 */
208int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
209{
3d3efba0
PM
210 TaskState *ts = (TaskState *)thread_cpu->opaque;
211
212 if (oldset) {
213 *oldset = ts->signal_mask;
214 }
a7ec0f98
PM
215
216 if (set) {
3d3efba0 217 int i;
a7ec0f98 218
3d3efba0
PM
219 if (block_signals()) {
220 return -TARGET_ERESTARTSYS;
221 }
a7ec0f98
PM
222
223 switch (how) {
224 case SIG_BLOCK:
3d3efba0 225 sigorset(&ts->signal_mask, &ts->signal_mask, set);
a7ec0f98
PM
226 break;
227 case SIG_UNBLOCK:
3d3efba0
PM
228 for (i = 1; i <= NSIG; ++i) {
229 if (sigismember(set, i)) {
230 sigdelset(&ts->signal_mask, i);
231 }
a7ec0f98
PM
232 }
233 break;
234 case SIG_SETMASK:
3d3efba0 235 ts->signal_mask = *set;
a7ec0f98
PM
236 break;
237 default:
238 g_assert_not_reached();
239 }
a7ec0f98 240
3d3efba0
PM
241 /* Silently ignore attempts to change blocking status of KILL or STOP */
242 sigdelset(&ts->signal_mask, SIGKILL);
243 sigdelset(&ts->signal_mask, SIGSTOP);
a7ec0f98 244 }
3d3efba0 245 return 0;
1c275925
AB
246}
247
e8f29049 248#if !defined(TARGET_NIOS2)
3d3efba0
PM
249/* Just set the guest's signal mask to the specified value; the
250 * caller is assumed to have called block_signals() already.
251 */
befb7447 252void set_sigmask(const sigset_t *set)
9eede5b6 253{
3d3efba0
PM
254 TaskState *ts = (TaskState *)thread_cpu->opaque;
255
256 ts->signal_mask = *set;
9eede5b6
PM
257}
258#endif
259
465e237b
LV
260/* sigaltstack management */
261
262int on_sig_stack(unsigned long sp)
263{
5bfce0b7
PM
264 TaskState *ts = (TaskState *)thread_cpu->opaque;
265
266 return (sp - ts->sigaltstack_used.ss_sp
267 < ts->sigaltstack_used.ss_size);
465e237b
LV
268}
269
270int sas_ss_flags(unsigned long sp)
271{
5bfce0b7
PM
272 TaskState *ts = (TaskState *)thread_cpu->opaque;
273
274 return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
465e237b
LV
275 : on_sig_stack(sp) ? SS_ONSTACK : 0);
276}
277
278abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
279{
280 /*
281 * This is the X/Open sanctioned signal stack switching.
282 */
5bfce0b7
PM
283 TaskState *ts = (TaskState *)thread_cpu->opaque;
284
465e237b 285 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5bfce0b7 286 return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
465e237b
LV
287 }
288 return sp;
289}
290
291void target_save_altstack(target_stack_t *uss, CPUArchState *env)
292{
5bfce0b7
PM
293 TaskState *ts = (TaskState *)thread_cpu->opaque;
294
295 __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
465e237b 296 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
5bfce0b7 297 __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
465e237b
LV
298}
299
9de5e440
FB
300/* siginfo conversion */
301
c227f099 302static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
9de5e440 303 const siginfo_t *info)
66fb9763 304{
a05c6409 305 int sig = host_to_target_signal(info->si_signo);
a70dadc7
PM
306 int si_code = info->si_code;
307 int si_type;
9de5e440
FB
308 tinfo->si_signo = sig;
309 tinfo->si_errno = 0;
afd7cd92 310 tinfo->si_code = info->si_code;
a05c6409 311
55d72a7e
PM
312 /* This memset serves two purposes:
313 * (1) ensure we don't leak random junk to the guest later
314 * (2) placate false positives from gcc about fields
315 * being used uninitialized if it chooses to inline both this
316 * function and tswap_siginfo() into host_to_target_siginfo().
317 */
318 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
319
a70dadc7
PM
320 /* This is awkward, because we have to use a combination of
321 * the si_code and si_signo to figure out which of the union's
322 * members are valid. (Within the host kernel it is always possible
323 * to tell, but the kernel carefully avoids giving userspace the
324 * high 16 bits of si_code, so we don't have the information to
325 * do this the easy way...) We therefore make our best guess,
326 * bearing in mind that a guest can spoof most of the si_codes
327 * via rt_sigqueueinfo() if it likes.
328 *
329 * Once we have made our guess, we record it in the top 16 bits of
330 * the si_code, so that tswap_siginfo() later can use it.
331 * tswap_siginfo() will strip these top bits out before writing
332 * si_code to the guest (sign-extending the lower bits).
333 */
334
335 switch (si_code) {
336 case SI_USER:
337 case SI_TKILL:
338 case SI_KERNEL:
339 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
340 * These are the only unspoofable si_code values.
341 */
342 tinfo->_sifields._kill._pid = info->si_pid;
343 tinfo->_sifields._kill._uid = info->si_uid;
344 si_type = QEMU_SI_KILL;
345 break;
346 default:
347 /* Everything else is spoofable. Make best guess based on signal */
348 switch (sig) {
349 case TARGET_SIGCHLD:
350 tinfo->_sifields._sigchld._pid = info->si_pid;
351 tinfo->_sifields._sigchld._uid = info->si_uid;
1c3dfb50 352 tinfo->_sifields._sigchld._status = info->si_status;
a70dadc7
PM
353 tinfo->_sifields._sigchld._utime = info->si_utime;
354 tinfo->_sifields._sigchld._stime = info->si_stime;
355 si_type = QEMU_SI_CHLD;
356 break;
357 case TARGET_SIGIO:
358 tinfo->_sifields._sigpoll._band = info->si_band;
359 tinfo->_sifields._sigpoll._fd = info->si_fd;
360 si_type = QEMU_SI_POLL;
361 break;
362 default:
363 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
364 tinfo->_sifields._rt._pid = info->si_pid;
365 tinfo->_sifields._rt._uid = info->si_uid;
366 /* XXX: potential problem if 64 bit */
367 tinfo->_sifields._rt._sigval.sival_ptr
da7c8647 368 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
a70dadc7
PM
369 si_type = QEMU_SI_RT;
370 break;
371 }
372 break;
9de5e440 373 }
a70dadc7
PM
374
375 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
9de5e440
FB
376}
377
befb7447
LV
378void tswap_siginfo(target_siginfo_t *tinfo,
379 const target_siginfo_t *info)
9de5e440 380{
a70dadc7
PM
381 int si_type = extract32(info->si_code, 16, 16);
382 int si_code = sextract32(info->si_code, 0, 16);
383
384 __put_user(info->si_signo, &tinfo->si_signo);
385 __put_user(info->si_errno, &tinfo->si_errno);
386 __put_user(si_code, &tinfo->si_code);
387
388 /* We can use our internal marker of which fields in the structure
389 * are valid, rather than duplicating the guesswork of
390 * host_to_target_siginfo_noswap() here.
391 */
392 switch (si_type) {
393 case QEMU_SI_KILL:
394 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
395 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
396 break;
397 case QEMU_SI_TIMER:
398 __put_user(info->_sifields._timer._timer1,
399 &tinfo->_sifields._timer._timer1);
400 __put_user(info->_sifields._timer._timer2,
401 &tinfo->_sifields._timer._timer2);
402 break;
403 case QEMU_SI_POLL:
404 __put_user(info->_sifields._sigpoll._band,
405 &tinfo->_sifields._sigpoll._band);
406 __put_user(info->_sifields._sigpoll._fd,
407 &tinfo->_sifields._sigpoll._fd);
408 break;
409 case QEMU_SI_FAULT:
410 __put_user(info->_sifields._sigfault._addr,
411 &tinfo->_sifields._sigfault._addr);
412 break;
413 case QEMU_SI_CHLD:
414 __put_user(info->_sifields._sigchld._pid,
415 &tinfo->_sifields._sigchld._pid);
416 __put_user(info->_sifields._sigchld._uid,
417 &tinfo->_sifields._sigchld._uid);
418 __put_user(info->_sifields._sigchld._status,
419 &tinfo->_sifields._sigchld._status);
420 __put_user(info->_sifields._sigchld._utime,
421 &tinfo->_sifields._sigchld._utime);
422 __put_user(info->_sifields._sigchld._stime,
423 &tinfo->_sifields._sigchld._stime);
424 break;
425 case QEMU_SI_RT:
426 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
427 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
428 __put_user(info->_sifields._rt._sigval.sival_ptr,
429 &tinfo->_sifields._rt._sigval.sival_ptr);
430 break;
431 default:
432 g_assert_not_reached();
9de5e440
FB
433 }
434}
435
c227f099 436void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
9de5e440 437{
55d72a7e
PM
438 target_siginfo_t tgt_tmp;
439 host_to_target_siginfo_noswap(&tgt_tmp, info);
440 tswap_siginfo(tinfo, &tgt_tmp);
66fb9763
FB
441}
442
9de5e440 443/* XXX: we support only POSIX RT signals are used. */
aa1f17c1 444/* XXX: find a solution for 64 bit (additional malloced data is needed) */
c227f099 445void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
66fb9763 446{
90c0f080
PM
447 /* This conversion is used only for the rt_sigqueueinfo syscall,
448 * and so we know that the _rt fields are the valid ones.
449 */
450 abi_ulong sival_ptr;
451
452 __get_user(info->si_signo, &tinfo->si_signo);
453 __get_user(info->si_errno, &tinfo->si_errno);
454 __get_user(info->si_code, &tinfo->si_code);
455 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
456 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
457 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
458 info->si_value.sival_ptr = (void *)(long)sival_ptr;
66fb9763
FB
459}
460
ca587a8e
AJ
461static int fatal_signal (int sig)
462{
463 switch (sig) {
464 case TARGET_SIGCHLD:
465 case TARGET_SIGURG:
466 case TARGET_SIGWINCH:
467 /* Ignored by default. */
468 return 0;
469 case TARGET_SIGCONT:
470 case TARGET_SIGSTOP:
471 case TARGET_SIGTSTP:
472 case TARGET_SIGTTIN:
473 case TARGET_SIGTTOU:
474 /* Job control signals. */
475 return 0;
476 default:
477 return 1;
478 }
479}
480
edf8e2af
MW
481/* returns 1 if given signal should dump core if not handled */
482static int core_dump_signal(int sig)
483{
484 switch (sig) {
485 case TARGET_SIGABRT:
486 case TARGET_SIGFPE:
487 case TARGET_SIGILL:
488 case TARGET_SIGQUIT:
489 case TARGET_SIGSEGV:
490 case TARGET_SIGTRAP:
491 case TARGET_SIGBUS:
492 return (1);
493 default:
494 return (0);
495 }
496}
497
365510fb
LV
498static void signal_table_init(void)
499{
6bc024e7 500 int host_sig, target_sig, count;
365510fb
LV
501
502 /*
6bc024e7
LV
503 * Signals are supported starting from TARGET_SIGRTMIN and going up
504 * until we run out of host realtime signals.
505 * glibc at least uses only the lower 2 rt signals and probably
506 * nobody's using the upper ones.
507 * it's why SIGRTMIN (34) is generally greater than __SIGRTMIN (32)
365510fb
LV
508 * To fix this properly we need to do manual signal delivery multiplexed
509 * over a single host signal.
6bc024e7
LV
510 * Attempts for configure "missing" signals via sigaction will be
511 * silently ignored.
365510fb 512 */
6bc024e7
LV
513 for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
514 target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
515 if (target_sig <= TARGET_NSIG) {
516 host_to_target_signal_table[host_sig] = target_sig;
517 }
518 }
365510fb
LV
519
520 /* generate signal conversion tables */
6bc024e7
LV
521 for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
522 target_to_host_signal_table[target_sig] = _NSIG; /* poison */
523 }
365510fb
LV
524 for (host_sig = 1; host_sig < _NSIG; host_sig++) {
525 if (host_to_target_signal_table[host_sig] == 0) {
526 host_to_target_signal_table[host_sig] = host_sig;
527 }
365510fb 528 target_sig = host_to_target_signal_table[host_sig];
9fcff3a6
LV
529 if (target_sig <= TARGET_NSIG) {
530 target_to_host_signal_table[target_sig] = host_sig;
531 }
365510fb 532 }
6bc024e7
LV
533
534 if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
535 for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
536 if (target_to_host_signal_table[target_sig] == _NSIG) {
537 count++;
538 }
539 }
540 trace_signal_table_init(count);
541 }
365510fb
LV
542}
543
31e31b8a
FB
544void signal_init(void)
545{
3d3efba0 546 TaskState *ts = (TaskState *)thread_cpu->opaque;
31e31b8a 547 struct sigaction act;
624f7979 548 struct sigaction oact;
365510fb 549 int i;
624f7979 550 int host_sig;
31e31b8a 551
365510fb
LV
552 /* initialize signal conversion tables */
553 signal_table_init();
3b46e624 554
3d3efba0
PM
555 /* Set the signal mask from the host mask. */
556 sigprocmask(0, 0, &ts->signal_mask);
557
9de5e440 558 sigfillset(&act.sa_mask);
31e31b8a
FB
559 act.sa_flags = SA_SIGINFO;
560 act.sa_sigaction = host_signal_handler;
624f7979 561 for(i = 1; i <= TARGET_NSIG; i++) {
4cc600d2 562#ifdef CONFIG_GPROF
9fcff3a6 563 if (i == TARGET_SIGPROF) {
716cdbe0
AB
564 continue;
565 }
566#endif
624f7979
PB
567 host_sig = target_to_host_signal(i);
568 sigaction(host_sig, NULL, &oact);
569 if (oact.sa_sigaction == (void *)SIG_IGN) {
570 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
571 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
572 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
573 }
574 /* If there's already a handler installed then something has
575 gone horribly wrong, so don't even try to handle that case. */
ca587a8e
AJ
576 /* Install some handlers for our own use. We need at least
577 SIGSEGV and SIGBUS, to detect exceptions. We can not just
578 trap all signals because it affects syscall interrupt
579 behavior. But do trap all default-fatal signals. */
580 if (fatal_signal (i))
624f7979 581 sigaction(host_sig, &act, NULL);
31e31b8a 582 }
66fb9763
FB
583}
584
c599d4d6
PM
585/* Force a synchronously taken signal. The kernel force_sig() function
586 * also forces the signal to "not blocked, not ignored", but for QEMU
587 * that work is done in process_pending_signals().
588 */
befb7447 589void force_sig(int sig)
c599d4d6
PM
590{
591 CPUState *cpu = thread_cpu;
592 CPUArchState *env = cpu->env_ptr;
593 target_siginfo_t info;
594
595 info.si_signo = sig;
596 info.si_errno = 0;
597 info.si_code = TARGET_SI_KERNEL;
598 info._sifields._kill._pid = 0;
599 info._sifields._kill._uid = 0;
600 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
601}
09391669
PM
602
603/* Force a SIGSEGV if we couldn't write to memory trying to set
604 * up the signal frame. oldsig is the signal we were trying to handle
605 * at the point of failure.
606 */
47ae93cd 607#if !defined(TARGET_RISCV)
befb7447 608void force_sigsegv(int oldsig)
09391669 609{
09391669
PM
610 if (oldsig == SIGSEGV) {
611 /* Make sure we don't try to deliver the signal again; this will
c599d4d6 612 * end up with handle_pending_signal() calling dump_core_and_abort().
09391669
PM
613 */
614 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
615 }
c4b35744 616 force_sig(TARGET_SIGSEGV);
09391669 617}
66fb9763 618
47ae93cd
MC
619#endif
620
9de5e440 621/* abort execution with signal */
c599d4d6 622static void QEMU_NORETURN dump_core_and_abort(int target_sig)
66fb9763 623{
0429a971
AF
624 CPUState *cpu = thread_cpu;
625 CPUArchState *env = cpu->env_ptr;
626 TaskState *ts = (TaskState *)cpu->opaque;
edf8e2af 627 int host_sig, core_dumped = 0;
603e4fd7 628 struct sigaction act;
c8ee0a44 629
66393fb9 630 host_sig = target_to_host_signal(target_sig);
c8ee0a44 631 trace_user_force_sig(env, target_sig, host_sig);
a2247f8e 632 gdb_signalled(env, target_sig);
603e4fd7 633
edf8e2af 634 /* dump core if supported by target binary format */
66393fb9 635 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
edf8e2af
MW
636 stop_all_tasks();
637 core_dumped =
a2247f8e 638 ((*ts->bprm->core_dump)(target_sig, env) == 0);
edf8e2af
MW
639 }
640 if (core_dumped) {
641 /* we already dumped the core of target process, we don't want
642 * a coredump of qemu itself */
643 struct rlimit nodump;
644 getrlimit(RLIMIT_CORE, &nodump);
645 nodump.rlim_cur=0;
646 setrlimit(RLIMIT_CORE, &nodump);
647 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
66393fb9 648 target_sig, strsignal(host_sig), "core dumped" );
edf8e2af
MW
649 }
650
0c58751c 651 /* The proper exit code for dying from an uncaught signal is
603e4fd7
AJ
652 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
653 * a negative value. To get the proper exit code we need to
654 * actually die from an uncaught signal. Here the default signal
655 * handler is installed, we send ourself a signal and we wait for
656 * it to arrive. */
657 sigfillset(&act.sa_mask);
658 act.sa_handler = SIG_DFL;
3a5d30bf 659 act.sa_flags = 0;
603e4fd7
AJ
660 sigaction(host_sig, &act, NULL);
661
662 /* For some reason raise(host_sig) doesn't send the signal when
663 * statically linked on x86-64. */
664 kill(getpid(), host_sig);
665
666 /* Make sure the signal isn't masked (just reuse the mask inside
667 of act) */
668 sigdelset(&act.sa_mask, host_sig);
669 sigsuspend(&act.sa_mask);
670
671 /* unreachable */
a6c6f76c 672 abort();
66fb9763
FB
673}
674
9de5e440
FB
675/* queue a signal so that it will be send to the virtual CPU as soon
676 as possible */
9d2803f7
PM
677int queue_signal(CPUArchState *env, int sig, int si_type,
678 target_siginfo_t *info)
31e31b8a 679{
29a0af61 680 CPUState *cpu = env_cpu(env);
0429a971 681 TaskState *ts = cpu->opaque;
66fb9763 682
c8ee0a44 683 trace_user_queue_signal(env, sig);
907f5fdd 684
9d2803f7 685 info->si_code = deposit32(info->si_code, 16, 16, si_type);
a70dadc7 686
655ed67c
TB
687 ts->sync_signal.info = *info;
688 ts->sync_signal.pending = sig;
907f5fdd 689 /* signal that a new signal is pending */
d73415a3 690 qatomic_set(&ts->signal_pending, 1);
907f5fdd 691 return 1; /* indicates that the signal was queued */
9de5e440
FB
692}
693
4d330cee
TB
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
699#endif
700
5fafdf24 701static void host_signal_handler(int host_signum, siginfo_t *info,
9de5e440
FB
702 void *puc)
703{
a2247f8e 704 CPUArchState *env = thread_cpu->env_ptr;
29a0af61 705 CPUState *cpu = env_cpu(env);
655ed67c
TB
706 TaskState *ts = cpu->opaque;
707
9de5e440 708 int sig;
c227f099 709 target_siginfo_t tinfo;
3d3efba0 710 ucontext_t *uc = puc;
655ed67c 711 struct emulated_sigtable *k;
9de5e440
FB
712
713 /* the CPU emulator uses some host signals to detect exceptions,
eaa449b9 714 we forward to it some signals */
ca587a8e 715 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
eaa449b9 716 && info->si_code > 0) {
b346ff46 717 if (cpu_signal_handler(host_signum, info, puc))
9de5e440
FB
718 return;
719 }
720
721 /* get target signal number */
722 sig = host_to_target_signal(host_signum);
723 if (sig < 1 || sig > TARGET_NSIG)
724 return;
c8ee0a44 725 trace_user_host_signal(env, host_signum, sig);
4d330cee
TB
726
727 rewind_if_in_safe_syscall(puc);
728
9de5e440 729 host_to_target_siginfo_noswap(&tinfo, info);
655ed67c
TB
730 k = &ts->sigtab[sig - 1];
731 k->info = tinfo;
732 k->pending = sig;
733 ts->signal_pending = 1;
734
735 /* Block host signals until target signal handler entered. We
736 * can't block SIGSEGV or SIGBUS while we're executing guest
737 * code in case the guest code provokes one in the window between
738 * now and it getting out to the main loop. Signals will be
739 * unblocked again in process_pending_signals().
1d48fdd9
PM
740 *
741 * WARNING: we cannot use sigfillset() here because the uc_sigmask
742 * field is a kernel sigset_t, which is much smaller than the
743 * libc sigset_t which sigfillset() operates on. Using sigfillset()
744 * would write 0xff bytes off the end of the structure and trash
745 * data on the struct.
746 * We can't use sizeof(uc->uc_sigmask) either, because the libc
747 * headers define the struct field with the wrong (too large) type.
655ed67c 748 */
1d48fdd9 749 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
655ed67c
TB
750 sigdelset(&uc->uc_sigmask, SIGSEGV);
751 sigdelset(&uc->uc_sigmask, SIGBUS);
3d3efba0 752
655ed67c
TB
753 /* interrupt the virtual CPU as soon as possible */
754 cpu_exit(thread_cpu);
66fb9763
FB
755}
756
0da46a6e 757/* do_sigaltstack() returns target values and errnos. */
579a97f7
FB
758/* compare linux/kernel/signal.c:do_sigaltstack() */
759abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
a04e134a
TS
760{
761 int ret;
762 struct target_sigaltstack oss;
5bfce0b7 763 TaskState *ts = (TaskState *)thread_cpu->opaque;
a04e134a
TS
764
765 /* XXX: test errors */
579a97f7 766 if(uoss_addr)
a04e134a 767 {
5bfce0b7
PM
768 __put_user(ts->sigaltstack_used.ss_sp, &oss.ss_sp);
769 __put_user(ts->sigaltstack_used.ss_size, &oss.ss_size);
a04e134a
TS
770 __put_user(sas_ss_flags(sp), &oss.ss_flags);
771 }
772
579a97f7 773 if(uss_addr)
a04e134a 774 {
579a97f7
FB
775 struct target_sigaltstack *uss;
776 struct target_sigaltstack ss;
0903c8be
TM
777 size_t minstacksize = TARGET_MINSIGSTKSZ;
778
779#if defined(TARGET_PPC64)
780 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
781 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
782 if (get_ppc64_abi(image) > 1) {
783 minstacksize = 4096;
784 }
785#endif
a04e134a 786
7d37435b 787 ret = -TARGET_EFAULT;
9eeb8306 788 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
a04e134a 789 goto out;
9eeb8306
RV
790 }
791 __get_user(ss.ss_sp, &uss->ss_sp);
792 __get_user(ss.ss_size, &uss->ss_size);
793 __get_user(ss.ss_flags, &uss->ss_flags);
579a97f7 794 unlock_user_struct(uss, uss_addr, 0);
a04e134a 795
7d37435b
PB
796 ret = -TARGET_EPERM;
797 if (on_sig_stack(sp))
a04e134a
TS
798 goto out;
799
7d37435b
PB
800 ret = -TARGET_EINVAL;
801 if (ss.ss_flags != TARGET_SS_DISABLE
a04e134a
TS
802 && ss.ss_flags != TARGET_SS_ONSTACK
803 && ss.ss_flags != 0)
804 goto out;
805
7d37435b 806 if (ss.ss_flags == TARGET_SS_DISABLE) {
a04e134a
TS
807 ss.ss_size = 0;
808 ss.ss_sp = 0;
7d37435b 809 } else {
0da46a6e 810 ret = -TARGET_ENOMEM;
0903c8be 811 if (ss.ss_size < minstacksize) {
a04e134a 812 goto out;
0903c8be 813 }
7d37435b 814 }
a04e134a 815
5bfce0b7
PM
816 ts->sigaltstack_used.ss_sp = ss.ss_sp;
817 ts->sigaltstack_used.ss_size = ss.ss_size;
a04e134a
TS
818 }
819
579a97f7 820 if (uoss_addr) {
0da46a6e 821 ret = -TARGET_EFAULT;
579a97f7 822 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
a04e134a 823 goto out;
a04e134a
TS
824 }
825
826 ret = 0;
827out:
828 return ret;
829}
830
ef6a778e 831/* do_sigaction() return target values and host errnos */
66fb9763
FB
832int do_sigaction(int sig, const struct target_sigaction *act,
833 struct target_sigaction *oact)
834{
624f7979 835 struct target_sigaction *k;
773b93ee
FB
836 struct sigaction act1;
837 int host_sig;
0da46a6e 838 int ret = 0;
66fb9763 839
6bc024e7
LV
840 trace_signal_do_sigaction_guest(sig, TARGET_NSIG);
841
ef6a778e
TB
842 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
843 return -TARGET_EINVAL;
844 }
845
846 if (block_signals()) {
847 return -TARGET_ERESTARTSYS;
848 }
849
66fb9763 850 k = &sigact_table[sig - 1];
66fb9763 851 if (oact) {
d2565875
RH
852 __put_user(k->_sa_handler, &oact->_sa_handler);
853 __put_user(k->sa_flags, &oact->sa_flags);
7f047de1 854#ifdef TARGET_ARCH_HAS_SA_RESTORER
d2565875 855 __put_user(k->sa_restorer, &oact->sa_restorer);
388bb21a 856#endif
d2565875 857 /* Not swapped. */
624f7979 858 oact->sa_mask = k->sa_mask;
66fb9763
FB
859 }
860 if (act) {
624f7979 861 /* FIXME: This is not threadsafe. */
d2565875
RH
862 __get_user(k->_sa_handler, &act->_sa_handler);
863 __get_user(k->sa_flags, &act->sa_flags);
7f047de1 864#ifdef TARGET_ARCH_HAS_SA_RESTORER
d2565875 865 __get_user(k->sa_restorer, &act->sa_restorer);
388bb21a 866#endif
d2565875 867 /* To be swapped in target_to_host_sigset. */
624f7979 868 k->sa_mask = act->sa_mask;
773b93ee
FB
869
870 /* we update the host linux signal state */
871 host_sig = target_to_host_signal(sig);
6bc024e7
LV
872 trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
873 if (host_sig > SIGRTMAX) {
874 /* we don't have enough host signals to map all target signals */
875 qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
876 sig);
877 /*
878 * we don't return an error here because some programs try to
879 * register an handler for all possible rt signals even if they
880 * don't need it.
881 * An error here can abort them whereas there can be no problem
882 * to not have the signal available later.
883 * This is the case for golang,
884 * See https://github.com/golang/go/issues/33746
885 * So we silently ignore the error.
886 */
887 return 0;
888 }
773b93ee
FB
889 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
890 sigfillset(&act1.sa_mask);
891 act1.sa_flags = SA_SIGINFO;
624f7979 892 if (k->sa_flags & TARGET_SA_RESTART)
773b93ee
FB
893 act1.sa_flags |= SA_RESTART;
894 /* NOTE: it is important to update the host kernel signal
895 ignore state to avoid getting unexpected interrupted
896 syscalls */
624f7979 897 if (k->_sa_handler == TARGET_SIG_IGN) {
773b93ee 898 act1.sa_sigaction = (void *)SIG_IGN;
624f7979 899 } else if (k->_sa_handler == TARGET_SIG_DFL) {
ca587a8e
AJ
900 if (fatal_signal (sig))
901 act1.sa_sigaction = host_signal_handler;
902 else
903 act1.sa_sigaction = (void *)SIG_DFL;
773b93ee
FB
904 } else {
905 act1.sa_sigaction = host_signal_handler;
906 }
0da46a6e 907 ret = sigaction(host_sig, &act1, NULL);
773b93ee 908 }
66fb9763 909 }
0da46a6e 910 return ret;
66fb9763
FB
911}
912
31efaef1
PM
/*
 * Deliver one pending guest signal on this CPU.
 *
 * Looks up the guest-registered sigaction for @sig and either ignores
 * the signal, performs the default action (job-control stop, dump core
 * and abort, or ignore), or builds a signal frame on the guest stack so
 * the guest's handler runs when the CPU loop resumes.
 *
 * @cpu_env: target CPU state of the thread taking the signal
 * @sig:     target signal number (1..TARGET_NSIG)
 * @k:       pending-signal slot holding the queued siginfo; cleared here
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /* Give an attached gdb a chance to intercept or remap the signal;
       a return of 0 means gdb consumed it, so treat it as ignored. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals. The others are job control
           or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            /* emulate the default job-control action by stopping ourselves */
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler; if we arrived here out of
           sigsuspend, merge into the mask that sigsuspend was using */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: one-shot handler, revert to the default action */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
e902d588
PM
998
/*
 * Drain all deliverable pending guest signals for this thread.
 *
 * Called from the CPU loop whenever ts->signal_pending is set. All host
 * signals are blocked while the pending tables are examined, so the
 * host_signal_handler cannot mutate them under us. The forced
 * synchronous signal (e.g. SIGSEGV raised by the guest itself) is
 * delivered first, then the async per-signal table is scanned; the scan
 * restarts whenever a delivery may have queued a new synchronous signal.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        /* block every host signal while we inspect/modify the tables */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                /* force delivery: unblock it and reset to the default action */
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* pick the mask that is currently in effect for this thread */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS must never be blocked on the host: they are the
           guest-fault signals qemu itself needs to receive synchronously */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}