/*
 * Emulation of BSD signals
 *
 * Copyright (c) 2003 - 2008 Fabrice Bellard
 * Copyright (c) 2013 Stacey Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "trace.h"
#include "hw/core/tcg-cpu-ops.h"
#include "host-signal.h"

static struct target_sigaction sigact_table[TARGET_NSIG];
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc);
static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s);

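/*
 * The subtraction below is unsigned, so an sp below ss_sp wraps to a huge
 * value and fails the < ss_size test; the single comparison thus checks
 * ss_sp <= sp < ss_sp + ss_size.
 */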
static inline int on_sig_stack(TaskState *ts, unsigned long sp)
{
    return sp - ts->sigaltstack_used.ss_sp < ts->sigaltstack_used.ss_size;
}

static inline int sas_ss_flags(TaskState *ts, unsigned long sp)
{
    return ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE :
        on_sig_stack(ts, sp) ? SS_ONSTACK : 0;
}

/*
 * The BSD ABIs use the same signal numbers across all the CPU architectures, so
 * (unlike Linux) these functions are just the identity mapping. This might not
 * be true for XyzBSD running on AbcBSD, which doesn't currently work.
 */
int host_to_target_signal(int sig)
{
    return sig;
}

int target_to_host_signal(int sig)
{
    return sig;
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

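/*
 * Signal numbers are 1-based, so the bit for signal N lives at bit
 * (N - 1) % TARGET_NSIG_BPW of word (N - 1) / TARGET_NSIG_BPW.
 */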
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    uint32_t mask = (uint32_t)1 << (signum % TARGET_NSIG_BPW);
    set->__bits[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return (set->__bits[signum / TARGET_NSIG_BPW] & mask) != 0;
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
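/*
 * If the PC is inside the safe_syscall window, the syscall has not been
 * issued yet; winding the PC back to safe_syscall_start lets the guarded
 * path re-check signal_pending and bail out before entering the kernel
 * (this mirrors the linux-user safe-syscall scheme).
 */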
static inline void rewind_if_in_safe_syscall(void *puc)
{
    ucontext_t *uc = (ucontext_t *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

/*
 * Note: The following take advantage of the BSD signal property that all
 * signals are available on all architectures.
 */
static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;

    target_sigemptyset(d);
    for (i = 1; i <= NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < _SIG_WORDS; i++) {
        d->__bits[i] = tswap32(d1.__bits[i]);
    }
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;

    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.__bits[i] = tswap32(s->__bits[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

static bool has_trapno(int tsig)
{
    return tsig == TARGET_SIGILL ||
        tsig == TARGET_SIGFPE ||
        tsig == TARGET_SIGSEGV ||
        tsig == TARGET_SIGBUS ||
        tsig == TARGET_SIGTRAP;
}

/* Siginfo conversion. */

/*
 * Populate tinfo w/o swapping based on guessing which fields are valid.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    /*
     * Make sure that the variable portion of the target siginfo is zeroed
     * out so we don't leak anything into that.
     */
    memset(&tinfo->_reason, 0, sizeof(tinfo->_reason));

    /*
     * This is awkward, because we have to use a combination of the si_code and
     * si_signo to figure out which of the union's members are valid. We
     * therefore make our best guess.
     *
     * Once we have made our guess, we record it in the top 8 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    tinfo->si_signo = sig;
    tinfo->si_errno = info->si_errno;
    tinfo->si_code = info->si_code;
    tinfo->si_pid = info->si_pid;
    tinfo->si_uid = info->si_uid;
    tinfo->si_status = info->si_status;
    tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr;
    /*
     * si_value is opaque to the kernel. On all FreeBSD platforms,
     * sizeof(sival_ptr) >= sizeof(sival_int), so the following
     * will always copy the larger element.
     */
    tinfo->si_value.sival_ptr =
        (abi_ulong)(unsigned long)info->si_value.sival_ptr;

    switch (si_code) {
    /*
     * All the SI_xxx codes that are defined here are global to
     * all the signals (they have values that none of the other,
     * more specific signal info will set).
     */
    case SI_USER:
    case SI_LWP:
    case SI_KERNEL:
    case SI_QUEUE:
    case SI_ASYNCIO:
        /*
         * Only the fixed parts are valid (though FreeBSD doesn't always
         * set all the fields to non-zero values).
         */
        si_type = QEMU_SI_NOINFO;
        break;
    case SI_TIMER:
        tinfo->_reason._timer._timerid = info->_reason._timer._timerid;
        tinfo->_reason._timer._overrun = info->_reason._timer._overrun;
        si_type = QEMU_SI_TIMER;
        break;
    case SI_MESGQ:
        tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd;
        si_type = QEMU_SI_MESGQ;
        break;
    default:
        /*
         * We have to go based on the signal number now to figure out
         * what's valid.
         */
        if (has_trapno(sig)) {
            tinfo->_reason._fault._trapno = info->_reason._fault._trapno;
            si_type = QEMU_SI_FAULT;
        }
#ifdef TARGET_SIGPOLL
        /*
         * FreeBSD never had SIGPOLL, but emulates it for Linux, so there's
         * a chance it may pop up in the future.
         */
        if (sig == TARGET_SIGPOLL) {
            tinfo->_reason._poll._band = info->_reason._poll._band;
            si_type = QEMU_SI_POLL;
        }
#endif
        /*
         * We're unsure whether this can actually be generated, and our
         * support for capsicum is somewhere between weak and non-existent,
         * but if we get one, then we know what to save.
         */
        if (sig == TARGET_SIGTRAP) {
            tinfo->_reason._capsicum._syscall =
                info->_reason._capsicum._syscall;
            si_type = QEMU_SI_CAPSICUM;
        }
        break;
    }
    tinfo->si_code = deposit32(si_code, 24, 8, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo, const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 24, 8);
    int si_code = sextract32(info->si_code, 0, 24);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code); /* Zero out si_type, it's internal */
    __put_user(info->si_pid, &tinfo->si_pid);
    __put_user(info->si_uid, &tinfo->si_uid);
    __put_user(info->si_status, &tinfo->si_status);
    __put_user(info->si_addr, &tinfo->si_addr);
    /*
     * Unswapped, because we passed it through mostly untouched. si_value is
     * opaque to the kernel, so we didn't bother with potentially wasting cycles
     * to swap it into host byte order.
     */
    tinfo->si_value.sival_ptr = info->si_value.sival_ptr;

    /*
     * We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_NOINFO: /* No additional info */
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_reason._fault._trapno,
                   &tinfo->_reason._fault._trapno);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_reason._timer._timerid,
                   &tinfo->_reason._timer._timerid);
        __put_user(info->_reason._timer._overrun,
                   &tinfo->_reason._timer._overrun);
        break;
    case QEMU_SI_MESGQ:
        __put_user(info->_reason._mesgq._mqd, &tinfo->_reason._mesgq._mqd);
        break;
    case QEMU_SI_POLL:
        /* Note: Not generated on FreeBSD */
        __put_user(info->_reason._poll._band, &tinfo->_reason._poll._band);
        break;
    case QEMU_SI_CAPSICUM:
        __put_user(info->_reason._capsicum._syscall,
                   &tinfo->_reason._capsicum._syscall);
        break;
    default:
        g_assert_not_reached();
    }
}

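/*
 * Block all host signals and mark a (potential) guest signal as pending.
 * Returns the previous value of ts->signal_pending: if non-zero, a signal
 * was already pending and callers such as do_sigaction() back out with
 * -TARGET_ERESTART so the operation is retried.
 */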
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /*
     * It's OK to block everything including SIGSEGV, because we won't run any
     * further guest code before unblocking signals in
     * process_pending_signals(). We depend on the FreeBSD behavior here where
     * this will only affect this thread's signal mask. We don't use
     * pthread_sigmask, which might seem more correct, because that routine
     * also does odd things with SIGCANCEL to implement pthread_cancel().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Returns 1 if given signal should dump core if not handled. */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

/* Abort execution with signal. */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    int core_dumped = 0;
    int host_sig;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    gdb_signalled(env, target_sig);

    /* Dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        struct rlimit nodump;

        /*
         * We already dumped the core of the target process, we don't want
         * a coredump of qemu itself.
         */
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) "
            "- %s\n", target_sig, strsignal(host_sig), "core dumped");
    }

    /*
     * The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive.
     */
    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /*
     * Make sure the signal isn't masked (just reuse the mask inside
     * of act).
     */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/*
 * Queue a signal so that it will be sent to the virtual CPU as soon as
 * possible.
 */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

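    /*
     * Stash si_type in the top 8 bits of si_code, mirroring what
     * host_to_target_siginfo_noswap() does for host-generated signals.
     */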
    info->si_code = deposit32(info->si_code, 24, 8, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* Signal that a new signal is pending. */
    qatomic_set(&ts->signal_pending, 1);
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
    case TARGET_SIGINFO:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info.si_addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* Get the target signal number. */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(cpu, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);

    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Interrupt the virtual CPU as soon as possible. */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare to sys/kern/kern_sig.c sys_sigaltstack() and kern_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    int ret;
    target_stack_t oss;

    if (uoss_addr) {
        /* Save current signal stack params */
        oss.ss_sp = tswapl(ts->sigaltstack_used.ss_sp);
        oss.ss_size = tswapl(ts->sigaltstack_used.ss_size);
        oss.ss_flags = tswapl(sas_ss_flags(ts, sp));
    }

    if (uss_addr) {
        target_stack_t *uss;
        target_stack_t ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

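        /*
         * As in the kernel's kern_sigaltstack(), refuse to replace the
         * alternate stack while the thread is currently running on it.
         */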
        ret = -TARGET_EPERM;
        if (on_sig_stack(ts, sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns host values and errnos. */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if ((sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) &&
        act != NULL && act->_sa_handler != TARGET_SIG_DFL) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTART;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        oact->_sa_handler = tswapal(k->_sa_handler);
        oact->sa_flags = tswap32(k->sa_flags);
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        k->_sa_handler = tswapal(act->_sa_handler);
        k->sa_flags = tswap32(act->sa_flags);
        k->sa_mask = act->sa_mask;

        /* Update the host signal state. */
        host_sig = target_to_host_signal(sig);
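        /*
         * Leave host SIGSEGV and SIGBUS alone: they must keep pointing at
         * host_signal_handler so faults from guest code are still caught
         * (see signal_init()).
         */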
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            memset(&act1, 0, sizeof(struct sigaction));
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /*
             * Note: It is important to update the host kernel signal mask to
             * avoid getting unexpected interrupted system calls.
             */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static inline abi_ulong get_sigframe(struct target_sigaction *ka,
                                     CPUArchState *env, size_t frame_size)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    abi_ulong sp;

    /* Use default user stack */
    sp = get_sp_from_cpustate(env);

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && sas_ss_flags(ts, sp) == 0) {
        sp = ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }

    /* TODO: make this a target_arch function / define */
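    /*
     * AAPCS requires the stack to be 8-byte aligned at a public interface,
     * and AAPCS64 requires 16-byte alignment, hence the masks below.
     */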
#if defined(TARGET_ARM)
    return (sp - frame_size) & ~7;
#elif defined(TARGET_AARCH64)
    return (sp - frame_size) & ~15;
#else
    return sp - frame_size;
#endif
}

/* compare to sys/$M/$M/exec_machdep.c sendsig() and sys/kern/kern_sig.c sigexit() */

static void setup_frame(int sig, int code, struct target_sigaction *ka,
    target_sigset_t *set, target_siginfo_t *tinfo, CPUArchState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        unlock_user_struct(frame, frame_addr, 1);
        dump_core_and_abort(TARGET_SIGILL);
        return;
    }

    memset(frame, 0, sizeof(*frame));
    setup_sigframe_arch(env, frame_addr, frame, 0);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->__bits[i], &frame->sf_uc.uc_sigmask.__bits[i]);
    }

    if (tinfo) {
        frame->sf_si.si_signo = tinfo->si_signo;
        frame->sf_si.si_errno = tinfo->si_errno;
        frame->sf_si.si_code = tinfo->si_code;
        frame->sf_si.si_pid = tinfo->si_pid;
        frame->sf_si.si_uid = tinfo->si_uid;
        frame->sf_si.si_status = tinfo->si_status;
        frame->sf_si.si_addr = tinfo->si_addr;
        /* see host_to_target_siginfo_noswap() for more details */
        frame->sf_si.si_value.sival_ptr = tinfo->si_value.sival_ptr;
        /*
         * At this point, whatever is in the _reason union is complete
         * and in target order, so just copy the whole thing over, even
         * if it's too large for this specific signal.
         * host_to_target_siginfo_noswap() and tswap_siginfo() have ensured
         * that's so.
         */
        memcpy(&frame->sf_si._reason, &tinfo->_reason,
               sizeof(tinfo->_reason));
    }

    set_sigtramp_args(env, sig, frame, frame_addr, ka);

    unlock_user_struct(frame, frame_addr, 1);
}

static int reset_signal_mask(target_ucontext_t *ucontext)
{
    int i;
    sigset_t blocked;
    target_sigset_t target_set;
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        if (__get_user(target_set.__bits[i],
                       &ucontext->uc_sigmask.__bits[i])) {
            return -TARGET_EFAULT;
        }
    }
    target_to_host_sigset_internal(&blocked, &target_set);
    ts->signal_mask = blocked;

    return 0;
}

/* See sys/$M/$M/exec_machdep.c sigreturn() */
long do_sigreturn(CPUArchState *env, abi_ulong addr)
{
    long ret;
    abi_ulong target_ucontext;
    target_ucontext_t *ucontext = NULL;

    /* Get the target ucontext address from the stack frame */
    ret = get_ucontext_sigreturn(env, addr, &target_ucontext);
    if (is_error(ret)) {
        return ret;
    }
    trace_user_do_sigreturn(env, addr);
    if (!lock_user_struct(VERIFY_READ, ucontext, target_ucontext, 0)) {
        goto badframe;
    }

    /* Set the register state back to before the signal. */
    if (set_mcontext(env, &ucontext->uc_mcontext, 1)) {
        goto badframe;
    }

    /* And reset the signal mask. */
    if (reset_signal_mask(ucontext)) {
        goto badframe;
    }

    unlock_user_struct(ucontext, target_ucontext, 0);
    return -TARGET_EJUSTRETURN;

badframe:
    if (ucontext != NULL) {
        unlock_user_struct(ucontext, target_ucontext, 0);
    }
    return -TARGET_EFAULT;
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_sigaction = host_signal_handler;
    act.sa_flags = SA_SIGINFO;

    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /*
         * If there's already a handler installed then something has
         * gone horribly wrong, so don't even try to handle that case.
         * Install some handlers for our own use. We need at least
         * SIGSEGV and SIGBUS, to detect exceptions. We cannot just
         * trap all signals because it affects syscall interrupt
         * behavior. But do trap all default-fatal signals.
         */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

static void handle_pending_signal(CPUArchState *env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct target_sigaction *sa;
    int code;
    sigset_t set;
    abi_ulong handler;
    target_siginfo_t tinfo;
    target_sigset_t target_old_set;

    trace_user_handle_signal(env, sig);

    k->pending = 0;

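    /*
     * gdb_handlesig() returns 0 when an attached debugger swallows the
     * signal; treat that as if the handler were SIG_IGN.
     */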
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /*
         * Default handler: ignore some signals. The others are job
         * control or fatal.
         */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD && sig != TARGET_SIGURG &&
                   sig != TARGET_SIGINFO && sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /*
         * SA_NODEFER indicates that the current signal should not be
         * blocked during the handler.
         */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /*
         * Save the previous blocked signal state to restore it at the
         * end of the signal execution (see do_sigreturn).
         */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = false;
        sigprocmask(SIG_SETMASK, &ts->signal_mask, NULL);

        /* XXX VM86 on x86 ??? */

        code = k->info.si_code; /* From host, so no si_type */
        /* prepare the stack frame of the virtual CPU */
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            tswap_siginfo(&tinfo, &k->info);
            setup_frame(sig, code, sa, &target_old_set, &tinfo, env);
        } else {
            setup_frame(sig, code, sa, &target_old_set, NULL, env);
        }
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    int sig;
    sigset_t *blocked_set, set;
    struct emulated_sigtable *k;
    TaskState *ts = cpu->opaque;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /*
             * Synchronous signals are forced by the emulated CPU in some way.
             * If they are set to ignore, restore the default handler (see
             * sys/kern/kern_sig.c trapsignal() and execsigs() for this
             * behavior), though this may be done only when forcing exit
             * for non-SIGCHLD signals.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal(sig)) ||
                sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal(sig));
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }
            handle_pending_signal(env, sig, &ts->sync_signal);
        }

        k = ts->sigtab;
        for (sig = 1; sig <= TARGET_NSIG; sig++, k++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;
            if (k->pending &&
                !sigismember(blocked_set, target_to_host_signal(sig))) {
                handle_pending_signal(env, sig, k);
                /*
                 * Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /*
         * Unblock signals and check one more time. Unblocking signals may cause
         * us to take another host signal, which will set signal_pending again.
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = false;
        set = ts->signal_mask;
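        /*
         * Keep SIGSEGV and SIGBUS unblocked even here: faults from guest
         * code must always reach host_signal_handler (see the matching
         * logic at the end of host_signal_handler()).
         */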
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = false;
}

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}