/*
 * Emulation of BSD signals
 *
 * Copyright (c) 2003 - 2008 Fabrice Bellard
 * Copyright (c) 2013 Stacey Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "trace.h"
#include "hw/core/tcg-cpu-ops.h"
#include "host-signal.h"

/*
 * Stubbed out routines until we merge signal support from bsd-user
 * fork.
 */

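/*
 * Per-signal table of the guest's registered handlers; entries are seeded
 * from the host's dispositions in signal_init().
 */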
static struct target_sigaction sigact_table[TARGET_NSIG];
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc);

/*
 * The BSD ABIs use the same signal numbers across all the CPU architectures,
 * so (unlike Linux) these functions are just the identity mapping. This might
 * not be true for XyzBSD running on AbcBSD, which doesn't currently work.
 */
int host_to_target_signal(int sig)
{
    return sig;
}

int target_to_host_signal(int sig)
{
    return sig;
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    ucontext_t *uc = (ucontext_t *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

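/*
 * True if the siginfo for this target signal carries a valid trap number,
 * i.e. the _fault member of the _reason union is meaningful.
 */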
static bool has_trapno(int tsig)
{
    return tsig == TARGET_SIGILL ||
        tsig == TARGET_SIGFPE ||
        tsig == TARGET_SIGSEGV ||
        tsig == TARGET_SIGBUS ||
        tsig == TARGET_SIGTRAP;
}

/* Siginfo conversion. */

/*
 * Populate tinfo w/o swapping based on guessing which fields are valid.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type = QEMU_SI_NOINFO; /* fallback when nothing below matches */

    /*
     * Make sure that the variable portion of the target siginfo is zeroed
     * out so we don't leak anything into that.
     */
    memset(&tinfo->_reason, 0, sizeof(tinfo->_reason));

    /*
     * This is awkward, because we have to use a combination of the si_code
     * and si_signo to figure out which of the union's members are valid. We
     * therefore make our best guess.
     *
     * Once we have made our guess, we record it in the top 8 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    tinfo->si_signo = sig;
    tinfo->si_errno = info->si_errno;
    tinfo->si_code = info->si_code;
    tinfo->si_pid = info->si_pid;
    tinfo->si_uid = info->si_uid;
    tinfo->si_status = info->si_status;
    tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr;
    /*
     * si_value is opaque to the kernel. On all FreeBSD platforms,
     * sizeof(sival_ptr) >= sizeof(sival_int) so the following
     * will always copy the larger element.
     */
    tinfo->si_value.sival_ptr =
        (abi_ulong)(unsigned long)info->si_value.sival_ptr;

    switch (si_code) {
        /*
         * All the SI_xxx codes that are defined here are global to
         * all the signals (they have values that none of the other,
         * more specific signal info will set).
         */
    case SI_USER:
    case SI_LWP:
    case SI_KERNEL:
    case SI_QUEUE:
    case SI_ASYNCIO:
        /*
         * Only the fixed parts are valid (though FreeBSD doesn't always
         * set all the fields to non-zero values).
         */
        si_type = QEMU_SI_NOINFO;
        break;
    case SI_TIMER:
        tinfo->_reason._timer._timerid = info->_reason._timer._timerid;
        tinfo->_reason._timer._overrun = info->_reason._timer._overrun;
        si_type = QEMU_SI_TIMER;
        break;
    case SI_MESGQ:
        tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd;
        si_type = QEMU_SI_MESGQ;
        break;
    default:
        /*
         * We have to go based on the signal number now to figure out
         * what's valid.
         */
        if (has_trapno(sig)) {
            tinfo->_reason._fault._trapno = info->_reason._fault._trapno;
            si_type = QEMU_SI_FAULT;
        }
#ifdef TARGET_SIGPOLL
        /*
         * FreeBSD never had SIGPOLL, but emulates it for Linux, so there's
         * a chance it may pop up in the future.
         */
        if (sig == TARGET_SIGPOLL) {
            tinfo->_reason._poll._band = info->_reason._poll._band;
            si_type = QEMU_SI_POLL;
        }
#endif
        /*
         * We're unsure whether this can actually be generated, and our
         * support for capsicum is somewhere between weak and non-existent,
         * but if we get one, then we know what to save.
         */
        if (sig == TARGET_SIGTRAP) {
            tinfo->_reason._capsicum._syscall =
                info->_reason._capsicum._syscall;
            si_type = QEMU_SI_CAPSICUM;
        }
        break;
    }
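    /*
     * Stash our si_type guess in the top byte of si_code; tswap_siginfo()
     * strips it back out before the value reaches the guest.
     */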
    tinfo->si_code = deposit32(si_code, 24, 8, si_type);
}

/* Returns 1 if the given signal should dump core if not handled. */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

/* Abort execution with signal. */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    int core_dumped = 0;
    int host_sig;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    gdb_signalled(env, target_sig);

    /* Dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        struct rlimit nodump;

        /*
         * We already dumped the core of the target process; we don't want
         * a core dump of QEMU itself.
         */
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) "
            "- %s\n", target_sig, strsignal(host_sig), "core dumped");
    }

    /*
     * The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here we install the
     * default signal handler, send ourselves the signal, and wait
     * for it to arrive.
     */
    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /*
     * Make sure the signal isn't masked (just reuse the mask inside
     * of act).
     */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/*
 * Queue a signal so that it will be sent to the virtual CPU as soon as
 * possible.
 */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    qemu_log_mask(LOG_UNIMP, "No signal queueing, dropping signal %d\n", sig);
}

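/*
 * Return 1 when the guest's default action for this signal is to terminate
 * the process (neither "ignore" nor job control); only these get a host
 * handler installed by signal_init().
 */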
static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
    case TARGET_SIGINFO:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info.si_addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

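/*
 * First-level host signal handler: convert the host siginfo into a pending
 * guest signal and kick the CPU out of the TCG execution loop. Synchronous
 * SIGSEGV/SIGBUS faults are instead unwound and delivered immediately.
 */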
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling with respect to signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* Get the target signal number. */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(cpu, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);

    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Interrupt the virtual CPU as soon as possible. */
    cpu_exit(thread_cpu);
}

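/*
 * Record the inherited host signal mask and take over the host's handlers:
 * every default-fatal signal is routed through host_signal_handler() so it
 * can be forwarded to (or dumped for) the guest.
 */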
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_sigaction = host_signal_handler;
    act.sa_flags = SA_SIGINFO;

    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /*
         * If there's already a handler installed then something has
         * gone horribly wrong, so don't even try to handle that case.
         * Install some handlers for our own use. We need at least
         * SIGSEGV and SIGBUS, to detect exceptions. We cannot just
         * trap all signals because it affects syscall interrupt
         * behavior. But do trap all default-fatal signals.
         */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

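/*
 * Stub: pending guest signals are recorded by host_signal_handler() but not
 * yet delivered; delivery comes with the signal support merged from the
 * bsd-user fork.
 */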
void process_pending_signals(CPUArchState *cpu_env)
{
}

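/*
 * Report a guest SEGV: let the accelerator record the fault, queue a
 * SIGSEGV with the right MAPERR/ACCERR code, and unwind to the faulting
 * instruction.
 */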
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

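/* As above, but for a misaligned-access SIGBUS (TARGET_BUS_ADRALN). */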
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}