]>
Commit | Line | Data |
---|---|---|
84778508 BS |
1 | /* |
2 | * Emulation of BSD signals | |
3 | * | |
4 | * Copyright (c) 2003 - 2008 Fabrice Bellard | |
1366ef81 | 5 | * Copyright (c) 2013 Stacey Son |
84778508 BS |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation; either version 2 of the License, or | |
10 | * (at your option) any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
8167ee88 | 18 | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
84778508 | 19 | */ |
84778508 | 20 | |
5abfac27 | 21 | #include "qemu/osdep.h" |
84778508 | 22 | #include "qemu.h" |
0ef59989 | 23 | #include "signal-common.h" |
6ddc1abe | 24 | #include "trace.h" |
fc9f9bdd | 25 | #include "hw/core/tcg-cpu-ops.h" |
85fc1b5d | 26 | #include "host-signal.h" |
84778508 | 27 | |
835b04ed WL |
28 | /* |
29 | * Stubbed out routines until we merge signal support from bsd-user | |
30 | * fork. | |
31 | */ | |
32 | ||
149076ad WL |
33 | static struct target_sigaction sigact_table[TARGET_NSIG]; |
34 | static void host_signal_handler(int host_sig, siginfo_t *info, void *puc); | |
c93cbac1 WL |
35 | static void target_to_host_sigset_internal(sigset_t *d, |
36 | const target_sigset_t *s); | |
37 | ||
149076ad | 38 | |
/*
 * The BSD ABIs use the same signal numbers across all the CPU architectures,
 * so (unlike Linux) this function is just the identity mapping. This might
 * not be true for XyzBSD running on AbcBSD, which doesn't currently work.
 */
int host_to_target_signal(int sig)
{
    return sig;
}
/* Identity mapping: BSD signal numbers are the same on host and target. */
int target_to_host_signal(int sig)
{
    return sig;
}
c93cbac1 WL |
54 | static inline void target_sigemptyset(target_sigset_t *set) |
55 | { | |
56 | memset(set, 0, sizeof(*set)); | |
57 | } | |
58 | ||
59 | static inline void target_sigaddset(target_sigset_t *set, int signum) | |
60 | { | |
61 | signum--; | |
62 | uint32_t mask = (uint32_t)1 << (signum % TARGET_NSIG_BPW); | |
63 | set->__bits[signum / TARGET_NSIG_BPW] |= mask; | |
64 | } | |
65 | ||
66 | static inline int target_sigismember(const target_sigset_t *set, int signum) | |
67 | { | |
68 | signum--; | |
69 | abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); | |
70 | return (set->__bits[signum / TARGET_NSIG_BPW] & mask) != 0; | |
71 | } | |
72 | ||
aae57ac3 WL |
73 | /* Adjust the signal context to rewind out of safe-syscall if we're in it */ |
74 | static inline void rewind_if_in_safe_syscall(void *puc) | |
75 | { | |
76 | ucontext_t *uc = (ucontext_t *)puc; | |
77 | uintptr_t pcreg = host_signal_pc(uc); | |
78 | ||
79 | if (pcreg > (uintptr_t)safe_syscall_start | |
80 | && pcreg < (uintptr_t)safe_syscall_end) { | |
81 | host_signal_set_pc(uc, (uintptr_t)safe_syscall_start); | |
82 | } | |
83 | } | |
84 | ||
c93cbac1 WL |
85 | /* |
86 | * Note: The following take advantage of the BSD signal property that all | |
87 | * signals are available on all architectures. | |
88 | */ | |
89 | static void host_to_target_sigset_internal(target_sigset_t *d, | |
90 | const sigset_t *s) | |
91 | { | |
92 | int i; | |
93 | ||
94 | target_sigemptyset(d); | |
95 | for (i = 1; i <= NSIG; i++) { | |
96 | if (sigismember(s, i)) { | |
97 | target_sigaddset(d, host_to_target_signal(i)); | |
98 | } | |
99 | } | |
100 | } | |
101 | ||
102 | void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) | |
103 | { | |
104 | target_sigset_t d1; | |
105 | int i; | |
106 | ||
107 | host_to_target_sigset_internal(&d1, s); | |
108 | for (i = 0; i < _SIG_WORDS; i++) { | |
109 | d->__bits[i] = tswap32(d1.__bits[i]); | |
110 | } | |
111 | } | |
112 | ||
113 | static void target_to_host_sigset_internal(sigset_t *d, | |
114 | const target_sigset_t *s) | |
115 | { | |
116 | int i; | |
117 | ||
118 | sigemptyset(d); | |
119 | for (i = 1; i <= TARGET_NSIG; i++) { | |
120 | if (target_sigismember(s, i)) { | |
121 | sigaddset(d, target_to_host_signal(i)); | |
122 | } | |
123 | } | |
124 | } | |
125 | ||
126 | void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) | |
127 | { | |
128 | target_sigset_t s1; | |
129 | int i; | |
130 | ||
131 | for (i = 0; i < TARGET_NSIG_WORDS; i++) { | |
132 | s1.__bits[i] = tswap32(s->__bits[i]); | |
133 | } | |
134 | target_to_host_sigset_internal(d, &s1); | |
135 | } | |
136 | ||
c34f2aaf WL |
137 | static bool has_trapno(int tsig) |
138 | { | |
139 | return tsig == TARGET_SIGILL || | |
140 | tsig == TARGET_SIGFPE || | |
141 | tsig == TARGET_SIGSEGV || | |
142 | tsig == TARGET_SIGBUS || | |
143 | tsig == TARGET_SIGTRAP; | |
144 | } | |
145 | ||
c34f2aaf WL |
146 | /* Siginfo conversion. */ |
147 | ||
148 | /* | |
149 | * Populate tinfo w/o swapping based on guessing which fields are valid. | |
150 | */ | |
151 | static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, | |
152 | const siginfo_t *info) | |
153 | { | |
154 | int sig = host_to_target_signal(info->si_signo); | |
155 | int si_code = info->si_code; | |
156 | int si_type; | |
157 | ||
158 | /* | |
159 | * Make sure we that the variable portion of the target siginfo is zeroed | |
160 | * out so we don't leak anything into that. | |
161 | */ | |
162 | memset(&tinfo->_reason, 0, sizeof(tinfo->_reason)); | |
163 | ||
164 | /* | |
165 | * This is awkward, because we have to use a combination of the si_code and | |
166 | * si_signo to figure out which of the union's members are valid.o We | |
167 | * therefore make our best guess. | |
168 | * | |
169 | * Once we have made our guess, we record it in the top 16 bits of | |
170 | * the si_code, so that tswap_siginfo() later can use it. | |
171 | * tswap_siginfo() will strip these top bits out before writing | |
172 | * si_code to the guest (sign-extending the lower bits). | |
173 | */ | |
174 | tinfo->si_signo = sig; | |
175 | tinfo->si_errno = info->si_errno; | |
176 | tinfo->si_code = info->si_code; | |
177 | tinfo->si_pid = info->si_pid; | |
178 | tinfo->si_uid = info->si_uid; | |
179 | tinfo->si_status = info->si_status; | |
180 | tinfo->si_addr = (abi_ulong)(unsigned long)info->si_addr; | |
181 | /* | |
182 | * si_value is opaque to kernel. On all FreeBSD platforms, | |
183 | * sizeof(sival_ptr) >= sizeof(sival_int) so the following | |
184 | * always will copy the larger element. | |
185 | */ | |
186 | tinfo->si_value.sival_ptr = | |
187 | (abi_ulong)(unsigned long)info->si_value.sival_ptr; | |
188 | ||
189 | switch (si_code) { | |
190 | /* | |
191 | * All the SI_xxx codes that are defined here are global to | |
192 | * all the signals (they have values that none of the other, | |
193 | * more specific signal info will set). | |
194 | */ | |
195 | case SI_USER: | |
196 | case SI_LWP: | |
197 | case SI_KERNEL: | |
198 | case SI_QUEUE: | |
199 | case SI_ASYNCIO: | |
200 | /* | |
201 | * Only the fixed parts are valid (though FreeBSD doesn't always | |
202 | * set all the fields to non-zero values. | |
203 | */ | |
204 | si_type = QEMU_SI_NOINFO; | |
205 | break; | |
206 | case SI_TIMER: | |
207 | tinfo->_reason._timer._timerid = info->_reason._timer._timerid; | |
208 | tinfo->_reason._timer._overrun = info->_reason._timer._overrun; | |
209 | si_type = QEMU_SI_TIMER; | |
210 | break; | |
211 | case SI_MESGQ: | |
212 | tinfo->_reason._mesgq._mqd = info->_reason._mesgq._mqd; | |
213 | si_type = QEMU_SI_MESGQ; | |
214 | break; | |
215 | default: | |
216 | /* | |
217 | * We have to go based on the signal number now to figure out | |
218 | * what's valid. | |
219 | */ | |
220 | if (has_trapno(sig)) { | |
221 | tinfo->_reason._fault._trapno = info->_reason._fault._trapno; | |
222 | si_type = QEMU_SI_FAULT; | |
223 | } | |
224 | #ifdef TARGET_SIGPOLL | |
225 | /* | |
226 | * FreeBSD never had SIGPOLL, but emulates it for Linux so there's | |
227 | * a chance it may popup in the future. | |
228 | */ | |
229 | if (sig == TARGET_SIGPOLL) { | |
230 | tinfo->_reason._poll._band = info->_reason._poll._band; | |
231 | si_type = QEMU_SI_POLL; | |
232 | } | |
233 | #endif | |
234 | /* | |
235 | * Unsure that this can actually be generated, and our support for | |
236 | * capsicum is somewhere between weak and non-existant, but if we get | |
237 | * one, then we know what to save. | |
238 | */ | |
239 | if (sig == TARGET_SIGTRAP) { | |
240 | tinfo->_reason._capsicum._syscall = | |
241 | info->_reason._capsicum._syscall; | |
242 | si_type = QEMU_SI_CAPSICUM; | |
243 | } | |
244 | break; | |
245 | } | |
246 | tinfo->si_code = deposit32(si_code, 24, 8, si_type); | |
247 | } | |
248 | ||
37714547 WL |
249 | /* Returns 1 if given signal should dump core if not handled. */ |
250 | static int core_dump_signal(int sig) | |
251 | { | |
252 | switch (sig) { | |
253 | case TARGET_SIGABRT: | |
254 | case TARGET_SIGFPE: | |
255 | case TARGET_SIGILL: | |
256 | case TARGET_SIGQUIT: | |
257 | case TARGET_SIGSEGV: | |
258 | case TARGET_SIGTRAP: | |
259 | case TARGET_SIGBUS: | |
260 | return 1; | |
261 | default: | |
262 | return 0; | |
263 | } | |
264 | } | |
265 | ||
266 | /* Abort execution with signal. */ | |
267 | static void QEMU_NORETURN dump_core_and_abort(int target_sig) | |
268 | { | |
269 | CPUArchState *env = thread_cpu->env_ptr; | |
270 | CPUState *cpu = env_cpu(env); | |
271 | TaskState *ts = cpu->opaque; | |
272 | int core_dumped = 0; | |
273 | int host_sig; | |
274 | struct sigaction act; | |
275 | ||
276 | host_sig = target_to_host_signal(target_sig); | |
277 | gdb_signalled(env, target_sig); | |
278 | ||
279 | /* Dump core if supported by target binary format */ | |
280 | if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { | |
281 | stop_all_tasks(); | |
282 | core_dumped = | |
283 | ((*ts->bprm->core_dump)(target_sig, env) == 0); | |
284 | } | |
285 | if (core_dumped) { | |
286 | struct rlimit nodump; | |
287 | ||
288 | /* | |
289 | * We already dumped the core of target process, we don't want | |
290 | * a coredump of qemu itself. | |
291 | */ | |
292 | getrlimit(RLIMIT_CORE, &nodump); | |
293 | nodump.rlim_cur = 0; | |
294 | setrlimit(RLIMIT_CORE, &nodump); | |
295 | (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) " | |
296 | "- %s\n", target_sig, strsignal(host_sig), "core dumped"); | |
297 | } | |
298 | ||
299 | /* | |
300 | * The proper exit code for dying from an uncaught signal is | |
301 | * -<signal>. The kernel doesn't allow exit() or _exit() to pass | |
302 | * a negative value. To get the proper exit code we need to | |
303 | * actually die from an uncaught signal. Here the default signal | |
304 | * handler is installed, we send ourself a signal and we wait for | |
305 | * it to arrive. | |
306 | */ | |
307 | memset(&act, 0, sizeof(act)); | |
308 | sigfillset(&act.sa_mask); | |
309 | act.sa_handler = SIG_DFL; | |
310 | sigaction(host_sig, &act, NULL); | |
311 | ||
312 | kill(getpid(), host_sig); | |
313 | ||
314 | /* | |
315 | * Make sure the signal isn't masked (just reuse the mask inside | |
316 | * of act). | |
317 | */ | |
318 | sigdelset(&act.sa_mask, host_sig); | |
319 | sigsuspend(&act.sa_mask); | |
320 | ||
321 | /* unreachable */ | |
322 | abort(); | |
323 | } | |
324 | ||
5abfac27 WL |
325 | /* |
326 | * Queue a signal so that it will be send to the virtual CPU as soon as | |
327 | * possible. | |
328 | */ | |
e32a6301 WL |
329 | void queue_signal(CPUArchState *env, int sig, int si_type, |
330 | target_siginfo_t *info) | |
5abfac27 | 331 | { |
38be620c WL |
332 | CPUState *cpu = env_cpu(env); |
333 | TaskState *ts = cpu->opaque; | |
334 | ||
335 | trace_user_queue_signal(env, sig); | |
336 | ||
337 | info->si_code = deposit32(info->si_code, 24, 8, si_type); | |
338 | ||
339 | ts->sync_signal.info = *info; | |
340 | ts->sync_signal.pending = sig; | |
341 | /* Signal that a new signal is pending. */ | |
342 | qatomic_set(&ts->signal_pending, 1); | |
343 | return; | |
5abfac27 WL |
344 | } |
345 | ||
149076ad WL |
346 | static int fatal_signal(int sig) |
347 | { | |
348 | ||
349 | switch (sig) { | |
350 | case TARGET_SIGCHLD: | |
351 | case TARGET_SIGURG: | |
352 | case TARGET_SIGWINCH: | |
353 | case TARGET_SIGINFO: | |
354 | /* Ignored by default. */ | |
355 | return 0; | |
356 | case TARGET_SIGCONT: | |
357 | case TARGET_SIGSTOP: | |
358 | case TARGET_SIGTSTP: | |
359 | case TARGET_SIGTTIN: | |
360 | case TARGET_SIGTTOU: | |
361 | /* Job control signals. */ | |
362 | return 0; | |
363 | default: | |
364 | return 1; | |
365 | } | |
366 | } | |
367 | ||
0ef59989 WL |
368 | /* |
369 | * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the | |
370 | * 'force' part is handled in process_pending_signals(). | |
371 | */ | |
372 | void force_sig_fault(int sig, int code, abi_ulong addr) | |
373 | { | |
374 | CPUState *cpu = thread_cpu; | |
375 | CPUArchState *env = cpu->env_ptr; | |
376 | target_siginfo_t info = {}; | |
377 | ||
378 | info.si_signo = sig; | |
379 | info.si_errno = 0; | |
380 | info.si_code = code; | |
381 | info.si_addr = addr; | |
e32a6301 | 382 | queue_signal(env, sig, QEMU_SI_FAULT, &info); |
0ef59989 WL |
383 | } |
384 | ||
149076ad WL |
385 | static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) |
386 | { | |
e625c7ef WL |
387 | CPUArchState *env = thread_cpu->env_ptr; |
388 | CPUState *cpu = env_cpu(env); | |
389 | TaskState *ts = cpu->opaque; | |
390 | target_siginfo_t tinfo; | |
391 | ucontext_t *uc = puc; | |
392 | struct emulated_sigtable *k; | |
393 | int guest_sig; | |
394 | uintptr_t pc = 0; | |
395 | bool sync_sig = false; | |
396 | ||
397 | /* | |
398 | * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special | |
399 | * handling wrt signal blocking and unwinding. | |
400 | */ | |
401 | if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) { | |
402 | MMUAccessType access_type; | |
403 | uintptr_t host_addr; | |
404 | abi_ptr guest_addr; | |
405 | bool is_write; | |
406 | ||
407 | host_addr = (uintptr_t)info->si_addr; | |
408 | ||
409 | /* | |
410 | * Convert forcefully to guest address space: addresses outside | |
411 | * reserved_va are still valid to report via SEGV_MAPERR. | |
412 | */ | |
413 | guest_addr = h2g_nocheck(host_addr); | |
414 | ||
415 | pc = host_signal_pc(uc); | |
416 | is_write = host_signal_write(info, uc); | |
417 | access_type = adjust_signal_pc(&pc, is_write); | |
418 | ||
419 | if (host_sig == SIGSEGV) { | |
420 | bool maperr = true; | |
421 | ||
422 | if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) { | |
423 | /* If this was a write to a TB protected page, restart. */ | |
424 | if (is_write && | |
425 | handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask, | |
426 | pc, guest_addr)) { | |
427 | return; | |
428 | } | |
429 | ||
430 | /* | |
431 | * With reserved_va, the whole address space is PROT_NONE, | |
432 | * which means that we may get ACCERR when we want MAPERR. | |
433 | */ | |
434 | if (page_get_flags(guest_addr) & PAGE_VALID) { | |
435 | maperr = false; | |
436 | } else { | |
437 | info->si_code = SEGV_MAPERR; | |
438 | } | |
439 | } | |
440 | ||
441 | sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); | |
442 | cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc); | |
443 | } else { | |
444 | sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); | |
445 | if (info->si_code == BUS_ADRALN) { | |
446 | cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc); | |
447 | } | |
448 | } | |
449 | ||
450 | sync_sig = true; | |
451 | } | |
452 | ||
453 | /* Get the target signal number. */ | |
454 | guest_sig = host_to_target_signal(host_sig); | |
455 | if (guest_sig < 1 || guest_sig > TARGET_NSIG) { | |
456 | return; | |
457 | } | |
458 | trace_user_host_signal(cpu, host_sig, guest_sig); | |
459 | ||
460 | host_to_target_siginfo_noswap(&tinfo, info); | |
461 | ||
462 | k = &ts->sigtab[guest_sig - 1]; | |
463 | k->info = tinfo; | |
464 | k->pending = guest_sig; | |
465 | ts->signal_pending = 1; | |
466 | ||
467 | /* | |
468 | * For synchronous signals, unwind the cpu state to the faulting | |
469 | * insn and then exit back to the main loop so that the signal | |
470 | * is delivered immediately. | |
471 | */ | |
472 | if (sync_sig) { | |
473 | cpu->exception_index = EXCP_INTERRUPT; | |
474 | cpu_loop_exit_restore(cpu, pc); | |
475 | } | |
476 | ||
477 | rewind_if_in_safe_syscall(puc); | |
478 | ||
479 | /* | |
480 | * Block host signals until target signal handler entered. We | |
481 | * can't block SIGSEGV or SIGBUS while we're executing guest | |
482 | * code in case the guest code provokes one in the window between | |
483 | * now and it getting out to the main loop. Signals will be | |
484 | * unblocked again in process_pending_signals(). | |
485 | */ | |
486 | sigfillset(&uc->uc_sigmask); | |
487 | sigdelset(&uc->uc_sigmask, SIGSEGV); | |
488 | sigdelset(&uc->uc_sigmask, SIGBUS); | |
489 | ||
490 | /* Interrupt the virtual CPU as soon as possible. */ | |
491 | cpu_exit(thread_cpu); | |
149076ad WL |
492 | } |
493 | ||
84778508 BS |
494 | void signal_init(void) |
495 | { | |
149076ad WL |
496 | TaskState *ts = (TaskState *)thread_cpu->opaque; |
497 | struct sigaction act; | |
498 | struct sigaction oact; | |
499 | int i; | |
500 | int host_sig; | |
501 | ||
502 | /* Set the signal mask from the host mask. */ | |
503 | sigprocmask(0, 0, &ts->signal_mask); | |
504 | ||
505 | sigfillset(&act.sa_mask); | |
506 | act.sa_sigaction = host_signal_handler; | |
507 | act.sa_flags = SA_SIGINFO; | |
508 | ||
509 | for (i = 1; i <= TARGET_NSIG; i++) { | |
510 | #ifdef CONFIG_GPROF | |
511 | if (i == TARGET_SIGPROF) { | |
512 | continue; | |
513 | } | |
514 | #endif | |
515 | host_sig = target_to_host_signal(i); | |
516 | sigaction(host_sig, NULL, &oact); | |
517 | if (oact.sa_sigaction == (void *)SIG_IGN) { | |
518 | sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; | |
519 | } else if (oact.sa_sigaction == (void *)SIG_DFL) { | |
520 | sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; | |
521 | } | |
522 | /* | |
523 | * If there's already a handler installed then something has | |
524 | * gone horribly wrong, so don't even try to handle that case. | |
525 | * Install some handlers for our own use. We need at least | |
526 | * SIGSEGV and SIGBUS, to detect exceptions. We can not just | |
527 | * trap all signals because it affects syscall interrupt | |
528 | * behavior. But do trap all default-fatal signals. | |
529 | */ | |
530 | if (fatal_signal(i)) { | |
531 | sigaction(host_sig, &act, NULL); | |
532 | } | |
533 | } | |
84778508 BS |
534 | } |
535 | ||
9349b4f9 | 536 | void process_pending_signals(CPUArchState *cpu_env) |
84778508 BS |
537 | { |
538 | } | |
835b04ed WL |
539 | |
540 | void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, | |
541 | MMUAccessType access_type, bool maperr, uintptr_t ra) | |
542 | { | |
fc9f9bdd WL |
543 | const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; |
544 | ||
545 | if (tcg_ops->record_sigsegv) { | |
546 | tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); | |
547 | } | |
548 | ||
549 | force_sig_fault(TARGET_SIGSEGV, | |
550 | maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR, | |
551 | addr); | |
552 | cpu->exception_index = EXCP_INTERRUPT; | |
553 | cpu_loop_exit_restore(cpu, ra); | |
835b04ed WL |
554 | } |
555 | ||
556 | void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, | |
557 | MMUAccessType access_type, uintptr_t ra) | |
558 | { | |
cfdee273 WL |
559 | const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; |
560 | ||
561 | if (tcg_ops->record_sigbus) { | |
562 | tcg_ops->record_sigbus(cpu, addr, access_type, ra); | |
563 | } | |
564 | ||
565 | force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr); | |
566 | cpu->exception_index = EXCP_INTERRUPT; | |
567 | cpu_loop_exit_restore(cpu, ra); | |
835b04ed | 568 | } |