/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
25 #include "qemu-common.h"
26 #include "target_signal.h"
28 #include "signal-common.h"
30 struct target_sigaltstack target_sigaltstack_used
= {
33 .ss_flags
= TARGET_SS_DISABLE
,
36 static struct target_sigaction sigact_table
[TARGET_NSIG
];
38 static void host_signal_handler(int host_signum
, siginfo_t
*info
,
41 static uint8_t host_to_target_signal_table
[_NSIG
] = {
42 [SIGHUP
] = TARGET_SIGHUP
,
43 [SIGINT
] = TARGET_SIGINT
,
44 [SIGQUIT
] = TARGET_SIGQUIT
,
45 [SIGILL
] = TARGET_SIGILL
,
46 [SIGTRAP
] = TARGET_SIGTRAP
,
47 [SIGABRT
] = TARGET_SIGABRT
,
48 /* [SIGIOT] = TARGET_SIGIOT,*/
49 [SIGBUS
] = TARGET_SIGBUS
,
50 [SIGFPE
] = TARGET_SIGFPE
,
51 [SIGKILL
] = TARGET_SIGKILL
,
52 [SIGUSR1
] = TARGET_SIGUSR1
,
53 [SIGSEGV
] = TARGET_SIGSEGV
,
54 [SIGUSR2
] = TARGET_SIGUSR2
,
55 [SIGPIPE
] = TARGET_SIGPIPE
,
56 [SIGALRM
] = TARGET_SIGALRM
,
57 [SIGTERM
] = TARGET_SIGTERM
,
59 [SIGSTKFLT
] = TARGET_SIGSTKFLT
,
61 [SIGCHLD
] = TARGET_SIGCHLD
,
62 [SIGCONT
] = TARGET_SIGCONT
,
63 [SIGSTOP
] = TARGET_SIGSTOP
,
64 [SIGTSTP
] = TARGET_SIGTSTP
,
65 [SIGTTIN
] = TARGET_SIGTTIN
,
66 [SIGTTOU
] = TARGET_SIGTTOU
,
67 [SIGURG
] = TARGET_SIGURG
,
68 [SIGXCPU
] = TARGET_SIGXCPU
,
69 [SIGXFSZ
] = TARGET_SIGXFSZ
,
70 [SIGVTALRM
] = TARGET_SIGVTALRM
,
71 [SIGPROF
] = TARGET_SIGPROF
,
72 [SIGWINCH
] = TARGET_SIGWINCH
,
73 [SIGIO
] = TARGET_SIGIO
,
74 [SIGPWR
] = TARGET_SIGPWR
,
75 [SIGSYS
] = TARGET_SIGSYS
,
76 /* next signals stay the same */
77 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
78 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
79 To fix this properly we need to do manual signal delivery multiplexed
80 over a single host signal. */
81 [__SIGRTMIN
] = __SIGRTMAX
,
82 [__SIGRTMAX
] = __SIGRTMIN
,
/* Inverse mapping, built at startup by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
86 int host_to_target_signal(int sig
)
88 if (sig
< 0 || sig
>= _NSIG
)
90 return host_to_target_signal_table
[sig
];
93 int target_to_host_signal(int sig
)
95 if (sig
< 0 || sig
>= _NSIG
)
97 return target_to_host_signal_table
[sig
];
100 static inline void target_sigaddset(target_sigset_t
*set
, int signum
)
103 abi_ulong mask
= (abi_ulong
)1 << (signum
% TARGET_NSIG_BPW
);
104 set
->sig
[signum
/ TARGET_NSIG_BPW
] |= mask
;
107 static inline int target_sigismember(const target_sigset_t
*set
, int signum
)
110 abi_ulong mask
= (abi_ulong
)1 << (signum
% TARGET_NSIG_BPW
);
111 return ((set
->sig
[signum
/ TARGET_NSIG_BPW
] & mask
) != 0);
114 void host_to_target_sigset_internal(target_sigset_t
*d
,
118 target_sigemptyset(d
);
119 for (i
= 1; i
<= TARGET_NSIG
; i
++) {
120 if (sigismember(s
, i
)) {
121 target_sigaddset(d
, host_to_target_signal(i
));
126 void host_to_target_sigset(target_sigset_t
*d
, const sigset_t
*s
)
131 host_to_target_sigset_internal(&d1
, s
);
132 for(i
= 0;i
< TARGET_NSIG_WORDS
; i
++)
133 d
->sig
[i
] = tswapal(d1
.sig
[i
]);
136 void target_to_host_sigset_internal(sigset_t
*d
,
137 const target_sigset_t
*s
)
141 for (i
= 1; i
<= TARGET_NSIG
; i
++) {
142 if (target_sigismember(s
, i
)) {
143 sigaddset(d
, target_to_host_signal(i
));
148 void target_to_host_sigset(sigset_t
*d
, const target_sigset_t
*s
)
153 for(i
= 0;i
< TARGET_NSIG_WORDS
; i
++)
154 s1
.sig
[i
] = tswapal(s
->sig
[i
]);
155 target_to_host_sigset_internal(d
, &s1
);
158 void host_to_target_old_sigset(abi_ulong
*old_sigset
,
159 const sigset_t
*sigset
)
162 host_to_target_sigset(&d
, sigset
);
163 *old_sigset
= d
.sig
[0];
166 void target_to_host_old_sigset(sigset_t
*sigset
,
167 const abi_ulong
*old_sigset
)
172 d
.sig
[0] = *old_sigset
;
173 for(i
= 1;i
< TARGET_NSIG_WORDS
; i
++)
175 target_to_host_sigset(sigset
, &d
);
178 int block_signals(void)
180 TaskState
*ts
= (TaskState
*)thread_cpu
->opaque
;
183 /* It's OK to block everything including SIGSEGV, because we won't
184 * run any further guest code before unblocking signals in
185 * process_pending_signals().
188 sigprocmask(SIG_SETMASK
, &set
, 0);
190 return atomic_xchg(&ts
->signal_pending
, 1);
193 /* Wrapper for sigprocmask function
194 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
195 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
196 * a signal was already pending and the syscall must be restarted, or
198 * If set is NULL, this is guaranteed not to fail.
200 int do_sigprocmask(int how
, const sigset_t
*set
, sigset_t
*oldset
)
202 TaskState
*ts
= (TaskState
*)thread_cpu
->opaque
;
205 *oldset
= ts
->signal_mask
;
211 if (block_signals()) {
212 return -TARGET_ERESTARTSYS
;
217 sigorset(&ts
->signal_mask
, &ts
->signal_mask
, set
);
220 for (i
= 1; i
<= NSIG
; ++i
) {
221 if (sigismember(set
, i
)) {
222 sigdelset(&ts
->signal_mask
, i
);
227 ts
->signal_mask
= *set
;
230 g_assert_not_reached();
233 /* Silently ignore attempts to change blocking status of KILL or STOP */
234 sigdelset(&ts
->signal_mask
, SIGKILL
);
235 sigdelset(&ts
->signal_mask
, SIGSTOP
);
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242 * caller is assumed to have called block_signals() already.
244 void set_sigmask(const sigset_t
*set
)
246 TaskState
*ts
= (TaskState
*)thread_cpu
->opaque
;
248 ts
->signal_mask
= *set
;
252 /* siginfo conversion */
254 static inline void host_to_target_siginfo_noswap(target_siginfo_t
*tinfo
,
255 const siginfo_t
*info
)
257 int sig
= host_to_target_signal(info
->si_signo
);
258 int si_code
= info
->si_code
;
260 tinfo
->si_signo
= sig
;
262 tinfo
->si_code
= info
->si_code
;
264 /* This memset serves two purposes:
265 * (1) ensure we don't leak random junk to the guest later
266 * (2) placate false positives from gcc about fields
267 * being used uninitialized if it chooses to inline both this
268 * function and tswap_siginfo() into host_to_target_siginfo().
270 memset(tinfo
->_sifields
._pad
, 0, sizeof(tinfo
->_sifields
._pad
));
272 /* This is awkward, because we have to use a combination of
273 * the si_code and si_signo to figure out which of the union's
274 * members are valid. (Within the host kernel it is always possible
275 * to tell, but the kernel carefully avoids giving userspace the
276 * high 16 bits of si_code, so we don't have the information to
277 * do this the easy way...) We therefore make our best guess,
278 * bearing in mind that a guest can spoof most of the si_codes
279 * via rt_sigqueueinfo() if it likes.
281 * Once we have made our guess, we record it in the top 16 bits of
282 * the si_code, so that tswap_siginfo() later can use it.
283 * tswap_siginfo() will strip these top bits out before writing
284 * si_code to the guest (sign-extending the lower bits).
291 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
292 * These are the only unspoofable si_code values.
294 tinfo
->_sifields
._kill
._pid
= info
->si_pid
;
295 tinfo
->_sifields
._kill
._uid
= info
->si_uid
;
296 si_type
= QEMU_SI_KILL
;
299 /* Everything else is spoofable. Make best guess based on signal */
302 tinfo
->_sifields
._sigchld
._pid
= info
->si_pid
;
303 tinfo
->_sifields
._sigchld
._uid
= info
->si_uid
;
304 tinfo
->_sifields
._sigchld
._status
305 = host_to_target_waitstatus(info
->si_status
);
306 tinfo
->_sifields
._sigchld
._utime
= info
->si_utime
;
307 tinfo
->_sifields
._sigchld
._stime
= info
->si_stime
;
308 si_type
= QEMU_SI_CHLD
;
311 tinfo
->_sifields
._sigpoll
._band
= info
->si_band
;
312 tinfo
->_sifields
._sigpoll
._fd
= info
->si_fd
;
313 si_type
= QEMU_SI_POLL
;
316 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
317 tinfo
->_sifields
._rt
._pid
= info
->si_pid
;
318 tinfo
->_sifields
._rt
._uid
= info
->si_uid
;
319 /* XXX: potential problem if 64 bit */
320 tinfo
->_sifields
._rt
._sigval
.sival_ptr
321 = (abi_ulong
)(unsigned long)info
->si_value
.sival_ptr
;
322 si_type
= QEMU_SI_RT
;
328 tinfo
->si_code
= deposit32(si_code
, 16, 16, si_type
);
331 void tswap_siginfo(target_siginfo_t
*tinfo
,
332 const target_siginfo_t
*info
)
334 int si_type
= extract32(info
->si_code
, 16, 16);
335 int si_code
= sextract32(info
->si_code
, 0, 16);
337 __put_user(info
->si_signo
, &tinfo
->si_signo
);
338 __put_user(info
->si_errno
, &tinfo
->si_errno
);
339 __put_user(si_code
, &tinfo
->si_code
);
341 /* We can use our internal marker of which fields in the structure
342 * are valid, rather than duplicating the guesswork of
343 * host_to_target_siginfo_noswap() here.
347 __put_user(info
->_sifields
._kill
._pid
, &tinfo
->_sifields
._kill
._pid
);
348 __put_user(info
->_sifields
._kill
._uid
, &tinfo
->_sifields
._kill
._uid
);
351 __put_user(info
->_sifields
._timer
._timer1
,
352 &tinfo
->_sifields
._timer
._timer1
);
353 __put_user(info
->_sifields
._timer
._timer2
,
354 &tinfo
->_sifields
._timer
._timer2
);
357 __put_user(info
->_sifields
._sigpoll
._band
,
358 &tinfo
->_sifields
._sigpoll
._band
);
359 __put_user(info
->_sifields
._sigpoll
._fd
,
360 &tinfo
->_sifields
._sigpoll
._fd
);
363 __put_user(info
->_sifields
._sigfault
._addr
,
364 &tinfo
->_sifields
._sigfault
._addr
);
367 __put_user(info
->_sifields
._sigchld
._pid
,
368 &tinfo
->_sifields
._sigchld
._pid
);
369 __put_user(info
->_sifields
._sigchld
._uid
,
370 &tinfo
->_sifields
._sigchld
._uid
);
371 __put_user(info
->_sifields
._sigchld
._status
,
372 &tinfo
->_sifields
._sigchld
._status
);
373 __put_user(info
->_sifields
._sigchld
._utime
,
374 &tinfo
->_sifields
._sigchld
._utime
);
375 __put_user(info
->_sifields
._sigchld
._stime
,
376 &tinfo
->_sifields
._sigchld
._stime
);
379 __put_user(info
->_sifields
._rt
._pid
, &tinfo
->_sifields
._rt
._pid
);
380 __put_user(info
->_sifields
._rt
._uid
, &tinfo
->_sifields
._rt
._uid
);
381 __put_user(info
->_sifields
._rt
._sigval
.sival_ptr
,
382 &tinfo
->_sifields
._rt
._sigval
.sival_ptr
);
385 g_assert_not_reached();
389 void host_to_target_siginfo(target_siginfo_t
*tinfo
, const siginfo_t
*info
)
391 target_siginfo_t tgt_tmp
;
392 host_to_target_siginfo_noswap(&tgt_tmp
, info
);
393 tswap_siginfo(tinfo
, &tgt_tmp
);
396 /* XXX: we support only POSIX RT signals are used. */
397 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
398 void target_to_host_siginfo(siginfo_t
*info
, const target_siginfo_t
*tinfo
)
400 /* This conversion is used only for the rt_sigqueueinfo syscall,
401 * and so we know that the _rt fields are the valid ones.
405 __get_user(info
->si_signo
, &tinfo
->si_signo
);
406 __get_user(info
->si_errno
, &tinfo
->si_errno
);
407 __get_user(info
->si_code
, &tinfo
->si_code
);
408 __get_user(info
->si_pid
, &tinfo
->_sifields
._rt
._pid
);
409 __get_user(info
->si_uid
, &tinfo
->_sifields
._rt
._uid
);
410 __get_user(sival_ptr
, &tinfo
->_sifields
._rt
._sigval
.sival_ptr
);
411 info
->si_value
.sival_ptr
= (void *)(long)sival_ptr
;
414 static int fatal_signal (int sig
)
419 case TARGET_SIGWINCH
:
420 /* Ignored by default. */
427 /* Job control signals. */
434 /* returns 1 if given signal should dump core if not handled */
435 static int core_dump_signal(int sig
)
451 void signal_init(void)
453 TaskState
*ts
= (TaskState
*)thread_cpu
->opaque
;
454 struct sigaction act
;
455 struct sigaction oact
;
459 /* generate signal conversion tables */
460 for(i
= 1; i
< _NSIG
; i
++) {
461 if (host_to_target_signal_table
[i
] == 0)
462 host_to_target_signal_table
[i
] = i
;
464 for(i
= 1; i
< _NSIG
; i
++) {
465 j
= host_to_target_signal_table
[i
];
466 target_to_host_signal_table
[j
] = i
;
469 /* Set the signal mask from the host mask. */
470 sigprocmask(0, 0, &ts
->signal_mask
);
472 /* set all host signal handlers. ALL signals are blocked during
473 the handlers to serialize them. */
474 memset(sigact_table
, 0, sizeof(sigact_table
));
476 sigfillset(&act
.sa_mask
);
477 act
.sa_flags
= SA_SIGINFO
;
478 act
.sa_sigaction
= host_signal_handler
;
479 for(i
= 1; i
<= TARGET_NSIG
; i
++) {
480 host_sig
= target_to_host_signal(i
);
481 sigaction(host_sig
, NULL
, &oact
);
482 if (oact
.sa_sigaction
== (void *)SIG_IGN
) {
483 sigact_table
[i
- 1]._sa_handler
= TARGET_SIG_IGN
;
484 } else if (oact
.sa_sigaction
== (void *)SIG_DFL
) {
485 sigact_table
[i
- 1]._sa_handler
= TARGET_SIG_DFL
;
487 /* If there's already a handler installed then something has
488 gone horribly wrong, so don't even try to handle that case. */
489 /* Install some handlers for our own use. We need at least
490 SIGSEGV and SIGBUS, to detect exceptions. We can not just
491 trap all signals because it affects syscall interrupt
492 behavior. But do trap all default-fatal signals. */
493 if (fatal_signal (i
))
494 sigaction(host_sig
, &act
, NULL
);
498 /* Force a synchronously taken signal. The kernel force_sig() function
499 * also forces the signal to "not blocked, not ignored", but for QEMU
500 * that work is done in process_pending_signals().
502 void force_sig(int sig
)
504 CPUState
*cpu
= thread_cpu
;
505 CPUArchState
*env
= cpu
->env_ptr
;
506 target_siginfo_t info
;
510 info
.si_code
= TARGET_SI_KERNEL
;
511 info
._sifields
._kill
._pid
= 0;
512 info
._sifields
._kill
._uid
= 0;
513 queue_signal(env
, info
.si_signo
, QEMU_SI_KILL
, &info
);
516 /* Force a SIGSEGV if we couldn't write to memory trying to set
517 * up the signal frame. oldsig is the signal we were trying to handle
518 * at the point of failure.
520 #if !defined(TARGET_RISCV)
521 void force_sigsegv(int oldsig
)
523 if (oldsig
== SIGSEGV
) {
524 /* Make sure we don't try to deliver the signal again; this will
525 * end up with handle_pending_signal() calling dump_core_and_abort().
527 sigact_table
[oldsig
- 1]._sa_handler
= TARGET_SIG_DFL
;
529 force_sig(TARGET_SIGSEGV
);
534 /* abort execution with signal */
535 static void QEMU_NORETURN
dump_core_and_abort(int target_sig
)
537 CPUState
*cpu
= thread_cpu
;
538 CPUArchState
*env
= cpu
->env_ptr
;
539 TaskState
*ts
= (TaskState
*)cpu
->opaque
;
540 int host_sig
, core_dumped
= 0;
541 struct sigaction act
;
543 host_sig
= target_to_host_signal(target_sig
);
544 trace_user_force_sig(env
, target_sig
, host_sig
);
545 gdb_signalled(env
, target_sig
);
547 /* dump core if supported by target binary format */
548 if (core_dump_signal(target_sig
) && (ts
->bprm
->core_dump
!= NULL
)) {
551 ((*ts
->bprm
->core_dump
)(target_sig
, env
) == 0);
554 /* we already dumped the core of target process, we don't want
555 * a coredump of qemu itself */
556 struct rlimit nodump
;
557 getrlimit(RLIMIT_CORE
, &nodump
);
559 setrlimit(RLIMIT_CORE
, &nodump
);
560 (void) fprintf(stderr
, "qemu: uncaught target signal %d (%s) - %s\n",
561 target_sig
, strsignal(host_sig
), "core dumped" );
564 /* The proper exit code for dying from an uncaught signal is
565 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
566 * a negative value. To get the proper exit code we need to
567 * actually die from an uncaught signal. Here the default signal
568 * handler is installed, we send ourself a signal and we wait for
570 sigfillset(&act
.sa_mask
);
571 act
.sa_handler
= SIG_DFL
;
573 sigaction(host_sig
, &act
, NULL
);
575 /* For some reason raise(host_sig) doesn't send the signal when
576 * statically linked on x86-64. */
577 kill(getpid(), host_sig
);
579 /* Make sure the signal isn't masked (just reuse the mask inside
581 sigdelset(&act
.sa_mask
, host_sig
);
582 sigsuspend(&act
.sa_mask
);
588 /* queue a signal so that it will be send to the virtual CPU as soon
590 int queue_signal(CPUArchState
*env
, int sig
, int si_type
,
591 target_siginfo_t
*info
)
593 CPUState
*cpu
= ENV_GET_CPU(env
);
594 TaskState
*ts
= cpu
->opaque
;
596 trace_user_queue_signal(env
, sig
);
598 info
->si_code
= deposit32(info
->si_code
, 16, 16, si_type
);
600 ts
->sync_signal
.info
= *info
;
601 ts
->sync_signal
.pending
= sig
;
602 /* signal that a new signal is pending */
603 atomic_set(&ts
->signal_pending
, 1);
604 return 1; /* indicates that the signal was queued */
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
614 static void host_signal_handler(int host_signum
, siginfo_t
*info
,
617 CPUArchState
*env
= thread_cpu
->env_ptr
;
618 CPUState
*cpu
= ENV_GET_CPU(env
);
619 TaskState
*ts
= cpu
->opaque
;
622 target_siginfo_t tinfo
;
623 ucontext_t
*uc
= puc
;
624 struct emulated_sigtable
*k
;
626 /* the CPU emulator uses some host signals to detect exceptions,
627 we forward to it some signals */
628 if ((host_signum
== SIGSEGV
|| host_signum
== SIGBUS
)
629 && info
->si_code
> 0) {
630 if (cpu_signal_handler(host_signum
, info
, puc
))
634 /* get target signal number */
635 sig
= host_to_target_signal(host_signum
);
636 if (sig
< 1 || sig
> TARGET_NSIG
)
638 trace_user_host_signal(env
, host_signum
, sig
);
640 rewind_if_in_safe_syscall(puc
);
642 host_to_target_siginfo_noswap(&tinfo
, info
);
643 k
= &ts
->sigtab
[sig
- 1];
646 ts
->signal_pending
= 1;
648 /* Block host signals until target signal handler entered. We
649 * can't block SIGSEGV or SIGBUS while we're executing guest
650 * code in case the guest code provokes one in the window between
651 * now and it getting out to the main loop. Signals will be
652 * unblocked again in process_pending_signals().
654 * WARNING: we cannot use sigfillset() here because the uc_sigmask
655 * field is a kernel sigset_t, which is much smaller than the
656 * libc sigset_t which sigfillset() operates on. Using sigfillset()
657 * would write 0xff bytes off the end of the structure and trash
658 * data on the struct.
659 * We can't use sizeof(uc->uc_sigmask) either, because the libc
660 * headers define the struct field with the wrong (too large) type.
662 memset(&uc
->uc_sigmask
, 0xff, SIGSET_T_SIZE
);
663 sigdelset(&uc
->uc_sigmask
, SIGSEGV
);
664 sigdelset(&uc
->uc_sigmask
, SIGBUS
);
666 /* interrupt the virtual CPU as soon as possible */
667 cpu_exit(thread_cpu
);
670 /* do_sigaltstack() returns target values and errnos. */
671 /* compare linux/kernel/signal.c:do_sigaltstack() */
672 abi_long
do_sigaltstack(abi_ulong uss_addr
, abi_ulong uoss_addr
, abi_ulong sp
)
675 struct target_sigaltstack oss
;
677 /* XXX: test errors */
680 __put_user(target_sigaltstack_used
.ss_sp
, &oss
.ss_sp
);
681 __put_user(target_sigaltstack_used
.ss_size
, &oss
.ss_size
);
682 __put_user(sas_ss_flags(sp
), &oss
.ss_flags
);
687 struct target_sigaltstack
*uss
;
688 struct target_sigaltstack ss
;
689 size_t minstacksize
= TARGET_MINSIGSTKSZ
;
691 #if defined(TARGET_PPC64)
692 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
693 struct image_info
*image
= ((TaskState
*)thread_cpu
->opaque
)->info
;
694 if (get_ppc64_abi(image
) > 1) {
699 ret
= -TARGET_EFAULT
;
700 if (!lock_user_struct(VERIFY_READ
, uss
, uss_addr
, 1)) {
703 __get_user(ss
.ss_sp
, &uss
->ss_sp
);
704 __get_user(ss
.ss_size
, &uss
->ss_size
);
705 __get_user(ss
.ss_flags
, &uss
->ss_flags
);
706 unlock_user_struct(uss
, uss_addr
, 0);
709 if (on_sig_stack(sp
))
712 ret
= -TARGET_EINVAL
;
713 if (ss
.ss_flags
!= TARGET_SS_DISABLE
714 && ss
.ss_flags
!= TARGET_SS_ONSTACK
718 if (ss
.ss_flags
== TARGET_SS_DISABLE
) {
722 ret
= -TARGET_ENOMEM
;
723 if (ss
.ss_size
< minstacksize
) {
728 target_sigaltstack_used
.ss_sp
= ss
.ss_sp
;
729 target_sigaltstack_used
.ss_size
= ss
.ss_size
;
733 ret
= -TARGET_EFAULT
;
734 if (copy_to_user(uoss_addr
, &oss
, sizeof(oss
)))
743 /* do_sigaction() return target values and host errnos */
744 int do_sigaction(int sig
, const struct target_sigaction
*act
,
745 struct target_sigaction
*oact
)
747 struct target_sigaction
*k
;
748 struct sigaction act1
;
752 if (sig
< 1 || sig
> TARGET_NSIG
|| sig
== TARGET_SIGKILL
|| sig
== TARGET_SIGSTOP
) {
753 return -TARGET_EINVAL
;
756 if (block_signals()) {
757 return -TARGET_ERESTARTSYS
;
760 k
= &sigact_table
[sig
- 1];
762 __put_user(k
->_sa_handler
, &oact
->_sa_handler
);
763 __put_user(k
->sa_flags
, &oact
->sa_flags
);
764 #ifdef TARGET_ARCH_HAS_SA_RESTORER
765 __put_user(k
->sa_restorer
, &oact
->sa_restorer
);
768 oact
->sa_mask
= k
->sa_mask
;
771 /* FIXME: This is not threadsafe. */
772 __get_user(k
->_sa_handler
, &act
->_sa_handler
);
773 __get_user(k
->sa_flags
, &act
->sa_flags
);
774 #ifdef TARGET_ARCH_HAS_SA_RESTORER
775 __get_user(k
->sa_restorer
, &act
->sa_restorer
);
777 /* To be swapped in target_to_host_sigset. */
778 k
->sa_mask
= act
->sa_mask
;
780 /* we update the host linux signal state */
781 host_sig
= target_to_host_signal(sig
);
782 if (host_sig
!= SIGSEGV
&& host_sig
!= SIGBUS
) {
783 sigfillset(&act1
.sa_mask
);
784 act1
.sa_flags
= SA_SIGINFO
;
785 if (k
->sa_flags
& TARGET_SA_RESTART
)
786 act1
.sa_flags
|= SA_RESTART
;
787 /* NOTE: it is important to update the host kernel signal
788 ignore state to avoid getting unexpected interrupted
790 if (k
->_sa_handler
== TARGET_SIG_IGN
) {
791 act1
.sa_sigaction
= (void *)SIG_IGN
;
792 } else if (k
->_sa_handler
== TARGET_SIG_DFL
) {
793 if (fatal_signal (sig
))
794 act1
.sa_sigaction
= host_signal_handler
;
796 act1
.sa_sigaction
= (void *)SIG_DFL
;
798 act1
.sa_sigaction
= host_signal_handler
;
800 ret
= sigaction(host_sig
, &act1
, NULL
);
806 #if defined(TARGET_I386)
807 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
809 struct target_fpreg
{
810 uint16_t significand
[4];
814 struct target_fpxreg
{
815 uint16_t significand
[4];
820 struct target_xmmreg
{
824 struct target_fpstate_32
{
825 /* Regular FPU environment */
833 struct target_fpreg st
[8];
835 uint16_t magic
; /* 0xffff = regular FPU data only */
837 /* FXSR FPU environment */
838 uint32_t _fxsr_env
[6]; /* FXSR FPU env is ignored */
841 struct target_fpxreg fxsr_st
[8]; /* FXSR FPU reg data is ignored */
842 struct target_xmmreg xmm
[8];
843 uint32_t padding
[56];
846 struct target_fpstate_64
{
856 uint32_t st_space
[32];
857 uint32_t xmm_space
[64];
858 uint32_t reserved
[24];
861 #ifndef TARGET_X86_64
862 # define target_fpstate target_fpstate_32
864 # define target_fpstate target_fpstate_64
867 struct target_sigcontext_32
{
885 uint32_t esp_at_signal
;
887 uint32_t fpstate
; /* pointer */
892 struct target_sigcontext_64
{
924 uint64_t fpstate
; /* pointer */
928 #ifndef TARGET_X86_64
929 # define target_sigcontext target_sigcontext_32
931 # define target_sigcontext target_sigcontext_64
934 /* see Linux/include/uapi/asm-generic/ucontext.h */
935 struct target_ucontext
{
938 target_stack_t tuc_stack
;
939 struct target_sigcontext tuc_mcontext
;
940 target_sigset_t tuc_sigmask
; /* mask last for extensibility */
943 #ifndef TARGET_X86_64
947 struct target_sigcontext sc
;
948 struct target_fpstate fpstate
;
949 abi_ulong extramask
[TARGET_NSIG_WORDS
-1];
958 struct target_siginfo info
;
959 struct target_ucontext uc
;
960 struct target_fpstate fpstate
;
968 struct target_ucontext uc
;
969 struct target_siginfo info
;
970 struct target_fpstate fpstate
;
976 * Set up a signal frame.
979 /* XXX: save x87 state */
980 static void setup_sigcontext(struct target_sigcontext
*sc
,
981 struct target_fpstate
*fpstate
, CPUX86State
*env
, abi_ulong mask
,
982 abi_ulong fpstate_addr
)
984 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
985 #ifndef TARGET_X86_64
988 /* already locked in setup_frame() */
989 __put_user(env
->segs
[R_GS
].selector
, (unsigned int *)&sc
->gs
);
990 __put_user(env
->segs
[R_FS
].selector
, (unsigned int *)&sc
->fs
);
991 __put_user(env
->segs
[R_ES
].selector
, (unsigned int *)&sc
->es
);
992 __put_user(env
->segs
[R_DS
].selector
, (unsigned int *)&sc
->ds
);
993 __put_user(env
->regs
[R_EDI
], &sc
->edi
);
994 __put_user(env
->regs
[R_ESI
], &sc
->esi
);
995 __put_user(env
->regs
[R_EBP
], &sc
->ebp
);
996 __put_user(env
->regs
[R_ESP
], &sc
->esp
);
997 __put_user(env
->regs
[R_EBX
], &sc
->ebx
);
998 __put_user(env
->regs
[R_EDX
], &sc
->edx
);
999 __put_user(env
->regs
[R_ECX
], &sc
->ecx
);
1000 __put_user(env
->regs
[R_EAX
], &sc
->eax
);
1001 __put_user(cs
->exception_index
, &sc
->trapno
);
1002 __put_user(env
->error_code
, &sc
->err
);
1003 __put_user(env
->eip
, &sc
->eip
);
1004 __put_user(env
->segs
[R_CS
].selector
, (unsigned int *)&sc
->cs
);
1005 __put_user(env
->eflags
, &sc
->eflags
);
1006 __put_user(env
->regs
[R_ESP
], &sc
->esp_at_signal
);
1007 __put_user(env
->segs
[R_SS
].selector
, (unsigned int *)&sc
->ss
);
1009 cpu_x86_fsave(env
, fpstate_addr
, 1);
1010 fpstate
->status
= fpstate
->sw
;
1012 __put_user(magic
, &fpstate
->magic
);
1013 __put_user(fpstate_addr
, &sc
->fpstate
);
1015 /* non-iBCS2 extensions.. */
1016 __put_user(mask
, &sc
->oldmask
);
1017 __put_user(env
->cr
[2], &sc
->cr2
);
1019 __put_user(env
->regs
[R_EDI
], &sc
->rdi
);
1020 __put_user(env
->regs
[R_ESI
], &sc
->rsi
);
1021 __put_user(env
->regs
[R_EBP
], &sc
->rbp
);
1022 __put_user(env
->regs
[R_ESP
], &sc
->rsp
);
1023 __put_user(env
->regs
[R_EBX
], &sc
->rbx
);
1024 __put_user(env
->regs
[R_EDX
], &sc
->rdx
);
1025 __put_user(env
->regs
[R_ECX
], &sc
->rcx
);
1026 __put_user(env
->regs
[R_EAX
], &sc
->rax
);
1028 __put_user(env
->regs
[8], &sc
->r8
);
1029 __put_user(env
->regs
[9], &sc
->r9
);
1030 __put_user(env
->regs
[10], &sc
->r10
);
1031 __put_user(env
->regs
[11], &sc
->r11
);
1032 __put_user(env
->regs
[12], &sc
->r12
);
1033 __put_user(env
->regs
[13], &sc
->r13
);
1034 __put_user(env
->regs
[14], &sc
->r14
);
1035 __put_user(env
->regs
[15], &sc
->r15
);
1037 __put_user(cs
->exception_index
, &sc
->trapno
);
1038 __put_user(env
->error_code
, &sc
->err
);
1039 __put_user(env
->eip
, &sc
->rip
);
1041 __put_user(env
->eflags
, &sc
->eflags
);
1042 __put_user(env
->segs
[R_CS
].selector
, &sc
->cs
);
1043 __put_user((uint16_t)0, &sc
->gs
);
1044 __put_user((uint16_t)0, &sc
->fs
);
1045 __put_user(env
->segs
[R_SS
].selector
, &sc
->ss
);
1047 __put_user(mask
, &sc
->oldmask
);
1048 __put_user(env
->cr
[2], &sc
->cr2
);
1050 /* fpstate_addr must be 16 byte aligned for fxsave */
1051 assert(!(fpstate_addr
& 0xf));
1053 cpu_x86_fxsave(env
, fpstate_addr
);
1054 __put_user(fpstate_addr
, &sc
->fpstate
);
1059 * Determine which stack to use..
1062 static inline abi_ulong
1063 get_sigframe(struct target_sigaction
*ka
, CPUX86State
*env
, size_t frame_size
)
1067 /* Default to using normal stack */
1068 esp
= env
->regs
[R_ESP
];
1069 #ifdef TARGET_X86_64
1070 esp
-= 128; /* this is the redzone */
1073 /* This is the X/Open sanctioned signal stack switching. */
1074 if (ka
->sa_flags
& TARGET_SA_ONSTACK
) {
1075 if (sas_ss_flags(esp
) == 0) {
1076 esp
= target_sigaltstack_used
.ss_sp
+ target_sigaltstack_used
.ss_size
;
1079 #ifndef TARGET_X86_64
1080 /* This is the legacy signal stack switching. */
1081 if ((env
->segs
[R_SS
].selector
& 0xffff) != __USER_DS
&&
1082 !(ka
->sa_flags
& TARGET_SA_RESTORER
) &&
1084 esp
= (unsigned long) ka
->sa_restorer
;
1089 #ifndef TARGET_X86_64
1090 return (esp
- frame_size
) & -8ul;
1092 return ((esp
- frame_size
) & (~15ul)) - 8;
1096 #ifndef TARGET_X86_64
1097 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1098 static void setup_frame(int sig
, struct target_sigaction
*ka
,
1099 target_sigset_t
*set
, CPUX86State
*env
)
1101 abi_ulong frame_addr
;
1102 struct sigframe
*frame
;
1105 frame_addr
= get_sigframe(ka
, env
, sizeof(*frame
));
1106 trace_user_setup_frame(env
, frame_addr
);
1108 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0))
1111 __put_user(sig
, &frame
->sig
);
1113 setup_sigcontext(&frame
->sc
, &frame
->fpstate
, env
, set
->sig
[0],
1114 frame_addr
+ offsetof(struct sigframe
, fpstate
));
1116 for(i
= 1; i
< TARGET_NSIG_WORDS
; i
++) {
1117 __put_user(set
->sig
[i
], &frame
->extramask
[i
- 1]);
1120 /* Set up to return from userspace. If provided, use a stub
1121 already in userspace. */
1122 if (ka
->sa_flags
& TARGET_SA_RESTORER
) {
1123 __put_user(ka
->sa_restorer
, &frame
->pretcode
);
1126 abi_ulong retcode_addr
;
1127 retcode_addr
= frame_addr
+ offsetof(struct sigframe
, retcode
);
1128 __put_user(retcode_addr
, &frame
->pretcode
);
1129 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1131 __put_user(val16
, (uint16_t *)(frame
->retcode
+0));
1132 __put_user(TARGET_NR_sigreturn
, (int *)(frame
->retcode
+2));
1134 __put_user(val16
, (uint16_t *)(frame
->retcode
+6));
1137 /* Set up registers for signal handler */
1138 env
->regs
[R_ESP
] = frame_addr
;
1139 env
->eip
= ka
->_sa_handler
;
1141 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
1142 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
1143 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
1144 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
1145 env
->eflags
&= ~TF_MASK
;
1147 unlock_user_struct(frame
, frame_addr
, 1);
1156 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1157 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
1158 target_siginfo_t
*info
,
1159 target_sigset_t
*set
, CPUX86State
*env
)
1161 abi_ulong frame_addr
;
1162 #ifndef TARGET_X86_64
1165 struct rt_sigframe
*frame
;
1168 frame_addr
= get_sigframe(ka
, env
, sizeof(*frame
));
1169 trace_user_setup_rt_frame(env
, frame_addr
);
1171 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0))
1174 /* These fields are only in rt_sigframe on 32 bit */
1175 #ifndef TARGET_X86_64
1176 __put_user(sig
, &frame
->sig
);
1177 addr
= frame_addr
+ offsetof(struct rt_sigframe
, info
);
1178 __put_user(addr
, &frame
->pinfo
);
1179 addr
= frame_addr
+ offsetof(struct rt_sigframe
, uc
);
1180 __put_user(addr
, &frame
->puc
);
1182 if (ka
->sa_flags
& TARGET_SA_SIGINFO
) {
1183 tswap_siginfo(&frame
->info
, info
);
1186 /* Create the ucontext. */
1187 __put_user(0, &frame
->uc
.tuc_flags
);
1188 __put_user(0, &frame
->uc
.tuc_link
);
1189 __put_user(target_sigaltstack_used
.ss_sp
, &frame
->uc
.tuc_stack
.ss_sp
);
1190 __put_user(sas_ss_flags(get_sp_from_cpustate(env
)),
1191 &frame
->uc
.tuc_stack
.ss_flags
);
1192 __put_user(target_sigaltstack_used
.ss_size
,
1193 &frame
->uc
.tuc_stack
.ss_size
);
1194 setup_sigcontext(&frame
->uc
.tuc_mcontext
, &frame
->fpstate
, env
,
1195 set
->sig
[0], frame_addr
+ offsetof(struct rt_sigframe
, fpstate
));
1197 for(i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
1198 __put_user(set
->sig
[i
], &frame
->uc
.tuc_sigmask
.sig
[i
]);
1201 /* Set up to return from userspace. If provided, use a stub
1202 already in userspace. */
1203 #ifndef TARGET_X86_64
1204 if (ka
->sa_flags
& TARGET_SA_RESTORER
) {
1205 __put_user(ka
->sa_restorer
, &frame
->pretcode
);
1208 addr
= frame_addr
+ offsetof(struct rt_sigframe
, retcode
);
1209 __put_user(addr
, &frame
->pretcode
);
1210 /* This is movl $,%eax ; int $0x80 */
1211 __put_user(0xb8, (char *)(frame
->retcode
+0));
1212 __put_user(TARGET_NR_rt_sigreturn
, (int *)(frame
->retcode
+1));
1214 __put_user(val16
, (uint16_t *)(frame
->retcode
+5));
1217 /* XXX: Would be slightly better to return -EFAULT here if test fails
1218 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1219 __put_user(ka
->sa_restorer
, &frame
->pretcode
);
1222 /* Set up registers for signal handler */
1223 env
->regs
[R_ESP
] = frame_addr
;
1224 env
->eip
= ka
->_sa_handler
;
1226 #ifndef TARGET_X86_64
1227 env
->regs
[R_EAX
] = sig
;
1228 env
->regs
[R_EDX
] = (unsigned long)&frame
->info
;
1229 env
->regs
[R_ECX
] = (unsigned long)&frame
->uc
;
1231 env
->regs
[R_EAX
] = 0;
1232 env
->regs
[R_EDI
] = sig
;
1233 env
->regs
[R_ESI
] = (unsigned long)&frame
->info
;
1234 env
->regs
[R_EDX
] = (unsigned long)&frame
->uc
;
1237 cpu_x86_load_seg(env
, R_DS
, __USER_DS
);
1238 cpu_x86_load_seg(env
, R_ES
, __USER_DS
);
1239 cpu_x86_load_seg(env
, R_CS
, __USER_CS
);
1240 cpu_x86_load_seg(env
, R_SS
, __USER_DS
);
1241 env
->eflags
&= ~TF_MASK
;
1243 unlock_user_struct(frame
, frame_addr
, 1);
1252 restore_sigcontext(CPUX86State
*env
, struct target_sigcontext
*sc
)
1254 unsigned int err
= 0;
1255 abi_ulong fpstate_addr
;
1256 unsigned int tmpflags
;
1258 #ifndef TARGET_X86_64
1259 cpu_x86_load_seg(env
, R_GS
, tswap16(sc
->gs
));
1260 cpu_x86_load_seg(env
, R_FS
, tswap16(sc
->fs
));
1261 cpu_x86_load_seg(env
, R_ES
, tswap16(sc
->es
));
1262 cpu_x86_load_seg(env
, R_DS
, tswap16(sc
->ds
));
1264 env
->regs
[R_EDI
] = tswapl(sc
->edi
);
1265 env
->regs
[R_ESI
] = tswapl(sc
->esi
);
1266 env
->regs
[R_EBP
] = tswapl(sc
->ebp
);
1267 env
->regs
[R_ESP
] = tswapl(sc
->esp
);
1268 env
->regs
[R_EBX
] = tswapl(sc
->ebx
);
1269 env
->regs
[R_EDX
] = tswapl(sc
->edx
);
1270 env
->regs
[R_ECX
] = tswapl(sc
->ecx
);
1271 env
->regs
[R_EAX
] = tswapl(sc
->eax
);
1273 env
->eip
= tswapl(sc
->eip
);
1275 env
->regs
[8] = tswapl(sc
->r8
);
1276 env
->regs
[9] = tswapl(sc
->r9
);
1277 env
->regs
[10] = tswapl(sc
->r10
);
1278 env
->regs
[11] = tswapl(sc
->r11
);
1279 env
->regs
[12] = tswapl(sc
->r12
);
1280 env
->regs
[13] = tswapl(sc
->r13
);
1281 env
->regs
[14] = tswapl(sc
->r14
);
1282 env
->regs
[15] = tswapl(sc
->r15
);
1284 env
->regs
[R_EDI
] = tswapl(sc
->rdi
);
1285 env
->regs
[R_ESI
] = tswapl(sc
->rsi
);
1286 env
->regs
[R_EBP
] = tswapl(sc
->rbp
);
1287 env
->regs
[R_EBX
] = tswapl(sc
->rbx
);
1288 env
->regs
[R_EDX
] = tswapl(sc
->rdx
);
1289 env
->regs
[R_EAX
] = tswapl(sc
->rax
);
1290 env
->regs
[R_ECX
] = tswapl(sc
->rcx
);
1291 env
->regs
[R_ESP
] = tswapl(sc
->rsp
);
1293 env
->eip
= tswapl(sc
->rip
);
1296 cpu_x86_load_seg(env
, R_CS
, lduw_p(&sc
->cs
) | 3);
1297 cpu_x86_load_seg(env
, R_SS
, lduw_p(&sc
->ss
) | 3);
1299 tmpflags
= tswapl(sc
->eflags
);
1300 env
->eflags
= (env
->eflags
& ~0x40DD5) | (tmpflags
& 0x40DD5);
1301 // regs->orig_eax = -1; /* disable syscall checks */
1303 fpstate_addr
= tswapl(sc
->fpstate
);
1304 if (fpstate_addr
!= 0) {
1305 if (!access_ok(VERIFY_READ
, fpstate_addr
,
1306 sizeof(struct target_fpstate
)))
1308 #ifndef TARGET_X86_64
1309 cpu_x86_frstor(env
, fpstate_addr
, 1);
1311 cpu_x86_fxrstor(env
, fpstate_addr
);
1320 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1321 #ifndef TARGET_X86_64
1322 long do_sigreturn(CPUX86State
*env
)
1324 struct sigframe
*frame
;
1325 abi_ulong frame_addr
= env
->regs
[R_ESP
] - 8;
1326 target_sigset_t target_set
;
1330 trace_user_do_sigreturn(env
, frame_addr
);
1331 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1))
1333 /* set blocked signals */
1334 __get_user(target_set
.sig
[0], &frame
->sc
.oldmask
);
1335 for(i
= 1; i
< TARGET_NSIG_WORDS
; i
++) {
1336 __get_user(target_set
.sig
[i
], &frame
->extramask
[i
- 1]);
1339 target_to_host_sigset_internal(&set
, &target_set
);
1342 /* restore registers */
1343 if (restore_sigcontext(env
, &frame
->sc
))
1345 unlock_user_struct(frame
, frame_addr
, 0);
1346 return -TARGET_QEMU_ESIGRETURN
;
1349 unlock_user_struct(frame
, frame_addr
, 0);
1350 force_sig(TARGET_SIGSEGV
);
1351 return -TARGET_QEMU_ESIGRETURN
;
1355 long do_rt_sigreturn(CPUX86State
*env
)
1357 abi_ulong frame_addr
;
1358 struct rt_sigframe
*frame
;
1361 frame_addr
= env
->regs
[R_ESP
] - sizeof(abi_ulong
);
1362 trace_user_do_rt_sigreturn(env
, frame_addr
);
1363 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1))
1365 target_to_host_sigset(&set
, &frame
->uc
.tuc_sigmask
);
1368 if (restore_sigcontext(env
, &frame
->uc
.tuc_mcontext
)) {
1372 if (do_sigaltstack(frame_addr
+ offsetof(struct rt_sigframe
, uc
.tuc_stack
), 0,
1373 get_sp_from_cpustate(env
)) == -EFAULT
) {
1377 unlock_user_struct(frame
, frame_addr
, 0);
1378 return -TARGET_QEMU_ESIGRETURN
;
1381 unlock_user_struct(frame
, frame_addr
, 0);
1382 force_sig(TARGET_SIGSEGV
);
1383 return -TARGET_QEMU_ESIGRETURN
;
1386 #elif defined(TARGET_SPARC)
1388 #define __SUNOS_MAXWIN 31
1390 /* This is what SunOS does, so shall I. */
1391 struct target_sigcontext
{
1392 abi_ulong sigc_onstack
; /* state to restore */
1394 abi_ulong sigc_mask
; /* sigmask to restore */
1395 abi_ulong sigc_sp
; /* stack pointer */
1396 abi_ulong sigc_pc
; /* program counter */
1397 abi_ulong sigc_npc
; /* next program counter */
1398 abi_ulong sigc_psr
; /* for condition codes etc */
1399 abi_ulong sigc_g1
; /* User uses these two registers */
1400 abi_ulong sigc_o0
; /* within the trampoline code. */
1402 /* Now comes information regarding the users window set
1403 * at the time of the signal.
1405 abi_ulong sigc_oswins
; /* outstanding windows */
1407 /* stack ptrs for each regwin buf */
1408 char *sigc_spbuf
[__SUNOS_MAXWIN
];
1410 /* Windows to restore after signal */
1412 abi_ulong locals
[8];
1414 } sigc_wbuf
[__SUNOS_MAXWIN
];
1416 /* A Sparc stack frame */
1417 struct sparc_stackf
{
1418 abi_ulong locals
[8];
1420 /* It's simpler to treat fp and callers_pc as elements of ins[]
1421 * since we never need to access them ourselves.
1425 abi_ulong xxargs
[1];
1434 abi_ulong u_regs
[16]; /* globals and ins */
1440 abi_ulong si_float_regs
[32];
1441 unsigned long si_fsr
;
1442 unsigned long si_fpqdepth
;
1444 unsigned long *insn_addr
;
1447 } qemu_siginfo_fpu_t
;
1450 struct target_signal_frame
{
1451 struct sparc_stackf ss
;
1454 abi_ulong insns
[2] __attribute__ ((aligned (8)));
1455 abi_ulong extramask
[TARGET_NSIG_WORDS
- 1];
1456 abi_ulong extra_size
; /* Should be 0 */
1457 qemu_siginfo_fpu_t fpu_state
;
1459 struct target_rt_signal_frame
{
1460 struct sparc_stackf ss
;
1465 unsigned int insns
[2];
1467 unsigned int extra_size
; /* Should be 0 */
1468 qemu_siginfo_fpu_t fpu_state
;
1482 #define UREG_FP UREG_I6
1483 #define UREG_SP UREG_O6
1485 static inline abi_ulong
get_sigframe(struct target_sigaction
*sa
,
1487 unsigned long framesize
)
1491 sp
= env
->regwptr
[UREG_FP
];
1493 /* This is the X/Open sanctioned signal stack switching. */
1494 if (sa
->sa_flags
& TARGET_SA_ONSTACK
) {
1495 if (!on_sig_stack(sp
)
1496 && !((target_sigaltstack_used
.ss_sp
+ target_sigaltstack_used
.ss_size
) & 7)) {
1497 sp
= target_sigaltstack_used
.ss_sp
+ target_sigaltstack_used
.ss_size
;
1500 return sp
- framesize
;
1504 setup___siginfo(__siginfo_t
*si
, CPUSPARCState
*env
, abi_ulong mask
)
1508 __put_user(env
->psr
, &si
->si_regs
.psr
);
1509 __put_user(env
->pc
, &si
->si_regs
.pc
);
1510 __put_user(env
->npc
, &si
->si_regs
.npc
);
1511 __put_user(env
->y
, &si
->si_regs
.y
);
1512 for (i
=0; i
< 8; i
++) {
1513 __put_user(env
->gregs
[i
], &si
->si_regs
.u_regs
[i
]);
1515 for (i
=0; i
< 8; i
++) {
1516 __put_user(env
->regwptr
[UREG_I0
+ i
], &si
->si_regs
.u_regs
[i
+8]);
1518 __put_user(mask
, &si
->si_mask
);
1524 setup_sigcontext(struct target_sigcontext
*sc
, /*struct _fpstate *fpstate,*/
1525 CPUSPARCState
*env
, unsigned long mask
)
1529 __put_user(mask
, &sc
->sigc_mask
);
1530 __put_user(env
->regwptr
[UREG_SP
], &sc
->sigc_sp
);
1531 __put_user(env
->pc
, &sc
->sigc_pc
);
1532 __put_user(env
->npc
, &sc
->sigc_npc
);
1533 __put_user(env
->psr
, &sc
->sigc_psr
);
1534 __put_user(env
->gregs
[1], &sc
->sigc_g1
);
1535 __put_user(env
->regwptr
[UREG_O0
], &sc
->sigc_o0
);
1540 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
1542 static void setup_frame(int sig
, struct target_sigaction
*ka
,
1543 target_sigset_t
*set
, CPUSPARCState
*env
)
1546 struct target_signal_frame
*sf
;
1547 int sigframe_size
, err
, i
;
1549 /* 1. Make sure everything is clean */
1550 //synchronize_user_stack();
1552 sigframe_size
= NF_ALIGNEDSZ
;
1553 sf_addr
= get_sigframe(ka
, env
, sigframe_size
);
1554 trace_user_setup_frame(env
, sf_addr
);
1556 sf
= lock_user(VERIFY_WRITE
, sf_addr
,
1557 sizeof(struct target_signal_frame
), 0);
1562 if (invalid_frame_pointer(sf
, sigframe_size
))
1563 goto sigill_and_return
;
1565 /* 2. Save the current process state */
1566 err
= setup___siginfo(&sf
->info
, env
, set
->sig
[0]);
1567 __put_user(0, &sf
->extra_size
);
1569 //save_fpu_state(regs, &sf->fpu_state);
1570 //__put_user(&sf->fpu_state, &sf->fpu_save);
1572 __put_user(set
->sig
[0], &sf
->info
.si_mask
);
1573 for (i
= 0; i
< TARGET_NSIG_WORDS
- 1; i
++) {
1574 __put_user(set
->sig
[i
+ 1], &sf
->extramask
[i
]);
1577 for (i
= 0; i
< 8; i
++) {
1578 __put_user(env
->regwptr
[i
+ UREG_L0
], &sf
->ss
.locals
[i
]);
1580 for (i
= 0; i
< 8; i
++) {
1581 __put_user(env
->regwptr
[i
+ UREG_I0
], &sf
->ss
.ins
[i
]);
1586 /* 3. signal handler back-trampoline and parameters */
1587 env
->regwptr
[UREG_FP
] = sf_addr
;
1588 env
->regwptr
[UREG_I0
] = sig
;
1589 env
->regwptr
[UREG_I1
] = sf_addr
+
1590 offsetof(struct target_signal_frame
, info
);
1591 env
->regwptr
[UREG_I2
] = sf_addr
+
1592 offsetof(struct target_signal_frame
, info
);
1594 /* 4. signal handler */
1595 env
->pc
= ka
->_sa_handler
;
1596 env
->npc
= (env
->pc
+ 4);
1597 /* 5. return to kernel instructions */
1598 if (ka
->ka_restorer
) {
1599 env
->regwptr
[UREG_I7
] = ka
->ka_restorer
;
1603 env
->regwptr
[UREG_I7
] = sf_addr
+
1604 offsetof(struct target_signal_frame
, insns
) - 2 * 4;
1606 /* mov __NR_sigreturn, %g1 */
1608 __put_user(val32
, &sf
->insns
[0]);
1612 __put_user(val32
, &sf
->insns
[1]);
1616 /* Flush instruction space. */
1617 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
1620 unlock_user(sf
, sf_addr
, sizeof(struct target_signal_frame
));
1624 force_sig(TARGET_SIGILL
);
1627 unlock_user(sf
, sf_addr
, sizeof(struct target_signal_frame
));
1631 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
1632 target_siginfo_t
*info
,
1633 target_sigset_t
*set
, CPUSPARCState
*env
)
1635 fprintf(stderr
, "setup_rt_frame: not implemented\n");
1638 long do_sigreturn(CPUSPARCState
*env
)
1641 struct target_signal_frame
*sf
;
1642 uint32_t up_psr
, pc
, npc
;
1643 target_sigset_t set
;
1647 sf_addr
= env
->regwptr
[UREG_FP
];
1648 trace_user_do_sigreturn(env
, sf_addr
);
1649 if (!lock_user_struct(VERIFY_READ
, sf
, sf_addr
, 1)) {
1653 /* 1. Make sure we are not getting garbage from the user */
1658 __get_user(pc
, &sf
->info
.si_regs
.pc
);
1659 __get_user(npc
, &sf
->info
.si_regs
.npc
);
1661 if ((pc
| npc
) & 3) {
1665 /* 2. Restore the state */
1666 __get_user(up_psr
, &sf
->info
.si_regs
.psr
);
1668 /* User can only change condition codes and FPU enabling in %psr. */
1669 env
->psr
= (up_psr
& (PSR_ICC
/* | PSR_EF */))
1670 | (env
->psr
& ~(PSR_ICC
/* | PSR_EF */));
1674 __get_user(env
->y
, &sf
->info
.si_regs
.y
);
1675 for (i
=0; i
< 8; i
++) {
1676 __get_user(env
->gregs
[i
], &sf
->info
.si_regs
.u_regs
[i
]);
1678 for (i
=0; i
< 8; i
++) {
1679 __get_user(env
->regwptr
[i
+ UREG_I0
], &sf
->info
.si_regs
.u_regs
[i
+8]);
1682 /* FIXME: implement FPU save/restore:
1683 * __get_user(fpu_save, &sf->fpu_save);
1685 * err |= restore_fpu_state(env, fpu_save);
1688 /* This is pretty much atomic, no amount locking would prevent
1689 * the races which exist anyways.
1691 __get_user(set
.sig
[0], &sf
->info
.si_mask
);
1692 for(i
= 1; i
< TARGET_NSIG_WORDS
; i
++) {
1693 __get_user(set
.sig
[i
], &sf
->extramask
[i
- 1]);
1696 target_to_host_sigset_internal(&host_set
, &set
);
1697 set_sigmask(&host_set
);
1702 unlock_user_struct(sf
, sf_addr
, 0);
1703 return -TARGET_QEMU_ESIGRETURN
;
1706 unlock_user_struct(sf
, sf_addr
, 0);
1707 force_sig(TARGET_SIGSEGV
);
1708 return -TARGET_QEMU_ESIGRETURN
;
1711 long do_rt_sigreturn(CPUSPARCState
*env
)
1713 trace_user_do_rt_sigreturn(env
, 0);
1714 fprintf(stderr
, "do_rt_sigreturn: not implemented\n");
1715 return -TARGET_ENOSYS
;
1718 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1719 #define SPARC_MC_TSTATE 0
1720 #define SPARC_MC_PC 1
1721 #define SPARC_MC_NPC 2
1722 #define SPARC_MC_Y 3
1723 #define SPARC_MC_G1 4
1724 #define SPARC_MC_G2 5
1725 #define SPARC_MC_G3 6
1726 #define SPARC_MC_G4 7
1727 #define SPARC_MC_G5 8
1728 #define SPARC_MC_G6 9
1729 #define SPARC_MC_G7 10
1730 #define SPARC_MC_O0 11
1731 #define SPARC_MC_O1 12
1732 #define SPARC_MC_O2 13
1733 #define SPARC_MC_O3 14
1734 #define SPARC_MC_O4 15
1735 #define SPARC_MC_O5 16
1736 #define SPARC_MC_O6 17
1737 #define SPARC_MC_O7 18
1738 #define SPARC_MC_NGREG 19
1740 typedef abi_ulong target_mc_greg_t
;
1741 typedef target_mc_greg_t target_mc_gregset_t
[SPARC_MC_NGREG
];
1743 struct target_mc_fq
{
1744 abi_ulong
*mcfq_addr
;
1748 struct target_mc_fpu
{
1752 //uint128_t qregs[16];
1754 abi_ulong mcfpu_fsr
;
1755 abi_ulong mcfpu_fprs
;
1756 abi_ulong mcfpu_gsr
;
1757 struct target_mc_fq
*mcfpu_fq
;
1758 unsigned char mcfpu_qcnt
;
1759 unsigned char mcfpu_qentsz
;
1760 unsigned char mcfpu_enab
;
1762 typedef struct target_mc_fpu target_mc_fpu_t
;
1765 target_mc_gregset_t mc_gregs
;
1766 target_mc_greg_t mc_fp
;
1767 target_mc_greg_t mc_i7
;
1768 target_mc_fpu_t mc_fpregs
;
1769 } target_mcontext_t
;
1771 struct target_ucontext
{
1772 struct target_ucontext
*tuc_link
;
1773 abi_ulong tuc_flags
;
1774 target_sigset_t tuc_sigmask
;
1775 target_mcontext_t tuc_mcontext
;
1778 /* A V9 register window */
1779 struct target_reg_window
{
1780 abi_ulong locals
[8];
1784 #define TARGET_STACK_BIAS 2047
1786 /* {set, get}context() needed for 64-bit SparcLinux userland. */
1787 void sparc64_set_context(CPUSPARCState
*env
)
1790 struct target_ucontext
*ucp
;
1791 target_mc_gregset_t
*grp
;
1792 abi_ulong pc
, npc
, tstate
;
1793 abi_ulong fp
, i7
, w_addr
;
1796 ucp_addr
= env
->regwptr
[UREG_I0
];
1797 if (!lock_user_struct(VERIFY_READ
, ucp
, ucp_addr
, 1)) {
1800 grp
= &ucp
->tuc_mcontext
.mc_gregs
;
1801 __get_user(pc
, &((*grp
)[SPARC_MC_PC
]));
1802 __get_user(npc
, &((*grp
)[SPARC_MC_NPC
]));
1803 if ((pc
| npc
) & 3) {
1806 if (env
->regwptr
[UREG_I1
]) {
1807 target_sigset_t target_set
;
1810 if (TARGET_NSIG_WORDS
== 1) {
1811 __get_user(target_set
.sig
[0], &ucp
->tuc_sigmask
.sig
[0]);
1813 abi_ulong
*src
, *dst
;
1814 src
= ucp
->tuc_sigmask
.sig
;
1815 dst
= target_set
.sig
;
1816 for (i
= 0; i
< TARGET_NSIG_WORDS
; i
++, dst
++, src
++) {
1817 __get_user(*dst
, src
);
1820 target_to_host_sigset_internal(&set
, &target_set
);
1825 __get_user(env
->y
, &((*grp
)[SPARC_MC_Y
]));
1826 __get_user(tstate
, &((*grp
)[SPARC_MC_TSTATE
]));
1827 env
->asi
= (tstate
>> 24) & 0xff;
1828 cpu_put_ccr(env
, tstate
>> 32);
1829 cpu_put_cwp64(env
, tstate
& 0x1f);
1830 __get_user(env
->gregs
[1], (&(*grp
)[SPARC_MC_G1
]));
1831 __get_user(env
->gregs
[2], (&(*grp
)[SPARC_MC_G2
]));
1832 __get_user(env
->gregs
[3], (&(*grp
)[SPARC_MC_G3
]));
1833 __get_user(env
->gregs
[4], (&(*grp
)[SPARC_MC_G4
]));
1834 __get_user(env
->gregs
[5], (&(*grp
)[SPARC_MC_G5
]));
1835 __get_user(env
->gregs
[6], (&(*grp
)[SPARC_MC_G6
]));
1836 __get_user(env
->gregs
[7], (&(*grp
)[SPARC_MC_G7
]));
1837 __get_user(env
->regwptr
[UREG_I0
], (&(*grp
)[SPARC_MC_O0
]));
1838 __get_user(env
->regwptr
[UREG_I1
], (&(*grp
)[SPARC_MC_O1
]));
1839 __get_user(env
->regwptr
[UREG_I2
], (&(*grp
)[SPARC_MC_O2
]));
1840 __get_user(env
->regwptr
[UREG_I3
], (&(*grp
)[SPARC_MC_O3
]));
1841 __get_user(env
->regwptr
[UREG_I4
], (&(*grp
)[SPARC_MC_O4
]));
1842 __get_user(env
->regwptr
[UREG_I5
], (&(*grp
)[SPARC_MC_O5
]));
1843 __get_user(env
->regwptr
[UREG_I6
], (&(*grp
)[SPARC_MC_O6
]));
1844 __get_user(env
->regwptr
[UREG_I7
], (&(*grp
)[SPARC_MC_O7
]));
1846 __get_user(fp
, &(ucp
->tuc_mcontext
.mc_fp
));
1847 __get_user(i7
, &(ucp
->tuc_mcontext
.mc_i7
));
1849 w_addr
= TARGET_STACK_BIAS
+env
->regwptr
[UREG_I6
];
1850 if (put_user(fp
, w_addr
+ offsetof(struct target_reg_window
, ins
[6]),
1854 if (put_user(i7
, w_addr
+ offsetof(struct target_reg_window
, ins
[7]),
1858 /* FIXME this does not match how the kernel handles the FPU in
1859 * its sparc64_set_context implementation. In particular the FPU
1860 * is only restored if fenab is non-zero in:
1861 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
1863 __get_user(env
->fprs
, &(ucp
->tuc_mcontext
.mc_fpregs
.mcfpu_fprs
));
1865 uint32_t *src
= ucp
->tuc_mcontext
.mc_fpregs
.mcfpu_fregs
.sregs
;
1866 for (i
= 0; i
< 64; i
++, src
++) {
1868 __get_user(env
->fpr
[i
/2].l
.lower
, src
);
1870 __get_user(env
->fpr
[i
/2].l
.upper
, src
);
1874 __get_user(env
->fsr
,
1875 &(ucp
->tuc_mcontext
.mc_fpregs
.mcfpu_fsr
));
1876 __get_user(env
->gsr
,
1877 &(ucp
->tuc_mcontext
.mc_fpregs
.mcfpu_gsr
));
1878 unlock_user_struct(ucp
, ucp_addr
, 0);
1881 unlock_user_struct(ucp
, ucp_addr
, 0);
1882 force_sig(TARGET_SIGSEGV
);
1885 void sparc64_get_context(CPUSPARCState
*env
)
1888 struct target_ucontext
*ucp
;
1889 target_mc_gregset_t
*grp
;
1890 target_mcontext_t
*mcp
;
1891 abi_ulong fp
, i7
, w_addr
;
1894 target_sigset_t target_set
;
1897 ucp_addr
= env
->regwptr
[UREG_I0
];
1898 if (!lock_user_struct(VERIFY_WRITE
, ucp
, ucp_addr
, 0)) {
1902 mcp
= &ucp
->tuc_mcontext
;
1903 grp
= &mcp
->mc_gregs
;
1905 /* Skip over the trap instruction, first. */
1909 /* If we're only reading the signal mask then do_sigprocmask()
1910 * is guaranteed not to fail, which is important because we don't
1911 * have any way to signal a failure or restart this operation since
1912 * this is not a normal syscall.
1914 err
= do_sigprocmask(0, NULL
, &set
);
1916 host_to_target_sigset_internal(&target_set
, &set
);
1917 if (TARGET_NSIG_WORDS
== 1) {
1918 __put_user(target_set
.sig
[0],
1919 (abi_ulong
*)&ucp
->tuc_sigmask
);
1921 abi_ulong
*src
, *dst
;
1922 src
= target_set
.sig
;
1923 dst
= ucp
->tuc_sigmask
.sig
;
1924 for (i
= 0; i
< TARGET_NSIG_WORDS
; i
++, dst
++, src
++) {
1925 __put_user(*src
, dst
);
1931 /* XXX: tstate must be saved properly */
1932 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
1933 __put_user(env
->pc
, &((*grp
)[SPARC_MC_PC
]));
1934 __put_user(env
->npc
, &((*grp
)[SPARC_MC_NPC
]));
1935 __put_user(env
->y
, &((*grp
)[SPARC_MC_Y
]));
1936 __put_user(env
->gregs
[1], &((*grp
)[SPARC_MC_G1
]));
1937 __put_user(env
->gregs
[2], &((*grp
)[SPARC_MC_G2
]));
1938 __put_user(env
->gregs
[3], &((*grp
)[SPARC_MC_G3
]));
1939 __put_user(env
->gregs
[4], &((*grp
)[SPARC_MC_G4
]));
1940 __put_user(env
->gregs
[5], &((*grp
)[SPARC_MC_G5
]));
1941 __put_user(env
->gregs
[6], &((*grp
)[SPARC_MC_G6
]));
1942 __put_user(env
->gregs
[7], &((*grp
)[SPARC_MC_G7
]));
1943 __put_user(env
->regwptr
[UREG_I0
], &((*grp
)[SPARC_MC_O0
]));
1944 __put_user(env
->regwptr
[UREG_I1
], &((*grp
)[SPARC_MC_O1
]));
1945 __put_user(env
->regwptr
[UREG_I2
], &((*grp
)[SPARC_MC_O2
]));
1946 __put_user(env
->regwptr
[UREG_I3
], &((*grp
)[SPARC_MC_O3
]));
1947 __put_user(env
->regwptr
[UREG_I4
], &((*grp
)[SPARC_MC_O4
]));
1948 __put_user(env
->regwptr
[UREG_I5
], &((*grp
)[SPARC_MC_O5
]));
1949 __put_user(env
->regwptr
[UREG_I6
], &((*grp
)[SPARC_MC_O6
]));
1950 __put_user(env
->regwptr
[UREG_I7
], &((*grp
)[SPARC_MC_O7
]));
1952 w_addr
= TARGET_STACK_BIAS
+env
->regwptr
[UREG_I6
];
1954 if (get_user(fp
, w_addr
+ offsetof(struct target_reg_window
, ins
[6]),
1958 if (get_user(i7
, w_addr
+ offsetof(struct target_reg_window
, ins
[7]),
1962 __put_user(fp
, &(mcp
->mc_fp
));
1963 __put_user(i7
, &(mcp
->mc_i7
));
1966 uint32_t *dst
= ucp
->tuc_mcontext
.mc_fpregs
.mcfpu_fregs
.sregs
;
1967 for (i
= 0; i
< 64; i
++, dst
++) {
1969 __put_user(env
->fpr
[i
/2].l
.lower
, dst
);
1971 __put_user(env
->fpr
[i
/2].l
.upper
, dst
);
1975 __put_user(env
->fsr
, &(mcp
->mc_fpregs
.mcfpu_fsr
));
1976 __put_user(env
->gsr
, &(mcp
->mc_fpregs
.mcfpu_gsr
));
1977 __put_user(env
->fprs
, &(mcp
->mc_fpregs
.mcfpu_fprs
));
1981 unlock_user_struct(ucp
, ucp_addr
, 1);
1984 unlock_user_struct(ucp
, ucp_addr
, 1);
1985 force_sig(TARGET_SIGSEGV
);
1988 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
1990 # if defined(TARGET_ABI_MIPSO32)
1991 struct target_sigcontext
{
1992 uint32_t sc_regmask
; /* Unused */
1995 uint64_t sc_regs
[32];
1996 uint64_t sc_fpregs
[32];
1997 uint32_t sc_ownedfp
; /* Unused */
1998 uint32_t sc_fpc_csr
;
1999 uint32_t sc_fpc_eir
; /* Unused */
2000 uint32_t sc_used_math
;
2001 uint32_t sc_dsp
; /* dsp status, was sc_ssflags */
2005 target_ulong sc_hi1
; /* Was sc_cause */
2006 target_ulong sc_lo1
; /* Was sc_badvaddr */
2007 target_ulong sc_hi2
; /* Was sc_sigset[4] */
2008 target_ulong sc_lo2
;
2009 target_ulong sc_hi3
;
2010 target_ulong sc_lo3
;
2012 # else /* N32 || N64 */
2013 struct target_sigcontext
{
2014 uint64_t sc_regs
[32];
2015 uint64_t sc_fpregs
[32];
2025 uint32_t sc_fpc_csr
;
2026 uint32_t sc_used_math
;
2028 uint32_t sc_reserved
;
2033 uint32_t sf_ass
[4]; /* argument save space for o32 */
2034 uint32_t sf_code
[2]; /* signal trampoline */
2035 struct target_sigcontext sf_sc
;
2036 target_sigset_t sf_mask
;
2039 struct target_ucontext
{
2040 target_ulong tuc_flags
;
2041 target_ulong tuc_link
;
2042 target_stack_t tuc_stack
;
2044 struct target_sigcontext tuc_mcontext
;
2045 target_sigset_t tuc_sigmask
;
2048 struct target_rt_sigframe
{
2049 uint32_t rs_ass
[4]; /* argument save space for o32 */
2050 uint32_t rs_code
[2]; /* signal trampoline */
2051 struct target_siginfo rs_info
;
2052 struct target_ucontext rs_uc
;
2055 /* Install trampoline to jump back from signal handler */
/* Write the two-instruction MIPS return trampoline into the signal frame:
 * "li v0, <syscall>; syscall".  Returns 0. */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     */

    __put_user(0x24020000 + syscall, tramp + 0);  /* li v0, syscall */
    __put_user(0x0000000c          , tramp + 1);  /* syscall */
    return 0;
}
2072 static inline void setup_sigcontext(CPUMIPSState
*regs
,
2073 struct target_sigcontext
*sc
)
2077 __put_user(exception_resume_pc(regs
), &sc
->sc_pc
);
2078 regs
->hflags
&= ~MIPS_HFLAG_BMASK
;
2080 __put_user(0, &sc
->sc_regs
[0]);
2081 for (i
= 1; i
< 32; ++i
) {
2082 __put_user(regs
->active_tc
.gpr
[i
], &sc
->sc_regs
[i
]);
2085 __put_user(regs
->active_tc
.HI
[0], &sc
->sc_mdhi
);
2086 __put_user(regs
->active_tc
.LO
[0], &sc
->sc_mdlo
);
2088 /* Rather than checking for dsp existence, always copy. The storage
2089 would just be garbage otherwise. */
2090 __put_user(regs
->active_tc
.HI
[1], &sc
->sc_hi1
);
2091 __put_user(regs
->active_tc
.HI
[2], &sc
->sc_hi2
);
2092 __put_user(regs
->active_tc
.HI
[3], &sc
->sc_hi3
);
2093 __put_user(regs
->active_tc
.LO
[1], &sc
->sc_lo1
);
2094 __put_user(regs
->active_tc
.LO
[2], &sc
->sc_lo2
);
2095 __put_user(regs
->active_tc
.LO
[3], &sc
->sc_lo3
);
2097 uint32_t dsp
= cpu_rddsp(0x3ff, regs
);
2098 __put_user(dsp
, &sc
->sc_dsp
);
2101 __put_user(1, &sc
->sc_used_math
);
2103 for (i
= 0; i
< 32; ++i
) {
2104 __put_user(regs
->active_fpu
.fpr
[i
].d
, &sc
->sc_fpregs
[i
]);
2109 restore_sigcontext(CPUMIPSState
*regs
, struct target_sigcontext
*sc
)
2113 __get_user(regs
->CP0_EPC
, &sc
->sc_pc
);
2115 __get_user(regs
->active_tc
.HI
[0], &sc
->sc_mdhi
);
2116 __get_user(regs
->active_tc
.LO
[0], &sc
->sc_mdlo
);
2118 for (i
= 1; i
< 32; ++i
) {
2119 __get_user(regs
->active_tc
.gpr
[i
], &sc
->sc_regs
[i
]);
2122 __get_user(regs
->active_tc
.HI
[1], &sc
->sc_hi1
);
2123 __get_user(regs
->active_tc
.HI
[2], &sc
->sc_hi2
);
2124 __get_user(regs
->active_tc
.HI
[3], &sc
->sc_hi3
);
2125 __get_user(regs
->active_tc
.LO
[1], &sc
->sc_lo1
);
2126 __get_user(regs
->active_tc
.LO
[2], &sc
->sc_lo2
);
2127 __get_user(regs
->active_tc
.LO
[3], &sc
->sc_lo3
);
2130 __get_user(dsp
, &sc
->sc_dsp
);
2131 cpu_wrdsp(dsp
, 0x3ff, regs
);
2134 for (i
= 0; i
< 32; ++i
) {
2135 __get_user(regs
->active_fpu
.fpr
[i
].d
, &sc
->sc_fpregs
[i
]);
2140 * Determine which stack to use..
2142 static inline abi_ulong
2143 get_sigframe(struct target_sigaction
*ka
, CPUMIPSState
*regs
, size_t frame_size
)
2147 /* Default to using normal stack */
2148 sp
= regs
->active_tc
.gpr
[29];
2151 * FPU emulator may have its own trampoline active just
2152 * above the user stack, 16-bytes before the next lowest
2153 * 16 byte boundary. Try to avoid trashing it.
2157 /* This is the X/Open sanctioned signal stack switching. */
2158 if ((ka
->sa_flags
& TARGET_SA_ONSTACK
) && (sas_ss_flags (sp
) == 0)) {
2159 sp
= target_sigaltstack_used
.ss_sp
+ target_sigaltstack_used
.ss_size
;
2162 return (sp
- frame_size
) & ~7;
2165 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState
*env
)
2167 if (env
->insn_flags
& (ASE_MIPS16
| ASE_MICROMIPS
)) {
2168 env
->hflags
&= ~MIPS_HFLAG_M16
;
2169 env
->hflags
|= (env
->active_tc
.PC
& 1) << MIPS_HFLAG_M16_SHIFT
;
2170 env
->active_tc
.PC
&= ~(target_ulong
) 1;
2174 # if defined(TARGET_ABI_MIPSO32)
2175 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2176 static void setup_frame(int sig
, struct target_sigaction
* ka
,
2177 target_sigset_t
*set
, CPUMIPSState
*regs
)
2179 struct sigframe
*frame
;
2180 abi_ulong frame_addr
;
2183 frame_addr
= get_sigframe(ka
, regs
, sizeof(*frame
));
2184 trace_user_setup_frame(regs
, frame_addr
);
2185 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0)) {
2189 install_sigtramp(frame
->sf_code
, TARGET_NR_sigreturn
);
2191 setup_sigcontext(regs
, &frame
->sf_sc
);
2193 for(i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
2194 __put_user(set
->sig
[i
], &frame
->sf_mask
.sig
[i
]);
2198 * Arguments to signal handler:
2200 * a0 = signal number
2201 * a1 = 0 (should be cause)
2202 * a2 = pointer to struct sigcontext
2204 * $25 and PC point to the signal handler, $29 points to the
2207 regs
->active_tc
.gpr
[ 4] = sig
;
2208 regs
->active_tc
.gpr
[ 5] = 0;
2209 regs
->active_tc
.gpr
[ 6] = frame_addr
+ offsetof(struct sigframe
, sf_sc
);
2210 regs
->active_tc
.gpr
[29] = frame_addr
;
2211 regs
->active_tc
.gpr
[31] = frame_addr
+ offsetof(struct sigframe
, sf_code
);
2212 /* The original kernel code sets CP0_EPC to the handler
2213 * since it returns to userland using eret
2214 * we cannot do this here, and we must set PC directly */
2215 regs
->active_tc
.PC
= regs
->active_tc
.gpr
[25] = ka
->_sa_handler
;
2216 mips_set_hflags_isa_mode_from_pc(regs
);
2217 unlock_user_struct(frame
, frame_addr
, 1);
2224 long do_sigreturn(CPUMIPSState
*regs
)
2226 struct sigframe
*frame
;
2227 abi_ulong frame_addr
;
2229 target_sigset_t target_set
;
2232 frame_addr
= regs
->active_tc
.gpr
[29];
2233 trace_user_do_sigreturn(regs
, frame_addr
);
2234 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1))
2237 for(i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
2238 __get_user(target_set
.sig
[i
], &frame
->sf_mask
.sig
[i
]);
2241 target_to_host_sigset_internal(&blocked
, &target_set
);
2242 set_sigmask(&blocked
);
2244 restore_sigcontext(regs
, &frame
->sf_sc
);
2248 * Don't let your children do this ...
2250 __asm__
__volatile__(
2258 regs
->active_tc
.PC
= regs
->CP0_EPC
;
2259 mips_set_hflags_isa_mode_from_pc(regs
);
2260 /* I am not sure this is right, but it seems to work
2261 * maybe a problem with nested signals ? */
2263 return -TARGET_QEMU_ESIGRETURN
;
2266 force_sig(TARGET_SIGSEGV
);
2267 return -TARGET_QEMU_ESIGRETURN
;
2271 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
2272 target_siginfo_t
*info
,
2273 target_sigset_t
*set
, CPUMIPSState
*env
)
2275 struct target_rt_sigframe
*frame
;
2276 abi_ulong frame_addr
;
2279 frame_addr
= get_sigframe(ka
, env
, sizeof(*frame
));
2280 trace_user_setup_rt_frame(env
, frame_addr
);
2281 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0)) {
2285 install_sigtramp(frame
->rs_code
, TARGET_NR_rt_sigreturn
);
2287 tswap_siginfo(&frame
->rs_info
, info
);
2289 __put_user(0, &frame
->rs_uc
.tuc_flags
);
2290 __put_user(0, &frame
->rs_uc
.tuc_link
);
2291 __put_user(target_sigaltstack_used
.ss_sp
, &frame
->rs_uc
.tuc_stack
.ss_sp
);
2292 __put_user(target_sigaltstack_used
.ss_size
, &frame
->rs_uc
.tuc_stack
.ss_size
);
2293 __put_user(sas_ss_flags(get_sp_from_cpustate(env
)),
2294 &frame
->rs_uc
.tuc_stack
.ss_flags
);
2296 setup_sigcontext(env
, &frame
->rs_uc
.tuc_mcontext
);
2298 for(i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
2299 __put_user(set
->sig
[i
], &frame
->rs_uc
.tuc_sigmask
.sig
[i
]);
2303 * Arguments to signal handler:
2305 * a0 = signal number
2306 * a1 = pointer to siginfo_t
2307 * a2 = pointer to ucontext_t
2309 * $25 and PC point to the signal handler, $29 points to the
2312 env
->active_tc
.gpr
[ 4] = sig
;
2313 env
->active_tc
.gpr
[ 5] = frame_addr
2314 + offsetof(struct target_rt_sigframe
, rs_info
);
2315 env
->active_tc
.gpr
[ 6] = frame_addr
2316 + offsetof(struct target_rt_sigframe
, rs_uc
);
2317 env
->active_tc
.gpr
[29] = frame_addr
;
2318 env
->active_tc
.gpr
[31] = frame_addr
2319 + offsetof(struct target_rt_sigframe
, rs_code
);
2320 /* The original kernel code sets CP0_EPC to the handler
2321 * since it returns to userland using eret
2322 * we cannot do this here, and we must set PC directly */
2323 env
->active_tc
.PC
= env
->active_tc
.gpr
[25] = ka
->_sa_handler
;
2324 mips_set_hflags_isa_mode_from_pc(env
);
2325 unlock_user_struct(frame
, frame_addr
, 1);
2329 unlock_user_struct(frame
, frame_addr
, 1);
2333 long do_rt_sigreturn(CPUMIPSState
*env
)
2335 struct target_rt_sigframe
*frame
;
2336 abi_ulong frame_addr
;
2339 frame_addr
= env
->active_tc
.gpr
[29];
2340 trace_user_do_rt_sigreturn(env
, frame_addr
);
2341 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1)) {
2345 target_to_host_sigset(&blocked
, &frame
->rs_uc
.tuc_sigmask
);
2346 set_sigmask(&blocked
);
2348 restore_sigcontext(env
, &frame
->rs_uc
.tuc_mcontext
);
2350 if (do_sigaltstack(frame_addr
+
2351 offsetof(struct target_rt_sigframe
, rs_uc
.tuc_stack
),
2352 0, get_sp_from_cpustate(env
)) == -EFAULT
)
2355 env
->active_tc
.PC
= env
->CP0_EPC
;
2356 mips_set_hflags_isa_mode_from_pc(env
);
2357 /* I am not sure this is right, but it seems to work
2358 * maybe a problem with nested signals ? */
2360 return -TARGET_QEMU_ESIGRETURN
;
2363 force_sig(TARGET_SIGSEGV
);
2364 return -TARGET_QEMU_ESIGRETURN
;
2367 #elif defined(TARGET_PPC)
2369 /* Size of dummy stack frame allocated when calling signal handler.
2370 See arch/powerpc/include/asm/ptrace.h. */
2371 #if defined(TARGET_PPC64)
2372 #define SIGNAL_FRAMESIZE 128
2374 #define SIGNAL_FRAMESIZE 64
2377 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
2378 on 64-bit PPC, sigcontext and mcontext are one and the same. */
2379 struct target_mcontext
{
2380 target_ulong mc_gregs
[48];
2381 /* Includes fpscr. */
2382 uint64_t mc_fregs
[33];
2383 #if defined(TARGET_PPC64)
2384 /* Pointer to the vector regs */
2385 target_ulong v_regs
;
2387 target_ulong mc_pad
[2];
2389 /* We need to handle Altivec and SPE at the same time, which no
2390 kernel needs to do. Fortunately, the kernel defines this bit to
2391 be Altivec-register-large all the time, rather than trying to
2392 twiddle it based on the specific platform. */
2394 /* SPE vector registers. One extra for SPEFSCR. */
2396 /* Altivec vector registers. The packing of VSCR and VRSAVE
2397 varies depending on whether we're PPC64 or not: PPC64 splits
2398 them apart; PPC32 stuffs them together.
2399 We also need to account for the VSX registers on PPC64
2401 #if defined(TARGET_PPC64)
2402 #define QEMU_NVRREG (34 + 16)
2403 /* On ppc64, this mcontext structure is naturally *unaligned*,
2404 * or rather it is aligned on a 8 bytes boundary but not on
2405 * a 16 bytes one. This pad fixes it up. This is also why the
2406 * vector regs are referenced by the v_regs pointer above so
2407 * any amount of padding can be added here
2411 /* On ppc32, we are already aligned to 16 bytes */
2412 #define QEMU_NVRREG 33
2414 /* We cannot use ppc_avr_t here as we do *not* want the implied
2415 * 16-bytes alignment that would result from it. This would have
2416 * the effect of making the whole struct target_mcontext aligned
2417 * which breaks the layout of struct target_ucontext on ppc64.
2419 uint64_t altivec
[QEMU_NVRREG
][2];
2424 /* See arch/powerpc/include/asm/sigcontext.h. */
2425 struct target_sigcontext
{
2426 target_ulong _unused
[4];
2428 #if defined(TARGET_PPC64)
2431 target_ulong handler
;
2432 target_ulong oldmask
;
2433 target_ulong regs
; /* struct pt_regs __user * */
2434 #if defined(TARGET_PPC64)
2435 struct target_mcontext mcontext
;
/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};
2492 struct target_ucontext
{
2493 target_ulong tuc_flags
;
2494 target_ulong tuc_link
; /* ucontext_t __user * */
2495 struct target_sigaltstack tuc_stack
;
2496 #if !defined(TARGET_PPC64)
2498 target_ulong tuc_regs
; /* struct mcontext __user *
2499 points to uc_mcontext field */
2501 target_sigset_t tuc_sigmask
;
2502 #if defined(TARGET_PPC64)
2503 target_sigset_t unused
[15]; /* Allow for uc_sigmask growth */
2504 struct target_sigcontext tuc_sigcontext
;
2506 int32_t tuc_maskext
[30];
2507 int32_t tuc_pad2
[3];
2508 struct target_mcontext tuc_mcontext
;
2512 /* See arch/powerpc/kernel/signal_32.c. */
2513 struct target_sigframe
{
2514 struct target_sigcontext sctx
;
2515 struct target_mcontext mctx
;
2519 #if defined(TARGET_PPC64)
2521 #define TARGET_TRAMP_SIZE 6
2523 struct target_rt_sigframe
{
2524 /* sys_rt_sigreturn requires the ucontext be the first field */
2525 struct target_ucontext uc
;
2526 target_ulong _unused
[2];
2527 uint32_t trampoline
[TARGET_TRAMP_SIZE
];
2528 target_ulong pinfo
; /* struct siginfo __user * */
2529 target_ulong puc
; /* void __user * */
2530 struct target_siginfo info
;
2531 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
2533 } __attribute__((aligned(16)));
2537 struct target_rt_sigframe
{
2538 struct target_siginfo info
;
2539 struct target_ucontext uc
;
#if defined(TARGET_PPC64)

/* ELFv1 function descriptor: code entry point plus TOC pointer.  */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad
2557 /* See arch/powerpc/kernel/signal.c. */
2558 static target_ulong
get_sigframe(struct target_sigaction
*ka
,
2564 oldsp
= env
->gpr
[1];
2566 if ((ka
->sa_flags
& TARGET_SA_ONSTACK
) &&
2567 (sas_ss_flags(oldsp
) == 0)) {
2568 oldsp
= (target_sigaltstack_used
.ss_sp
2569 + target_sigaltstack_used
.ss_size
);
2572 return (oldsp
- frame_size
) & ~0xFUL
;
2575 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
2576 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
2577 #define PPC_VEC_HI 0
2578 #define PPC_VEC_LO 1
2580 #define PPC_VEC_HI 1
2581 #define PPC_VEC_LO 0
2585 static void save_user_regs(CPUPPCState
*env
, struct target_mcontext
*frame
)
2587 target_ulong msr
= env
->msr
;
2589 target_ulong ccr
= 0;
2591 /* In general, the kernel attempts to be intelligent about what it
2592 needs to save for Altivec/FP/SPE registers. We don't care that
2593 much, so we just go ahead and save everything. */
2595 /* Save general registers. */
2596 for (i
= 0; i
< ARRAY_SIZE(env
->gpr
); i
++) {
2597 __put_user(env
->gpr
[i
], &frame
->mc_gregs
[i
]);
2599 __put_user(env
->nip
, &frame
->mc_gregs
[TARGET_PT_NIP
]);
2600 __put_user(env
->ctr
, &frame
->mc_gregs
[TARGET_PT_CTR
]);
2601 __put_user(env
->lr
, &frame
->mc_gregs
[TARGET_PT_LNK
]);
2602 __put_user(env
->xer
, &frame
->mc_gregs
[TARGET_PT_XER
]);
2604 for (i
= 0; i
< ARRAY_SIZE(env
->crf
); i
++) {
2605 ccr
|= env
->crf
[i
] << (32 - ((i
+ 1) * 4));
2607 __put_user(ccr
, &frame
->mc_gregs
[TARGET_PT_CCR
]);
2609 /* Save Altivec registers if necessary. */
2610 if (env
->insns_flags
& PPC_ALTIVEC
) {
2612 for (i
= 0; i
< ARRAY_SIZE(env
->avr
); i
++) {
2613 ppc_avr_t
*avr
= &env
->avr
[i
];
2614 ppc_avr_t
*vreg
= (ppc_avr_t
*)&frame
->mc_vregs
.altivec
[i
];
2616 __put_user(avr
->u64
[PPC_VEC_HI
], &vreg
->u64
[0]);
2617 __put_user(avr
->u64
[PPC_VEC_LO
], &vreg
->u64
[1]);
2619 /* Set MSR_VR in the saved MSR value to indicate that
2620 frame->mc_vregs contains valid data. */
2622 #if defined(TARGET_PPC64)
2623 vrsave
= (uint32_t *)&frame
->mc_vregs
.altivec
[33];
2624 /* 64-bit needs to put a pointer to the vectors in the frame */
2625 __put_user(h2g(frame
->mc_vregs
.altivec
), &frame
->v_regs
);
2627 vrsave
= (uint32_t *)&frame
->mc_vregs
.altivec
[32];
2629 __put_user((uint32_t)env
->spr
[SPR_VRSAVE
], vrsave
);
2632 /* Save VSX second halves */
2633 if (env
->insns_flags2
& PPC2_VSX
) {
2634 uint64_t *vsregs
= (uint64_t *)&frame
->mc_vregs
.altivec
[34];
2635 for (i
= 0; i
< ARRAY_SIZE(env
->vsr
); i
++) {
2636 __put_user(env
->vsr
[i
], &vsregs
[i
]);
2640 /* Save floating point registers. */
2641 if (env
->insns_flags
& PPC_FLOAT
) {
2642 for (i
= 0; i
< ARRAY_SIZE(env
->fpr
); i
++) {
2643 __put_user(env
->fpr
[i
], &frame
->mc_fregs
[i
]);
2645 __put_user((uint64_t) env
->fpscr
, &frame
->mc_fregs
[32]);
2648 /* Save SPE registers. The kernel only saves the high half. */
2649 if (env
->insns_flags
& PPC_SPE
) {
2650 #if defined(TARGET_PPC64)
2651 for (i
= 0; i
< ARRAY_SIZE(env
->gpr
); i
++) {
2652 __put_user(env
->gpr
[i
] >> 32, &frame
->mc_vregs
.spe
[i
]);
2655 for (i
= 0; i
< ARRAY_SIZE(env
->gprh
); i
++) {
2656 __put_user(env
->gprh
[i
], &frame
->mc_vregs
.spe
[i
]);
2659 /* Set MSR_SPE in the saved MSR value to indicate that
2660 frame->mc_vregs contains valid data. */
2662 __put_user(env
->spe_fscr
, &frame
->mc_vregs
.spe
[32]);
2666 __put_user(msr
, &frame
->mc_gregs
[TARGET_PT_MSR
]);
2669 static void encode_trampoline(int sigret
, uint32_t *tramp
)
2671 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
2673 __put_user(0x38000000 | sigret
, &tramp
[0]);
2674 __put_user(0x44000002, &tramp
[1]);
2678 static void restore_user_regs(CPUPPCState
*env
,
2679 struct target_mcontext
*frame
, int sig
)
2681 target_ulong save_r2
= 0;
2688 save_r2
= env
->gpr
[2];
2691 /* Restore general registers. */
2692 for (i
= 0; i
< ARRAY_SIZE(env
->gpr
); i
++) {
2693 __get_user(env
->gpr
[i
], &frame
->mc_gregs
[i
]);
2695 __get_user(env
->nip
, &frame
->mc_gregs
[TARGET_PT_NIP
]);
2696 __get_user(env
->ctr
, &frame
->mc_gregs
[TARGET_PT_CTR
]);
2697 __get_user(env
->lr
, &frame
->mc_gregs
[TARGET_PT_LNK
]);
2698 __get_user(env
->xer
, &frame
->mc_gregs
[TARGET_PT_XER
]);
2699 __get_user(ccr
, &frame
->mc_gregs
[TARGET_PT_CCR
]);
2701 for (i
= 0; i
< ARRAY_SIZE(env
->crf
); i
++) {
2702 env
->crf
[i
] = (ccr
>> (32 - ((i
+ 1) * 4))) & 0xf;
2706 env
->gpr
[2] = save_r2
;
2709 __get_user(msr
, &frame
->mc_gregs
[TARGET_PT_MSR
]);
2711 /* If doing signal return, restore the previous little-endian mode. */
2713 env
->msr
= (env
->msr
& ~(1ull << MSR_LE
)) | (msr
& (1ull << MSR_LE
));
2715 /* Restore Altivec registers if necessary. */
2716 if (env
->insns_flags
& PPC_ALTIVEC
) {
2719 #if defined(TARGET_PPC64)
2721 /* 64-bit needs to recover the pointer to the vectors from the frame */
2722 __get_user(v_addr
, &frame
->v_regs
);
2723 v_regs
= g2h(v_addr
);
2725 v_regs
= (ppc_avr_t
*)frame
->mc_vregs
.altivec
;
2727 for (i
= 0; i
< ARRAY_SIZE(env
->avr
); i
++) {
2728 ppc_avr_t
*avr
= &env
->avr
[i
];
2729 ppc_avr_t
*vreg
= &v_regs
[i
];
2731 __get_user(avr
->u64
[PPC_VEC_HI
], &vreg
->u64
[0]);
2732 __get_user(avr
->u64
[PPC_VEC_LO
], &vreg
->u64
[1]);
2734 /* Set MSR_VEC in the saved MSR value to indicate that
2735 frame->mc_vregs contains valid data. */
2736 #if defined(TARGET_PPC64)
2737 vrsave
= (uint32_t *)&v_regs
[33];
2739 vrsave
= (uint32_t *)&v_regs
[32];
2741 __get_user(env
->spr
[SPR_VRSAVE
], vrsave
);
2744 /* Restore VSX second halves */
2745 if (env
->insns_flags2
& PPC2_VSX
) {
2746 uint64_t *vsregs
= (uint64_t *)&frame
->mc_vregs
.altivec
[34];
2747 for (i
= 0; i
< ARRAY_SIZE(env
->vsr
); i
++) {
2748 __get_user(env
->vsr
[i
], &vsregs
[i
]);
2752 /* Restore floating point registers. */
2753 if (env
->insns_flags
& PPC_FLOAT
) {
2755 for (i
= 0; i
< ARRAY_SIZE(env
->fpr
); i
++) {
2756 __get_user(env
->fpr
[i
], &frame
->mc_fregs
[i
]);
2758 __get_user(fpscr
, &frame
->mc_fregs
[32]);
2759 env
->fpscr
= (uint32_t) fpscr
;
2762 /* Save SPE registers. The kernel only saves the high half. */
2763 if (env
->insns_flags
& PPC_SPE
) {
2764 #if defined(TARGET_PPC64)
2765 for (i
= 0; i
< ARRAY_SIZE(env
->gpr
); i
++) {
2768 __get_user(hi
, &frame
->mc_vregs
.spe
[i
]);
2769 env
->gpr
[i
] = ((uint64_t)hi
<< 32) | ((uint32_t) env
->gpr
[i
]);
2772 for (i
= 0; i
< ARRAY_SIZE(env
->gprh
); i
++) {
2773 __get_user(env
->gprh
[i
], &frame
->mc_vregs
.spe
[i
]);
2776 __get_user(env
->spe_fscr
, &frame
->mc_vregs
.spe
[32]);
2780 #if !defined(TARGET_PPC64)
2781 static void setup_frame(int sig
, struct target_sigaction
*ka
,
2782 target_sigset_t
*set
, CPUPPCState
*env
)
2784 struct target_sigframe
*frame
;
2785 struct target_sigcontext
*sc
;
2786 target_ulong frame_addr
, newsp
;
2789 frame_addr
= get_sigframe(ka
, env
, sizeof(*frame
));
2790 trace_user_setup_frame(env
, frame_addr
);
2791 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 1))
2795 __put_user(ka
->_sa_handler
, &sc
->handler
);
2796 __put_user(set
->sig
[0], &sc
->oldmask
);
2797 __put_user(set
->sig
[1], &sc
->_unused
[3]);
2798 __put_user(h2g(&frame
->mctx
), &sc
->regs
);
2799 __put_user(sig
, &sc
->signal
);
2801 /* Save user regs. */
2802 save_user_regs(env
, &frame
->mctx
);
2804 /* Construct the trampoline code on the stack. */
2805 encode_trampoline(TARGET_NR_sigreturn
, (uint32_t *)&frame
->mctx
.tramp
);
2807 /* The kernel checks for the presence of a VDSO here. We don't
2808 emulate a vdso, so use a sigreturn system call. */
2809 env
->lr
= (target_ulong
) h2g(frame
->mctx
.tramp
);
2811 /* Turn off all fp exceptions. */
2814 /* Create a stack frame for the caller of the handler. */
2815 newsp
= frame_addr
- SIGNAL_FRAMESIZE
;
2816 err
|= put_user(env
->gpr
[1], newsp
, target_ulong
);
2821 /* Set up registers for signal handler. */
2822 env
->gpr
[1] = newsp
;
2824 env
->gpr
[4] = frame_addr
+ offsetof(struct target_sigframe
, sctx
);
2826 env
->nip
= (target_ulong
) ka
->_sa_handler
;
2828 /* Signal handlers are entered in big-endian mode. */
2829 env
->msr
&= ~(1ull << MSR_LE
);
2831 unlock_user_struct(frame
, frame_addr
, 1);
2835 unlock_user_struct(frame
, frame_addr
, 1);
2838 #endif /* !defined(TARGET_PPC64) */
2840 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
2841 target_siginfo_t
*info
,
2842 target_sigset_t
*set
, CPUPPCState
*env
)
2844 struct target_rt_sigframe
*rt_sf
;
2845 uint32_t *trampptr
= 0;
2846 struct target_mcontext
*mctx
= 0;
2847 target_ulong rt_sf_addr
, newsp
= 0;
2849 #if defined(TARGET_PPC64)
2850 struct target_sigcontext
*sc
= 0;
2851 struct image_info
*image
= ((TaskState
*)thread_cpu
->opaque
)->info
;
2854 rt_sf_addr
= get_sigframe(ka
, env
, sizeof(*rt_sf
));
2855 if (!lock_user_struct(VERIFY_WRITE
, rt_sf
, rt_sf_addr
, 1))
2858 tswap_siginfo(&rt_sf
->info
, info
);
2860 __put_user(0, &rt_sf
->uc
.tuc_flags
);
2861 __put_user(0, &rt_sf
->uc
.tuc_link
);
2862 __put_user((target_ulong
)target_sigaltstack_used
.ss_sp
,
2863 &rt_sf
->uc
.tuc_stack
.ss_sp
);
2864 __put_user(sas_ss_flags(env
->gpr
[1]),
2865 &rt_sf
->uc
.tuc_stack
.ss_flags
);
2866 __put_user(target_sigaltstack_used
.ss_size
,
2867 &rt_sf
->uc
.tuc_stack
.ss_size
);
2868 #if !defined(TARGET_PPC64)
2869 __put_user(h2g (&rt_sf
->uc
.tuc_mcontext
),
2870 &rt_sf
->uc
.tuc_regs
);
2872 for(i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
2873 __put_user(set
->sig
[i
], &rt_sf
->uc
.tuc_sigmask
.sig
[i
]);
2876 #if defined(TARGET_PPC64)
2877 mctx
= &rt_sf
->uc
.tuc_sigcontext
.mcontext
;
2878 trampptr
= &rt_sf
->trampoline
[0];
2880 sc
= &rt_sf
->uc
.tuc_sigcontext
;
2881 __put_user(h2g(mctx
), &sc
->regs
);
2882 __put_user(sig
, &sc
->signal
);
2884 mctx
= &rt_sf
->uc
.tuc_mcontext
;
2885 trampptr
= (uint32_t *)&rt_sf
->uc
.tuc_mcontext
.tramp
;
2888 save_user_regs(env
, mctx
);
2889 encode_trampoline(TARGET_NR_rt_sigreturn
, trampptr
);
2891 /* The kernel checks for the presence of a VDSO here. We don't
2892 emulate a vdso, so use a sigreturn system call. */
2893 env
->lr
= (target_ulong
) h2g(trampptr
);
2895 /* Turn off all fp exceptions. */
2898 /* Create a stack frame for the caller of the handler. */
2899 newsp
= rt_sf_addr
- (SIGNAL_FRAMESIZE
+ 16);
2900 err
|= put_user(env
->gpr
[1], newsp
, target_ulong
);
2905 /* Set up registers for signal handler. */
2906 env
->gpr
[1] = newsp
;
2907 env
->gpr
[3] = (target_ulong
) sig
;
2908 env
->gpr
[4] = (target_ulong
) h2g(&rt_sf
->info
);
2909 env
->gpr
[5] = (target_ulong
) h2g(&rt_sf
->uc
);
2910 env
->gpr
[6] = (target_ulong
) h2g(rt_sf
);
2912 #if defined(TARGET_PPC64)
2913 if (get_ppc64_abi(image
) < 2) {
2914 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
2915 struct target_func_ptr
*handler
=
2916 (struct target_func_ptr
*)g2h(ka
->_sa_handler
);
2917 env
->nip
= tswapl(handler
->entry
);
2918 env
->gpr
[2] = tswapl(handler
->toc
);
2920 /* ELFv2 PPC64 function pointers are entry points, but R12
2921 * must also be set */
2922 env
->nip
= tswapl((target_ulong
) ka
->_sa_handler
);
2923 env
->gpr
[12] = env
->nip
;
2926 env
->nip
= (target_ulong
) ka
->_sa_handler
;
2929 /* Signal handlers are entered in big-endian mode. */
2930 env
->msr
&= ~(1ull << MSR_LE
);
2932 unlock_user_struct(rt_sf
, rt_sf_addr
, 1);
2936 unlock_user_struct(rt_sf
, rt_sf_addr
, 1);
2941 #if !defined(TARGET_PPC64)
2942 long do_sigreturn(CPUPPCState
*env
)
2944 struct target_sigcontext
*sc
= NULL
;
2945 struct target_mcontext
*sr
= NULL
;
2946 target_ulong sr_addr
= 0, sc_addr
;
2948 target_sigset_t set
;
2950 sc_addr
= env
->gpr
[1] + SIGNAL_FRAMESIZE
;
2951 if (!lock_user_struct(VERIFY_READ
, sc
, sc_addr
, 1))
2954 #if defined(TARGET_PPC64)
2955 set
.sig
[0] = sc
->oldmask
+ ((uint64_t)(sc
->_unused
[3]) << 32);
2957 __get_user(set
.sig
[0], &sc
->oldmask
);
2958 __get_user(set
.sig
[1], &sc
->_unused
[3]);
2960 target_to_host_sigset_internal(&blocked
, &set
);
2961 set_sigmask(&blocked
);
2963 __get_user(sr_addr
, &sc
->regs
);
2964 if (!lock_user_struct(VERIFY_READ
, sr
, sr_addr
, 1))
2966 restore_user_regs(env
, sr
, 1);
2968 unlock_user_struct(sr
, sr_addr
, 1);
2969 unlock_user_struct(sc
, sc_addr
, 1);
2970 return -TARGET_QEMU_ESIGRETURN
;
2973 unlock_user_struct(sr
, sr_addr
, 1);
2974 unlock_user_struct(sc
, sc_addr
, 1);
2975 force_sig(TARGET_SIGSEGV
);
2976 return -TARGET_QEMU_ESIGRETURN
;
2978 #endif /* !defined(TARGET_PPC64) */
2980 /* See arch/powerpc/kernel/signal_32.c. */
2981 static int do_setcontext(struct target_ucontext
*ucp
, CPUPPCState
*env
, int sig
)
2983 struct target_mcontext
*mcp
;
2984 target_ulong mcp_addr
;
2986 target_sigset_t set
;
2988 if (copy_from_user(&set
, h2g(ucp
) + offsetof(struct target_ucontext
, tuc_sigmask
),
2992 #if defined(TARGET_PPC64)
2993 mcp_addr
= h2g(ucp
) +
2994 offsetof(struct target_ucontext
, tuc_sigcontext
.mcontext
);
2996 __get_user(mcp_addr
, &ucp
->tuc_regs
);
2999 if (!lock_user_struct(VERIFY_READ
, mcp
, mcp_addr
, 1))
3002 target_to_host_sigset_internal(&blocked
, &set
);
3003 set_sigmask(&blocked
);
3004 restore_user_regs(env
, mcp
, sig
);
3006 unlock_user_struct(mcp
, mcp_addr
, 1);
3010 long do_rt_sigreturn(CPUPPCState
*env
)
3012 struct target_rt_sigframe
*rt_sf
= NULL
;
3013 target_ulong rt_sf_addr
;
3015 rt_sf_addr
= env
->gpr
[1] + SIGNAL_FRAMESIZE
+ 16;
3016 if (!lock_user_struct(VERIFY_READ
, rt_sf
, rt_sf_addr
, 1))
3019 if (do_setcontext(&rt_sf
->uc
, env
, 1))
3022 do_sigaltstack(rt_sf_addr
3023 + offsetof(struct target_rt_sigframe
, uc
.tuc_stack
),
3026 unlock_user_struct(rt_sf
, rt_sf_addr
, 1);
3027 return -TARGET_QEMU_ESIGRETURN
;
3030 unlock_user_struct(rt_sf
, rt_sf_addr
, 1);
3031 force_sig(TARGET_SIGSEGV
);
3032 return -TARGET_QEMU_ESIGRETURN
;
3035 #elif defined(TARGET_RISCV)
3037 /* Signal handler invocation must be transparent for the code being
3038 interrupted. Complete CPU (hart) state is saved on entry and restored
3039 before returning from the handler. Process sigmask is also saved to block
3040 signals while the handler is running. The handler gets its own stack,
3041 which also doubles as storage for the CPU state and sigmask.
3043 The code below is qemu re-implementation of arch/riscv/kernel/signal.c */
3045 struct target_sigcontext
{
3047 abi_long gpr
[31]; /* x0 is not present, so all offsets must be -1 */
3050 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
3052 struct target_ucontext
{
3053 unsigned long uc_flags
;
3054 struct target_ucontext
*uc_link
;
3055 target_stack_t uc_stack
;
3056 struct target_sigcontext uc_mcontext
;
3057 target_sigset_t uc_sigmask
;
3060 struct target_rt_sigframe
{
3061 uint32_t tramp
[2]; /* not in kernel, which uses VDSO instead */
3062 struct target_siginfo info
;
3063 struct target_ucontext uc
;
3066 static abi_ulong
get_sigframe(struct target_sigaction
*ka
,
3067 CPURISCVState
*regs
, size_t framesize
)
3069 abi_ulong sp
= regs
->gpr
[xSP
];
3070 int onsigstack
= on_sig_stack(sp
);
3073 /* This is the X/Open sanctioned signal stack switching. */
3074 if ((ka
->sa_flags
& TARGET_SA_ONSTACK
) != 0 && !onsigstack
) {
3075 sp
= target_sigaltstack_used
.ss_sp
+ target_sigaltstack_used
.ss_size
;
3079 sp
&= ~3UL; /* align sp on 4-byte boundary */
3081 /* If we are on the alternate signal stack and would overflow it, don't.
3082 Return an always-bogus address instead so we will die with SIGSEGV. */
3083 if (onsigstack
&& !likely(on_sig_stack(sp
))) {
3090 static void setup_sigcontext(struct target_sigcontext
*sc
, CPURISCVState
*env
)
3094 __put_user(env
->pc
, &sc
->pc
);
3096 for (i
= 1; i
< 32; i
++) {
3097 __put_user(env
->gpr
[i
], &sc
->gpr
[i
- 1]);
3099 for (i
= 0; i
< 32; i
++) {
3100 __put_user(env
->fpr
[i
], &sc
->fpr
[i
]);
3103 uint32_t fcsr
= csr_read_helper(env
, CSR_FCSR
); /*riscv_get_fcsr(env);*/
3104 __put_user(fcsr
, &sc
->fcsr
);
3107 static void setup_ucontext(struct target_ucontext
*uc
,
3108 CPURISCVState
*env
, target_sigset_t
*set
)
3110 abi_ulong ss_sp
= (target_ulong
)target_sigaltstack_used
.ss_sp
;
3111 abi_ulong ss_flags
= sas_ss_flags(env
->gpr
[xSP
]);
3112 abi_ulong ss_size
= target_sigaltstack_used
.ss_size
;
3114 __put_user(0, &(uc
->uc_flags
));
3115 __put_user(0, &(uc
->uc_link
));
3117 __put_user(ss_sp
, &(uc
->uc_stack
.ss_sp
));
3118 __put_user(ss_flags
, &(uc
->uc_stack
.ss_flags
));
3119 __put_user(ss_size
, &(uc
->uc_stack
.ss_size
));
3122 for (i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
3123 __put_user(set
->sig
[i
], &(uc
->uc_sigmask
.sig
[i
]));
3126 setup_sigcontext(&uc
->uc_mcontext
, env
);
3129 static inline void install_sigtramp(uint32_t *tramp
)
3131 __put_user(0x08b00893, tramp
+ 0); /* li a7, 139 = __NR_rt_sigreturn */
3132 __put_user(0x00000073, tramp
+ 1); /* ecall */
3135 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
3136 target_siginfo_t
*info
,
3137 target_sigset_t
*set
, CPURISCVState
*env
)
3139 abi_ulong frame_addr
;
3140 struct target_rt_sigframe
*frame
;
3142 frame_addr
= get_sigframe(ka
, env
, sizeof(*frame
));
3143 trace_user_setup_rt_frame(env
, frame_addr
);
3145 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0)) {
3149 setup_ucontext(&frame
->uc
, env
, set
);
3150 tswap_siginfo(&frame
->info
, info
);
3151 install_sigtramp(frame
->tramp
);
3153 env
->pc
= ka
->_sa_handler
;
3154 env
->gpr
[xSP
] = frame_addr
;
3155 env
->gpr
[xA0
] = sig
;
3156 env
->gpr
[xA1
] = frame_addr
+ offsetof(struct target_rt_sigframe
, info
);
3157 env
->gpr
[xA2
] = frame_addr
+ offsetof(struct target_rt_sigframe
, uc
);
3158 env
->gpr
[xRA
] = frame_addr
+ offsetof(struct target_rt_sigframe
, tramp
);
3163 unlock_user_struct(frame
, frame_addr
, 1);
3164 if (sig
== TARGET_SIGSEGV
) {
3165 ka
->_sa_handler
= TARGET_SIG_DFL
;
3167 force_sig(TARGET_SIGSEGV
);
3170 static void restore_sigcontext(CPURISCVState
*env
, struct target_sigcontext
*sc
)
3174 __get_user(env
->pc
, &sc
->pc
);
3176 for (i
= 1; i
< 32; ++i
) {
3177 __get_user(env
->gpr
[i
], &sc
->gpr
[i
- 1]);
3179 for (i
= 0; i
< 32; ++i
) {
3180 __get_user(env
->fpr
[i
], &sc
->fpr
[i
]);
3184 __get_user(fcsr
, &sc
->fcsr
);
3185 csr_write_helper(env
, fcsr
, CSR_FCSR
);
3188 static void restore_ucontext(CPURISCVState
*env
, struct target_ucontext
*uc
)
3191 target_sigset_t target_set
;
3194 target_sigemptyset(&target_set
);
3195 for (i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
3196 __get_user(target_set
.sig
[i
], &(uc
->uc_sigmask
.sig
[i
]));
3199 target_to_host_sigset_internal(&blocked
, &target_set
);
3200 set_sigmask(&blocked
);
3202 restore_sigcontext(env
, &uc
->uc_mcontext
);
3205 long do_rt_sigreturn(CPURISCVState
*env
)
3207 struct target_rt_sigframe
*frame
;
3208 abi_ulong frame_addr
;
3210 frame_addr
= env
->gpr
[xSP
];
3211 trace_user_do_sigreturn(env
, frame_addr
);
3212 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1)) {
3216 restore_ucontext(env
, &frame
->uc
);
3218 if (do_sigaltstack(frame_addr
+ offsetof(struct target_rt_sigframe
,
3219 uc
.uc_stack
), 0, get_sp_from_cpustate(env
)) == -EFAULT
) {
3223 unlock_user_struct(frame
, frame_addr
, 0);
3224 return -TARGET_QEMU_ESIGRETURN
;
3227 unlock_user_struct(frame
, frame_addr
, 0);
3228 force_sig(TARGET_SIGSEGV
);
3232 #elif defined(TARGET_HPPA)
3234 struct target_sigcontext
{
3236 abi_ulong sc_gr
[32];
3238 abi_ulong sc_iasq
[2];
3239 abi_ulong sc_iaoq
[2];
3243 struct target_ucontext
{
3246 target_stack_t tuc_stack
;
3248 struct target_sigcontext tuc_mcontext
;
3249 target_sigset_t tuc_sigmask
;
3252 struct target_rt_sigframe
{
3254 target_siginfo_t info
;
3255 struct target_ucontext uc
;
3256 /* hidden location of upper halves of pa2.0 64-bit gregs */
3259 static void setup_sigcontext(struct target_sigcontext
*sc
, CPUArchState
*env
)
3264 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
3266 if (env
->iaoq_f
< TARGET_PAGE_SIZE
) {
3267 /* In the gateway page, executing a syscall. */
3268 flags
|= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
3269 __put_user(env
->gr
[31], &sc
->sc_iaoq
[0]);
3270 __put_user(env
->gr
[31] + 4, &sc
->sc_iaoq
[1]);
3272 __put_user(env
->iaoq_f
, &sc
->sc_iaoq
[0]);
3273 __put_user(env
->iaoq_b
, &sc
->sc_iaoq
[1]);
3275 __put_user(0, &sc
->sc_iasq
[0]);
3276 __put_user(0, &sc
->sc_iasq
[1]);
3277 __put_user(flags
, &sc
->sc_flags
);
3279 __put_user(cpu_hppa_get_psw(env
), &sc
->sc_gr
[0]);
3280 for (i
= 1; i
< 32; ++i
) {
3281 __put_user(env
->gr
[i
], &sc
->sc_gr
[i
]);
3284 __put_user((uint64_t)env
->fr0_shadow
<< 32, &sc
->sc_fr
[0]);
3285 for (i
= 1; i
< 32; ++i
) {
3286 __put_user(env
->fr
[i
], &sc
->sc_fr
[i
]);
3289 __put_user(env
->cr
[CR_SAR
], &sc
->sc_sar
);
3292 static void restore_sigcontext(CPUArchState
*env
, struct target_sigcontext
*sc
)
3297 __get_user(psw
, &sc
->sc_gr
[0]);
3298 cpu_hppa_put_psw(env
, psw
);
3300 for (i
= 1; i
< 32; ++i
) {
3301 __get_user(env
->gr
[i
], &sc
->sc_gr
[i
]);
3303 for (i
= 0; i
< 32; ++i
) {
3304 __get_user(env
->fr
[i
], &sc
->sc_fr
[i
]);
3306 cpu_hppa_loaded_fr0(env
);
3308 __get_user(env
->iaoq_f
, &sc
->sc_iaoq
[0]);
3309 __get_user(env
->iaoq_b
, &sc
->sc_iaoq
[1]);
3310 __get_user(env
->cr
[CR_SAR
], &sc
->sc_sar
);
3313 /* No, this doesn't look right, but it's copied straight from the kernel. */
3314 #define PARISC_RT_SIGFRAME_SIZE32 \
3315 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
3317 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
3318 target_siginfo_t
*info
,
3319 target_sigset_t
*set
, CPUArchState
*env
)
3321 abi_ulong frame_addr
, sp
, haddr
;
3322 struct target_rt_sigframe
*frame
;
3326 if (ka
->sa_flags
& TARGET_SA_ONSTACK
) {
3327 if (sas_ss_flags(sp
) == 0) {
3328 sp
= (target_sigaltstack_used
.ss_sp
+ 0x7f) & ~0x3f;
3331 frame_addr
= QEMU_ALIGN_UP(sp
, 64);
3332 sp
= frame_addr
+ PARISC_RT_SIGFRAME_SIZE32
;
3334 trace_user_setup_rt_frame(env
, frame_addr
);
3336 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0)) {
3340 tswap_siginfo(&frame
->info
, info
);
3341 frame
->uc
.tuc_flags
= 0;
3342 frame
->uc
.tuc_link
= 0;
3344 __put_user(target_sigaltstack_used
.ss_sp
, &frame
->uc
.tuc_stack
.ss_sp
);
3345 __put_user(sas_ss_flags(get_sp_from_cpustate(env
)),
3346 &frame
->uc
.tuc_stack
.ss_flags
);
3347 __put_user(target_sigaltstack_used
.ss_size
,
3348 &frame
->uc
.tuc_stack
.ss_size
);
3350 for (i
= 0; i
< TARGET_NSIG_WORDS
; i
++) {
3351 __put_user(set
->sig
[i
], &frame
->uc
.tuc_sigmask
.sig
[i
]);
3354 setup_sigcontext(&frame
->uc
.tuc_mcontext
, env
);
3356 __put_user(0x34190000, frame
->tramp
+ 0); /* ldi 0,%r25 */
3357 __put_user(0x3414015a, frame
->tramp
+ 1); /* ldi __NR_rt_sigreturn,%r20 */
3358 __put_user(0xe4008200, frame
->tramp
+ 2); /* be,l 0x100(%sr2,%r0) */
3359 __put_user(0x08000240, frame
->tramp
+ 3); /* nop */
3361 unlock_user_struct(frame
, frame_addr
, 1);
3363 env
->gr
[2] = h2g(frame
->tramp
);
3366 env
->gr
[25] = h2g(&frame
->info
);
3367 env
->gr
[24] = h2g(&frame
->uc
);
3369 haddr
= ka
->_sa_handler
;
3371 /* Function descriptor. */
3372 target_ulong
*fdesc
, dest
;
3375 if (!lock_user_struct(VERIFY_READ
, fdesc
, haddr
, 1)) {
3378 __get_user(dest
, fdesc
);
3379 __get_user(env
->gr
[19], fdesc
+ 1);
3380 unlock_user_struct(fdesc
, haddr
, 1);
3383 env
->iaoq_f
= haddr
;
3384 env
->iaoq_b
= haddr
+ 4;
3391 long do_rt_sigreturn(CPUArchState
*env
)
3393 abi_ulong frame_addr
= env
->gr
[30] - PARISC_RT_SIGFRAME_SIZE32
;
3394 struct target_rt_sigframe
*frame
;
3397 trace_user_do_rt_sigreturn(env
, frame_addr
);
3398 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1)) {
3401 target_to_host_sigset(&set
, &frame
->uc
.tuc_sigmask
);
3404 restore_sigcontext(env
, &frame
->uc
.tuc_mcontext
);
3405 unlock_user_struct(frame
, frame_addr
, 0);
3407 if (do_sigaltstack(frame_addr
+ offsetof(struct target_rt_sigframe
,
3409 0, env
->gr
[30]) == -EFAULT
) {
3413 unlock_user_struct(frame
, frame_addr
, 0);
3414 return -TARGET_QEMU_ESIGRETURN
;
3417 force_sig(TARGET_SIGSEGV
);
3418 return -TARGET_QEMU_ESIGRETURN
;
3421 #elif defined(TARGET_XTENSA)
3423 struct target_sigcontext
{
3428 abi_ulong sc_lcount
;
3433 abi_ulong sc_xtregs
;
3436 struct target_ucontext
{
3437 abi_ulong tuc_flags
;
3439 target_stack_t tuc_stack
;
3440 struct target_sigcontext tuc_mcontext
;
3441 target_sigset_t tuc_sigmask
;
3444 struct target_rt_sigframe
{
3445 target_siginfo_t info
;
3446 struct target_ucontext uc
;
3449 abi_ulong window
[4];
3452 static abi_ulong
get_sigframe(struct target_sigaction
*sa
,
3453 CPUXtensaState
*env
,
3454 unsigned long framesize
)
3456 abi_ulong sp
= env
->regs
[1];
3458 /* This is the X/Open sanctioned signal stack switching. */
3459 if ((sa
->sa_flags
& TARGET_SA_ONSTACK
) != 0 && !sas_ss_flags(sp
)) {
3460 sp
= target_sigaltstack_used
.ss_sp
+ target_sigaltstack_used
.ss_size
;
3462 return (sp
- framesize
) & -16;
3465 static int flush_window_regs(CPUXtensaState
*env
)
3467 uint32_t wb
= env
->sregs
[WINDOW_BASE
];
3468 uint32_t ws
= xtensa_replicate_windowstart(env
) >> (wb
+ 1);
3469 unsigned d
= ctz32(ws
) + 1;
3473 for (i
= d
; i
< env
->config
->nareg
/ 4; i
+= d
) {
3478 xtensa_rotate_window(env
, d
);
3483 } else if (ws
& 0x2) {
3485 ret
|= get_user_ual(osp
, env
->regs
[1] - 12);
3488 } else if (ws
& 0x4) {
3489 ssp
= env
->regs
[13];
3490 ret
|= get_user_ual(osp
, env
->regs
[1] - 12);
3494 g_assert_not_reached();
3497 for (j
= 0; j
< 4; ++j
) {
3498 ret
|= put_user_ual(env
->regs
[j
], ssp
- 16 + j
* 4);
3500 for (j
= 4; j
< d
* 4; ++j
) {
3501 ret
|= put_user_ual(env
->regs
[j
], osp
- 16 + j
* 4);
3504 xtensa_rotate_window(env
, d
);
3505 g_assert(env
->sregs
[WINDOW_BASE
] == wb
);
3509 static int setup_sigcontext(struct target_rt_sigframe
*frame
,
3510 CPUXtensaState
*env
)
3512 struct target_sigcontext
*sc
= &frame
->uc
.tuc_mcontext
;
3515 __put_user(env
->pc
, &sc
->sc_pc
);
3516 __put_user(env
->sregs
[PS
], &sc
->sc_ps
);
3517 __put_user(env
->sregs
[LBEG
], &sc
->sc_lbeg
);
3518 __put_user(env
->sregs
[LEND
], &sc
->sc_lend
);
3519 __put_user(env
->sregs
[LCOUNT
], &sc
->sc_lcount
);
3520 if (!flush_window_regs(env
)) {
3523 for (i
= 0; i
< 16; ++i
) {
3524 __put_user(env
->regs
[i
], sc
->sc_a
+ i
);
3526 __put_user(0, &sc
->sc_xtregs
);
3531 static void setup_rt_frame(int sig
, struct target_sigaction
*ka
,
3532 target_siginfo_t
*info
,
3533 target_sigset_t
*set
, CPUXtensaState
*env
)
3535 abi_ulong frame_addr
;
3536 struct target_rt_sigframe
*frame
;
3540 frame_addr
= get_sigframe(ka
, env
, sizeof(*frame
));
3541 trace_user_setup_rt_frame(env
, frame_addr
);
3543 if (!lock_user_struct(VERIFY_WRITE
, frame
, frame_addr
, 0)) {
3547 if (ka
->sa_flags
& SA_SIGINFO
) {
3548 tswap_siginfo(&frame
->info
, info
);
3551 __put_user(0, &frame
->uc
.tuc_flags
);
3552 __put_user(0, &frame
->uc
.tuc_link
);
3553 __put_user(target_sigaltstack_used
.ss_sp
,
3554 &frame
->uc
.tuc_stack
.ss_sp
);
3555 __put_user(sas_ss_flags(env
->regs
[1]),
3556 &frame
->uc
.tuc_stack
.ss_flags
);
3557 __put_user(target_sigaltstack_used
.ss_size
,
3558 &frame
->uc
.tuc_stack
.ss_size
);
3559 if (!setup_sigcontext(frame
, env
)) {
3560 unlock_user_struct(frame
, frame_addr
, 0);
3563 for (i
= 0; i
< TARGET_NSIG_WORDS
; ++i
) {
3564 __put_user(set
->sig
[i
], &frame
->uc
.tuc_sigmask
.sig
[i
]);
3567 if (ka
->sa_flags
& TARGET_SA_RESTORER
) {
3568 ra
= ka
->sa_restorer
;
3570 ra
= frame_addr
+ offsetof(struct target_rt_sigframe
, retcode
);
3571 #ifdef TARGET_WORDS_BIGENDIAN
3572 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
3573 __put_user(0x22, &frame
->retcode
[0]);
3574 __put_user(0x0a, &frame
->retcode
[1]);
3575 __put_user(TARGET_NR_rt_sigreturn
, &frame
->retcode
[2]);
3576 /* Generate instruction: SYSCALL */
3577 __put_user(0x00, &frame
->retcode
[3]);
3578 __put_user(0x05, &frame
->retcode
[4]);
3579 __put_user(0x00, &frame
->retcode
[5]);
3581 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
3582 __put_user(0x22, &frame
->retcode
[0]);
3583 __put_user(0xa0, &frame
->retcode
[1]);
3584 __put_user(TARGET_NR_rt_sigreturn
, &frame
->retcode
[2]);
3585 /* Generate instruction: SYSCALL */
3586 __put_user(0x00, &frame
->retcode
[3]);
3587 __put_user(0x50, &frame
->retcode
[4]);
3588 __put_user(0x00, &frame
->retcode
[5]);
3591 env
->sregs
[PS
] = PS_UM
| (3 << PS_RING_SHIFT
);
3592 if (xtensa_option_enabled(env
->config
, XTENSA_OPTION_WINDOWED_REGISTER
)) {
3593 env
->sregs
[PS
] |= PS_WOE
| (1 << PS_CALLINC_SHIFT
);
3595 memset(env
->regs
, 0, sizeof(env
->regs
));
3596 env
->pc
= ka
->_sa_handler
;
3597 env
->regs
[1] = frame_addr
;
3598 env
->sregs
[WINDOW_BASE
] = 0;
3599 env
->sregs
[WINDOW_START
] = 1;
3601 env
->regs
[4] = (ra
& 0x3fffffff) | 0x40000000;
3603 env
->regs
[7] = frame_addr
+ offsetof(struct target_rt_sigframe
, info
);
3604 env
->regs
[8] = frame_addr
+ offsetof(struct target_rt_sigframe
, uc
);
3605 unlock_user_struct(frame
, frame_addr
, 1);
3613 static void restore_sigcontext(CPUXtensaState
*env
,
3614 struct target_rt_sigframe
*frame
)
3616 struct target_sigcontext
*sc
= &frame
->uc
.tuc_mcontext
;
3620 __get_user(env
->pc
, &sc
->sc_pc
);
3621 __get_user(ps
, &sc
->sc_ps
);
3622 __get_user(env
->sregs
[LBEG
], &sc
->sc_lbeg
);
3623 __get_user(env
->sregs
[LEND
], &sc
->sc_lend
);
3624 __get_user(env
->sregs
[LCOUNT
], &sc
->sc_lcount
);
3626 env
->sregs
[WINDOW_BASE
] = 0;
3627 env
->sregs
[WINDOW_START
] = 1;
3628 env
->sregs
[PS
] = deposit32(env
->sregs
[PS
],
3631 extract32(ps
, PS_CALLINC_SHIFT
,
3633 for (i
= 0; i
< 16; ++i
) {
3634 __get_user(env
->regs
[i
], sc
->sc_a
+ i
);
3639 long do_rt_sigreturn(CPUXtensaState
*env
)
3641 abi_ulong frame_addr
= env
->regs
[1];
3642 struct target_rt_sigframe
*frame
;
3645 trace_user_do_rt_sigreturn(env
, frame_addr
);
3646 if (!lock_user_struct(VERIFY_READ
, frame
, frame_addr
, 1)) {
3649 target_to_host_sigset(&set
, &frame
->uc
.tuc_sigmask
);
3652 restore_sigcontext(env
, frame
);
3654 if (do_sigaltstack(frame_addr
+
3655 offsetof(struct target_rt_sigframe
, uc
.tuc_stack
),
3656 0, get_sp_from_cpustate(env
)) == -TARGET_EFAULT
) {
3659 unlock_user_struct(frame
, frame_addr
, 0);
3660 return -TARGET_QEMU_ESIGRETURN
;
3663 unlock_user_struct(frame
, frame_addr
, 0);
3664 force_sig(TARGET_SIGSEGV
);
3665 return -TARGET_QEMU_ESIGRETURN
;
/*
 * Deliver one pending guest signal.
 *
 * @cpu_env: CPU state of the thread taking the signal
 * @sig:     target signal number (1..TARGET_NSIG)
 * @k:       pending-signal slot holding the queued siginfo for @sig
 *
 * Dequeues the signal, gives an attached gdb a chance to intercept it,
 * then either performs the default action (stop / ignore / dump core),
 * ignores it, or builds a signal frame on the guest stack and updates
 * the thread's signal mask for the duration of the guest handler.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /* gdb may swallow the signal entirely (returns 0) or replace it. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        /* Swallowed by gdb: treat as ignored, no sigaction to consult. */
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signal. The other are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            /* Default action is "stop": stop the whole emulator process. */
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            /* Every remaining default action is fatal. */
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler; if we were inside sigsuspend,
         * the mask to extend is the temporary sigsuspend mask */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
    || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
    || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
    || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: one-shot handler, revert to the default action. */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
/*
 * Deliver every pending, non-blocked guest signal for this thread.
 *
 * Runs with all host signals blocked while it scans, so the pending
 * state cannot change underneath it; host signals are unblocked again
 * before re-checking signal_pending, so any signal taken at that point
 * simply sets the flag and is picked up by the next loop iteration.
 * Synchronous signals (ts->sync_signal) are delivered first and cannot
 * be blocked or ignored — that would livelock the faulting instruction.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        /* Block all host signals while we inspect and deliver. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                /* Force delivery: unblock it and reset to the default action. */
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* Re-read each iteration: handle_pending_signal may change it. */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* Never leave SIGSEGV/SIGBUS blocked on the host: the emulator
         * needs to take these faults synchronously. */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}