1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28 #include "signal-common.h"
29
30 struct target_sigaltstack target_sigaltstack_used = {
31 .ss_sp = 0,
32 .ss_size = 0,
33 .ss_flags = TARGET_SS_DISABLE,
34 };
35
36 static struct target_sigaction sigact_table[TARGET_NSIG];
37
38 static void host_signal_handler(int host_signum, siginfo_t *info,
39 void *puc);
40
41 static uint8_t host_to_target_signal_table[_NSIG] = {
42 [SIGHUP] = TARGET_SIGHUP,
43 [SIGINT] = TARGET_SIGINT,
44 [SIGQUIT] = TARGET_SIGQUIT,
45 [SIGILL] = TARGET_SIGILL,
46 [SIGTRAP] = TARGET_SIGTRAP,
47 [SIGABRT] = TARGET_SIGABRT,
48 /* [SIGIOT] = TARGET_SIGIOT,*/
49 [SIGBUS] = TARGET_SIGBUS,
50 [SIGFPE] = TARGET_SIGFPE,
51 [SIGKILL] = TARGET_SIGKILL,
52 [SIGUSR1] = TARGET_SIGUSR1,
53 [SIGSEGV] = TARGET_SIGSEGV,
54 [SIGUSR2] = TARGET_SIGUSR2,
55 [SIGPIPE] = TARGET_SIGPIPE,
56 [SIGALRM] = TARGET_SIGALRM,
57 [SIGTERM] = TARGET_SIGTERM,
58 #ifdef SIGSTKFLT
59 [SIGSTKFLT] = TARGET_SIGSTKFLT,
60 #endif
61 [SIGCHLD] = TARGET_SIGCHLD,
62 [SIGCONT] = TARGET_SIGCONT,
63 [SIGSTOP] = TARGET_SIGSTOP,
64 [SIGTSTP] = TARGET_SIGTSTP,
65 [SIGTTIN] = TARGET_SIGTTIN,
66 [SIGTTOU] = TARGET_SIGTTOU,
67 [SIGURG] = TARGET_SIGURG,
68 [SIGXCPU] = TARGET_SIGXCPU,
69 [SIGXFSZ] = TARGET_SIGXFSZ,
70 [SIGVTALRM] = TARGET_SIGVTALRM,
71 [SIGPROF] = TARGET_SIGPROF,
72 [SIGWINCH] = TARGET_SIGWINCH,
73 [SIGIO] = TARGET_SIGIO,
74 [SIGPWR] = TARGET_SIGPWR,
75 [SIGSYS] = TARGET_SIGSYS,
76 /* next signals stay the same */
77 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
78 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
79 To fix this properly we need to do manual signal delivery multiplexed
80 over a single host signal. */
81 [__SIGRTMIN] = __SIGRTMAX,
82 [__SIGRTMAX] = __SIGRTMIN,
83 };
84 static uint8_t target_to_host_signal_table[_NSIG];
85
86 int host_to_target_signal(int sig)
87 {
88 if (sig < 0 || sig >= _NSIG)
89 return sig;
90 return host_to_target_signal_table[sig];
91 }
92
93 int target_to_host_signal(int sig)
94 {
95 if (sig < 0 || sig >= _NSIG)
96 return sig;
97 return target_to_host_signal_table[sig];
98 }
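/* Illustrative note on the mapping above: every signal maps to itself
 * except the two realtime endpoints, which are swapped in both
 * directions, so conversion round-trips:
 *
 *   host_to_target_signal(SIGINT)     == TARGET_SIGINT
 *   host_to_target_signal(__SIGRTMIN) == __SIGRTMAX
 *   target_to_host_signal(__SIGRTMAX) == __SIGRTMIN
 *
 * and target_to_host_signal(host_to_target_signal(sig)) == sig for any
 * valid sig once signal_init() has filled in the identity entries.
 */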
99
100 static inline void target_sigaddset(target_sigset_t *set, int signum)
101 {
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105 }
106
107 static inline int target_sigismember(const target_sigset_t *set, int signum)
108 {
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112 }
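/* Worked example for the word/bit split above, assuming
 * TARGET_NSIG_BPW == 32: signum 33 becomes 32 after the decrement, so
 * it lands in set->sig[32 / 32] == set->sig[1] at bit 32 % 32 == 0,
 * while signum 1 lands in set->sig[0] at bit 0. This matches the
 * kernel's own sigset layout of one signal per bit.
 */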
113
114 void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
116 {
117 int i;
118 target_sigemptyset(d);
119 for (i = 1; i <= TARGET_NSIG; i++) {
120 if (sigismember(s, i)) {
121 target_sigaddset(d, host_to_target_signal(i));
122 }
123 }
124 }
125
126 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
127 {
128 target_sigset_t d1;
129 int i;
130
131 host_to_target_sigset_internal(&d1, s);
132     for (i = 0; i < TARGET_NSIG_WORDS; i++)
133 d->sig[i] = tswapal(d1.sig[i]);
134 }
135
136 void target_to_host_sigset_internal(sigset_t *d,
137 const target_sigset_t *s)
138 {
139 int i;
140 sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (target_sigismember(s, i)) {
143 sigaddset(d, target_to_host_signal(i));
144 }
145 }
146 }
147
148 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
149 {
150 target_sigset_t s1;
151 int i;
152
153     for (i = 0; i < TARGET_NSIG_WORDS; i++)
154 s1.sig[i] = tswapal(s->sig[i]);
155 target_to_host_sigset_internal(d, &s1);
156 }
157
158 void host_to_target_old_sigset(abi_ulong *old_sigset,
159 const sigset_t *sigset)
160 {
161 target_sigset_t d;
162 host_to_target_sigset(&d, sigset);
163 *old_sigset = d.sig[0];
164 }
165
166 void target_to_host_old_sigset(sigset_t *sigset,
167 const abi_ulong *old_sigset)
168 {
169 target_sigset_t d;
170 int i;
171
172 d.sig[0] = *old_sigset;
173     for (i = 1; i < TARGET_NSIG_WORDS; i++)
174 d.sig[i] = 0;
175 target_to_host_sigset(sigset, &d);
176 }
177
178 int block_signals(void)
179 {
180 TaskState *ts = (TaskState *)thread_cpu->opaque;
181 sigset_t set;
182
183 /* It's OK to block everything including SIGSEGV, because we won't
184 * run any further guest code before unblocking signals in
185 * process_pending_signals().
186 */
187 sigfillset(&set);
188 sigprocmask(SIG_SETMASK, &set, 0);
189
190 return atomic_xchg(&ts->signal_pending, 1);
191 }
192
193 /* Wrapper for the sigprocmask function.
194  * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
195  * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
196 * a signal was already pending and the syscall must be restarted, or
197 * 0 on success.
198 * If set is NULL, this is guaranteed not to fail.
199 */
200 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
201 {
202 TaskState *ts = (TaskState *)thread_cpu->opaque;
203
204 if (oldset) {
205 *oldset = ts->signal_mask;
206 }
207
208 if (set) {
209 int i;
210
211 if (block_signals()) {
212 return -TARGET_ERESTARTSYS;
213 }
214
215 switch (how) {
216 case SIG_BLOCK:
217 sigorset(&ts->signal_mask, &ts->signal_mask, set);
218 break;
219 case SIG_UNBLOCK:
220 for (i = 1; i <= NSIG; ++i) {
221 if (sigismember(set, i)) {
222 sigdelset(&ts->signal_mask, i);
223 }
224 }
225 break;
226 case SIG_SETMASK:
227 ts->signal_mask = *set;
228 break;
229 default:
230 g_assert_not_reached();
231 }
232
233 /* Silently ignore attempts to change blocking status of KILL or STOP */
234 sigdelset(&ts->signal_mask, SIGKILL);
235 sigdelset(&ts->signal_mask, SIGSTOP);
236 }
237 return 0;
238 }
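/* A minimal sketch of the expected calling pattern (hypothetical helper,
 * not part of this file): the caller converts the guest mask to a host
 * sigset first, and must propagate -TARGET_ERESTARTSYS so the main loop
 * can deliver the pending signal and restart the guest syscall. Here
 * "how" is assumed to already be the host SIG_BLOCK/UNBLOCK/SETMASK
 * constant. */
#if 0
static abi_long sketch_rt_sigprocmask(int how, const target_sigset_t *tset,
                                      sigset_t *oldset)
{
    sigset_t set;

    target_to_host_sigset(&set, tset);
    /* On -TARGET_ERESTARTSYS do not retry here; hand the errno back to
     * the syscall dispatcher so the guest syscall is re-executed after
     * the pending signal has been handled. */
    return do_sigprocmask(how, &set, oldset);
}
#endif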
239
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242 * caller is assumed to have called block_signals() already.
243 */
244 void set_sigmask(const sigset_t *set)
245 {
246 TaskState *ts = (TaskState *)thread_cpu->opaque;
247
248 ts->signal_mask = *set;
249 }
250 #endif
251
252 /* siginfo conversion */
253
254 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
255 const siginfo_t *info)
256 {
257 int sig = host_to_target_signal(info->si_signo);
258 int si_code = info->si_code;
259 int si_type;
260 tinfo->si_signo = sig;
261 tinfo->si_errno = 0;
262 tinfo->si_code = info->si_code;
263
264 /* This memset serves two purposes:
265 * (1) ensure we don't leak random junk to the guest later
266 * (2) placate false positives from gcc about fields
267 * being used uninitialized if it chooses to inline both this
268 * function and tswap_siginfo() into host_to_target_siginfo().
269 */
270 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
271
272 /* This is awkward, because we have to use a combination of
273 * the si_code and si_signo to figure out which of the union's
274 * members are valid. (Within the host kernel it is always possible
275 * to tell, but the kernel carefully avoids giving userspace the
276 * high 16 bits of si_code, so we don't have the information to
277 * do this the easy way...) We therefore make our best guess,
278 * bearing in mind that a guest can spoof most of the si_codes
279 * via rt_sigqueueinfo() if it likes.
280 *
281 * Once we have made our guess, we record it in the top 16 bits of
282 * the si_code, so that tswap_siginfo() later can use it.
283 * tswap_siginfo() will strip these top bits out before writing
284 * si_code to the guest (sign-extending the lower bits).
285 */
286
287 switch (si_code) {
288 case SI_USER:
289 case SI_TKILL:
290 case SI_KERNEL:
291 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
292 * These are the only unspoofable si_code values.
293 */
294 tinfo->_sifields._kill._pid = info->si_pid;
295 tinfo->_sifields._kill._uid = info->si_uid;
296 si_type = QEMU_SI_KILL;
297 break;
298 default:
299 /* Everything else is spoofable. Make best guess based on signal */
300 switch (sig) {
301 case TARGET_SIGCHLD:
302 tinfo->_sifields._sigchld._pid = info->si_pid;
303 tinfo->_sifields._sigchld._uid = info->si_uid;
304 tinfo->_sifields._sigchld._status
305 = host_to_target_waitstatus(info->si_status);
306 tinfo->_sifields._sigchld._utime = info->si_utime;
307 tinfo->_sifields._sigchld._stime = info->si_stime;
308 si_type = QEMU_SI_CHLD;
309 break;
310 case TARGET_SIGIO:
311 tinfo->_sifields._sigpoll._band = info->si_band;
312 tinfo->_sifields._sigpoll._fd = info->si_fd;
313 si_type = QEMU_SI_POLL;
314 break;
315 default:
316 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
317 tinfo->_sifields._rt._pid = info->si_pid;
318 tinfo->_sifields._rt._uid = info->si_uid;
319 /* XXX: potential problem if 64 bit */
320 tinfo->_sifields._rt._sigval.sival_ptr
321 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
322 si_type = QEMU_SI_RT;
323 break;
324 }
325 break;
326 }
327
328 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
329 }
330
331 void tswap_siginfo(target_siginfo_t *tinfo,
332 const target_siginfo_t *info)
333 {
334 int si_type = extract32(info->si_code, 16, 16);
335 int si_code = sextract32(info->si_code, 0, 16);
336
337 __put_user(info->si_signo, &tinfo->si_signo);
338 __put_user(info->si_errno, &tinfo->si_errno);
339 __put_user(si_code, &tinfo->si_code);
340
341 /* We can use our internal marker of which fields in the structure
342 * are valid, rather than duplicating the guesswork of
343 * host_to_target_siginfo_noswap() here.
344 */
345 switch (si_type) {
346 case QEMU_SI_KILL:
347 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
348 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
349 break;
350 case QEMU_SI_TIMER:
351 __put_user(info->_sifields._timer._timer1,
352 &tinfo->_sifields._timer._timer1);
353 __put_user(info->_sifields._timer._timer2,
354 &tinfo->_sifields._timer._timer2);
355 break;
356 case QEMU_SI_POLL:
357 __put_user(info->_sifields._sigpoll._band,
358 &tinfo->_sifields._sigpoll._band);
359 __put_user(info->_sifields._sigpoll._fd,
360 &tinfo->_sifields._sigpoll._fd);
361 break;
362 case QEMU_SI_FAULT:
363 __put_user(info->_sifields._sigfault._addr,
364 &tinfo->_sifields._sigfault._addr);
365 break;
366 case QEMU_SI_CHLD:
367 __put_user(info->_sifields._sigchld._pid,
368 &tinfo->_sifields._sigchld._pid);
369 __put_user(info->_sifields._sigchld._uid,
370 &tinfo->_sifields._sigchld._uid);
371 __put_user(info->_sifields._sigchld._status,
372 &tinfo->_sifields._sigchld._status);
373 __put_user(info->_sifields._sigchld._utime,
374 &tinfo->_sifields._sigchld._utime);
375 __put_user(info->_sifields._sigchld._stime,
376 &tinfo->_sifields._sigchld._stime);
377 break;
378 case QEMU_SI_RT:
379 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
380 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
381 __put_user(info->_sifields._rt._sigval.sival_ptr,
382 &tinfo->_sifields._rt._sigval.sival_ptr);
383 break;
384 default:
385 g_assert_not_reached();
386 }
387 }
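/* Note on the si_code packing shared by these two functions: the
 * QEMU_SI_* marker travels in bits 16..31 and the architectural si_code
 * in bits 0..15, so pack and unpack are exact inverses:
 *
 *   packed  = deposit32(si_code, 16, 16, si_type);
 *   si_type = extract32(packed, 16, 16);
 *   si_code = sextract32(packed, 0, 16);   (sign-extended on unpack)
 */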
388
389 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
390 {
391 target_siginfo_t tgt_tmp;
392 host_to_target_siginfo_noswap(&tgt_tmp, info);
393 tswap_siginfo(tinfo, &tgt_tmp);
394 }
395
396 /* XXX: we support only POSIX RT signals. */
397 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
398 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
399 {
400 /* This conversion is used only for the rt_sigqueueinfo syscall,
401 * and so we know that the _rt fields are the valid ones.
402 */
403 abi_ulong sival_ptr;
404
405 __get_user(info->si_signo, &tinfo->si_signo);
406 __get_user(info->si_errno, &tinfo->si_errno);
407 __get_user(info->si_code, &tinfo->si_code);
408 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
409 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
410 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
411 info->si_value.sival_ptr = (void *)(long)sival_ptr;
412 }
413
414 static int fatal_signal (int sig)
415 {
416 switch (sig) {
417 case TARGET_SIGCHLD:
418 case TARGET_SIGURG:
419 case TARGET_SIGWINCH:
420 /* Ignored by default. */
421 return 0;
422 case TARGET_SIGCONT:
423 case TARGET_SIGSTOP:
424 case TARGET_SIGTSTP:
425 case TARGET_SIGTTIN:
426 case TARGET_SIGTTOU:
427 /* Job control signals. */
428 return 0;
429 default:
430 return 1;
431 }
432 }
433
434 /* returns 1 if the given signal should dump core if not handled */
435 static int core_dump_signal(int sig)
436 {
437 switch (sig) {
438 case TARGET_SIGABRT:
439 case TARGET_SIGFPE:
440 case TARGET_SIGILL:
441 case TARGET_SIGQUIT:
442 case TARGET_SIGSEGV:
443 case TARGET_SIGTRAP:
444 case TARGET_SIGBUS:
445 return (1);
446 default:
447 return (0);
448 }
449 }
450
451 void signal_init(void)
452 {
453 TaskState *ts = (TaskState *)thread_cpu->opaque;
454 struct sigaction act;
455 struct sigaction oact;
456 int i, j;
457 int host_sig;
458
459 /* generate signal conversion tables */
460     for (i = 1; i < _NSIG; i++) {
461 if (host_to_target_signal_table[i] == 0)
462 host_to_target_signal_table[i] = i;
463 }
464     for (i = 1; i < _NSIG; i++) {
465 j = host_to_target_signal_table[i];
466 target_to_host_signal_table[j] = i;
467 }
468
469 /* Set the signal mask from the host mask. */
470 sigprocmask(0, 0, &ts->signal_mask);
471
472     /* Set all host signal handlers. ALL signals are blocked while the
473        handlers run, to serialize them. */
474 memset(sigact_table, 0, sizeof(sigact_table));
475
476 sigfillset(&act.sa_mask);
477 act.sa_flags = SA_SIGINFO;
478 act.sa_sigaction = host_signal_handler;
479     for (i = 1; i <= TARGET_NSIG; i++) {
480 host_sig = target_to_host_signal(i);
481 sigaction(host_sig, NULL, &oact);
482 if (oact.sa_sigaction == (void *)SIG_IGN) {
483 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
484 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
485 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
486 }
487 /* If there's already a handler installed then something has
488 gone horribly wrong, so don't even try to handle that case. */
489 /* Install some handlers for our own use. We need at least
490            SIGSEGV and SIGBUS, to detect exceptions. We cannot just
491 trap all signals because it affects syscall interrupt
492 behavior. But do trap all default-fatal signals. */
493 if (fatal_signal (i))
494 sigaction(host_sig, &act, NULL);
495 }
496 }
497
498 /* Force a synchronously taken signal. The kernel force_sig() function
499 * also forces the signal to "not blocked, not ignored", but for QEMU
500 * that work is done in process_pending_signals().
501 */
502 void force_sig(int sig)
503 {
504 CPUState *cpu = thread_cpu;
505 CPUArchState *env = cpu->env_ptr;
506 target_siginfo_t info;
507
508 info.si_signo = sig;
509 info.si_errno = 0;
510 info.si_code = TARGET_SI_KERNEL;
511 info._sifields._kill._pid = 0;
512 info._sifields._kill._uid = 0;
513 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
514 }
515
516 /* Force a SIGSEGV if we couldn't write to memory trying to set
517 * up the signal frame. oldsig is the signal we were trying to handle
518 * at the point of failure.
519 */
520 #if !defined(TARGET_RISCV)
521 void force_sigsegv(int oldsig)
522 {
523 if (oldsig == SIGSEGV) {
524 /* Make sure we don't try to deliver the signal again; this will
525 * end up with handle_pending_signal() calling dump_core_and_abort().
526 */
527 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
528 }
529 force_sig(TARGET_SIGSEGV);
530 }
531
532 #endif
533
534 /* abort execution with signal */
535 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
536 {
537 CPUState *cpu = thread_cpu;
538 CPUArchState *env = cpu->env_ptr;
539 TaskState *ts = (TaskState *)cpu->opaque;
540 int host_sig, core_dumped = 0;
541 struct sigaction act;
542
543 host_sig = target_to_host_signal(target_sig);
544 trace_user_force_sig(env, target_sig, host_sig);
545 gdb_signalled(env, target_sig);
546
547 /* dump core if supported by target binary format */
548 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
549 stop_all_tasks();
550 core_dumped =
551 ((*ts->bprm->core_dump)(target_sig, env) == 0);
552 }
553 if (core_dumped) {
554         /* we already dumped the core of the target process; we don't
555          * want a coredump of qemu itself */
556 struct rlimit nodump;
557 getrlimit(RLIMIT_CORE, &nodump);
558         nodump.rlim_cur = 0;
559 setrlimit(RLIMIT_CORE, &nodump);
560 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
561 target_sig, strsignal(host_sig), "core dumped" );
562 }
563
564 /* The proper exit code for dying from an uncaught signal is
565 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
566 * a negative value. To get the proper exit code we need to
567      * actually die from an uncaught signal.  So we install the default
568      * signal handler, send ourselves the signal and wait for it to
569      * arrive. */
570 sigfillset(&act.sa_mask);
571 act.sa_handler = SIG_DFL;
572 act.sa_flags = 0;
573 sigaction(host_sig, &act, NULL);
574
575 /* For some reason raise(host_sig) doesn't send the signal when
576 * statically linked on x86-64. */
577 kill(getpid(), host_sig);
578
579 /* Make sure the signal isn't masked (just reuse the mask inside
580 of act) */
581 sigdelset(&act.sa_mask, host_sig);
582 sigsuspend(&act.sa_mask);
583
584 /* unreachable */
585 abort();
586 }
587
588 /* Queue a signal so that it will be sent to the virtual CPU as soon
589    as possible. */
590 int queue_signal(CPUArchState *env, int sig, int si_type,
591 target_siginfo_t *info)
592 {
593 CPUState *cpu = ENV_GET_CPU(env);
594 TaskState *ts = cpu->opaque;
595
596 trace_user_queue_signal(env, sig);
597
598 info->si_code = deposit32(info->si_code, 16, 16, si_type);
599
600 ts->sync_signal.info = *info;
601 ts->sync_signal.pending = sig;
602 /* signal that a new signal is pending */
603 atomic_set(&ts->signal_pending, 1);
604 return 1; /* indicates that the signal was queued */
605 }
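/* Sketch of a typical caller (hypothetical, mirroring how force_sig()
 * above queues a synchronous signal; TARGET_SEGV_MAPERR is assumed to be
 * the target's usual "address not mapped" si_code): */
#if 0
static void sketch_queue_fault(CPUArchState *env, abi_ulong addr)
{
    target_siginfo_t info;

    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
#endif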
606
607 #ifndef HAVE_SAFE_SYSCALL
608 static inline void rewind_if_in_safe_syscall(void *puc)
609 {
610 /* Default version: never rewind */
611 }
612 #endif
613
614 static void host_signal_handler(int host_signum, siginfo_t *info,
615 void *puc)
616 {
617 CPUArchState *env = thread_cpu->env_ptr;
618 CPUState *cpu = ENV_GET_CPU(env);
619 TaskState *ts = cpu->opaque;
620
621 int sig;
622 target_siginfo_t tinfo;
623 ucontext_t *uc = puc;
624 struct emulated_sigtable *k;
625
626     /* The CPU emulator uses some host signals to detect exceptions;
627        we forward those signals to it. */
628 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
629 && info->si_code > 0) {
630 if (cpu_signal_handler(host_signum, info, puc))
631 return;
632 }
633
634 /* get target signal number */
635 sig = host_to_target_signal(host_signum);
636 if (sig < 1 || sig > TARGET_NSIG)
637 return;
638 trace_user_host_signal(env, host_signum, sig);
639
640 rewind_if_in_safe_syscall(puc);
641
642 host_to_target_siginfo_noswap(&tinfo, info);
643 k = &ts->sigtab[sig - 1];
644 k->info = tinfo;
645 k->pending = sig;
646 ts->signal_pending = 1;
647
648     /* Block host signals until the target signal handler is entered. We
649 * can't block SIGSEGV or SIGBUS while we're executing guest
650 * code in case the guest code provokes one in the window between
651 * now and it getting out to the main loop. Signals will be
652 * unblocked again in process_pending_signals().
653 *
654 * WARNING: we cannot use sigfillset() here because the uc_sigmask
655 * field is a kernel sigset_t, which is much smaller than the
656 * libc sigset_t which sigfillset() operates on. Using sigfillset()
657      * would write 0xff bytes past the end of the kernel-sized field
658      * and trash adjacent data in the structure.
659 * We can't use sizeof(uc->uc_sigmask) either, because the libc
660 * headers define the struct field with the wrong (too large) type.
661 */
662 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
663 sigdelset(&uc->uc_sigmask, SIGSEGV);
664 sigdelset(&uc->uc_sigmask, SIGBUS);
665
666 /* interrupt the virtual CPU as soon as possible */
667 cpu_exit(thread_cpu);
668 }
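/* For concreteness on the SIGSET_T_SIZE comment above: on typical Linux
 * hosts the kernel's sigset_t is _NSIG / 8 == 64 / 8 == 8 bytes, while
 * glibc's userspace sigset_t reserves 1024 bits == 128 bytes, so
 * sigfillset() (or a sizeof-based memset) would overrun the kernel-sized
 * uc_sigmask field.
 */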
669
670 /* do_sigaltstack() returns target values and errnos. */
671 /* compare linux/kernel/signal.c:do_sigaltstack() */
672 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
673 {
674 int ret;
675 struct target_sigaltstack oss;
676
677 /* XXX: test errors */
678     if (uoss_addr)
679 {
680 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
681 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
682 __put_user(sas_ss_flags(sp), &oss.ss_flags);
683 }
684
685     if (uss_addr)
686 {
687 struct target_sigaltstack *uss;
688 struct target_sigaltstack ss;
689 size_t minstacksize = TARGET_MINSIGSTKSZ;
690
691 #if defined(TARGET_PPC64)
692 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
693 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
694 if (get_ppc64_abi(image) > 1) {
695 minstacksize = 4096;
696 }
697 #endif
698
699 ret = -TARGET_EFAULT;
700 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
701 goto out;
702 }
703 __get_user(ss.ss_sp, &uss->ss_sp);
704 __get_user(ss.ss_size, &uss->ss_size);
705 __get_user(ss.ss_flags, &uss->ss_flags);
706 unlock_user_struct(uss, uss_addr, 0);
707
708 ret = -TARGET_EPERM;
709 if (on_sig_stack(sp))
710 goto out;
711
712 ret = -TARGET_EINVAL;
713 if (ss.ss_flags != TARGET_SS_DISABLE
714 && ss.ss_flags != TARGET_SS_ONSTACK
715 && ss.ss_flags != 0)
716 goto out;
717
718 if (ss.ss_flags == TARGET_SS_DISABLE) {
719 ss.ss_size = 0;
720 ss.ss_sp = 0;
721 } else {
722 ret = -TARGET_ENOMEM;
723 if (ss.ss_size < minstacksize) {
724 goto out;
725 }
726 }
727
728 target_sigaltstack_used.ss_sp = ss.ss_sp;
729 target_sigaltstack_used.ss_size = ss.ss_size;
730 }
731
732 if (uoss_addr) {
733 ret = -TARGET_EFAULT;
734 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
735 goto out;
736 }
737
738 ret = 0;
739 out:
740 return ret;
741 }
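/* The guest-visible contract implemented above, shown as hypothetical
 * guest code (standard sigaltstack(2) semantics, names illustrative): */
#if 0
#include <signal.h>
#include <stdlib.h>

static void sketch_install_altstack(void)
{
    stack_t ss;

    ss.ss_sp = malloc(SIGSTKSZ);  /* must be >= MINSIGSTKSZ, else ENOMEM */
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;              /* SS_DISABLE would discard the stack */
    sigaltstack(&ss, NULL);       /* fails with EPERM while on the stack */
}
#endif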
742
743 /* do_sigaction() returns target values and host errnos */
744 int do_sigaction(int sig, const struct target_sigaction *act,
745 struct target_sigaction *oact)
746 {
747 struct target_sigaction *k;
748 struct sigaction act1;
749 int host_sig;
750 int ret = 0;
751
752 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
753 return -TARGET_EINVAL;
754 }
755
756 if (block_signals()) {
757 return -TARGET_ERESTARTSYS;
758 }
759
760 k = &sigact_table[sig - 1];
761 if (oact) {
762 __put_user(k->_sa_handler, &oact->_sa_handler);
763 __put_user(k->sa_flags, &oact->sa_flags);
764 #ifdef TARGET_ARCH_HAS_SA_RESTORER
765 __put_user(k->sa_restorer, &oact->sa_restorer);
766 #endif
767 /* Not swapped. */
768 oact->sa_mask = k->sa_mask;
769 }
770 if (act) {
771 /* FIXME: This is not threadsafe. */
772 __get_user(k->_sa_handler, &act->_sa_handler);
773 __get_user(k->sa_flags, &act->sa_flags);
774 #ifdef TARGET_ARCH_HAS_SA_RESTORER
775 __get_user(k->sa_restorer, &act->sa_restorer);
776 #endif
777 /* To be swapped in target_to_host_sigset. */
778 k->sa_mask = act->sa_mask;
779
780 /* we update the host linux signal state */
781 host_sig = target_to_host_signal(sig);
782 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
783 sigfillset(&act1.sa_mask);
784 act1.sa_flags = SA_SIGINFO;
785 if (k->sa_flags & TARGET_SA_RESTART)
786 act1.sa_flags |= SA_RESTART;
787 /* NOTE: it is important to update the host kernel signal
788 ignore state to avoid getting unexpected interrupted
789 syscalls */
790 if (k->_sa_handler == TARGET_SIG_IGN) {
791 act1.sa_sigaction = (void *)SIG_IGN;
792 } else if (k->_sa_handler == TARGET_SIG_DFL) {
793 if (fatal_signal (sig))
794 act1.sa_sigaction = host_signal_handler;
795 else
796 act1.sa_sigaction = (void *)SIG_DFL;
797 } else {
798 act1.sa_sigaction = host_signal_handler;
799 }
800 ret = sigaction(host_sig, &act1, NULL);
801 }
802 }
803 return ret;
804 }
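/* Summary of the host-side dispatch chosen above:
 *   guest SIG_IGN                -> host SIG_IGN
 *   guest SIG_DFL, fatal signal  -> host_signal_handler
 *   guest SIG_DFL, non-fatal     -> host SIG_DFL
 *   guest handler                -> host_signal_handler
 * SIGSEGV and SIGBUS always keep QEMU's own handlers, because the
 * emulator relies on them to detect guest faults.
 */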
805
806 #if defined(TARGET_I386)
807 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
808
809 struct target_fpreg {
810 uint16_t significand[4];
811 uint16_t exponent;
812 };
813
814 struct target_fpxreg {
815 uint16_t significand[4];
816 uint16_t exponent;
817 uint16_t padding[3];
818 };
819
820 struct target_xmmreg {
821 uint32_t element[4];
822 };
823
824 struct target_fpstate_32 {
825 /* Regular FPU environment */
826 uint32_t cw;
827 uint32_t sw;
828 uint32_t tag;
829 uint32_t ipoff;
830 uint32_t cssel;
831 uint32_t dataoff;
832 uint32_t datasel;
833 struct target_fpreg st[8];
834 uint16_t status;
835 uint16_t magic; /* 0xffff = regular FPU data only */
836
837 /* FXSR FPU environment */
838 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
839 uint32_t mxcsr;
840 uint32_t reserved;
841 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
842 struct target_xmmreg xmm[8];
843 uint32_t padding[56];
844 };
845
846 struct target_fpstate_64 {
847 /* FXSAVE format */
848 uint16_t cw;
849 uint16_t sw;
850 uint16_t twd;
851 uint16_t fop;
852 uint64_t rip;
853 uint64_t rdp;
854 uint32_t mxcsr;
855 uint32_t mxcsr_mask;
856 uint32_t st_space[32];
857 uint32_t xmm_space[64];
858 uint32_t reserved[24];
859 };
860
861 #ifndef TARGET_X86_64
862 # define target_fpstate target_fpstate_32
863 #else
864 # define target_fpstate target_fpstate_64
865 #endif
866
867 struct target_sigcontext_32 {
868 uint16_t gs, __gsh;
869 uint16_t fs, __fsh;
870 uint16_t es, __esh;
871 uint16_t ds, __dsh;
872 uint32_t edi;
873 uint32_t esi;
874 uint32_t ebp;
875 uint32_t esp;
876 uint32_t ebx;
877 uint32_t edx;
878 uint32_t ecx;
879 uint32_t eax;
880 uint32_t trapno;
881 uint32_t err;
882 uint32_t eip;
883 uint16_t cs, __csh;
884 uint32_t eflags;
885 uint32_t esp_at_signal;
886 uint16_t ss, __ssh;
887 uint32_t fpstate; /* pointer */
888 uint32_t oldmask;
889 uint32_t cr2;
890 };
891
892 struct target_sigcontext_64 {
893 uint64_t r8;
894 uint64_t r9;
895 uint64_t r10;
896 uint64_t r11;
897 uint64_t r12;
898 uint64_t r13;
899 uint64_t r14;
900 uint64_t r15;
901
902 uint64_t rdi;
903 uint64_t rsi;
904 uint64_t rbp;
905 uint64_t rbx;
906 uint64_t rdx;
907 uint64_t rax;
908 uint64_t rcx;
909 uint64_t rsp;
910 uint64_t rip;
911
912 uint64_t eflags;
913
914 uint16_t cs;
915 uint16_t gs;
916 uint16_t fs;
917 uint16_t ss;
918
919 uint64_t err;
920 uint64_t trapno;
921 uint64_t oldmask;
922 uint64_t cr2;
923
924 uint64_t fpstate; /* pointer */
925 uint64_t padding[8];
926 };
927
928 #ifndef TARGET_X86_64
929 # define target_sigcontext target_sigcontext_32
930 #else
931 # define target_sigcontext target_sigcontext_64
932 #endif
933
934 /* see Linux/include/uapi/asm-generic/ucontext.h */
935 struct target_ucontext {
936 abi_ulong tuc_flags;
937 abi_ulong tuc_link;
938 target_stack_t tuc_stack;
939 struct target_sigcontext tuc_mcontext;
940 target_sigset_t tuc_sigmask; /* mask last for extensibility */
941 };
942
943 #ifndef TARGET_X86_64
944 struct sigframe {
945 abi_ulong pretcode;
946 int sig;
947 struct target_sigcontext sc;
948 struct target_fpstate fpstate;
949 abi_ulong extramask[TARGET_NSIG_WORDS-1];
950 char retcode[8];
951 };
952
953 struct rt_sigframe {
954 abi_ulong pretcode;
955 int sig;
956 abi_ulong pinfo;
957 abi_ulong puc;
958 struct target_siginfo info;
959 struct target_ucontext uc;
960 struct target_fpstate fpstate;
961 char retcode[8];
962 };
963
964 #else
965
966 struct rt_sigframe {
967 abi_ulong pretcode;
968 struct target_ucontext uc;
969 struct target_siginfo info;
970 struct target_fpstate fpstate;
971 };
972
973 #endif
974
975 /*
976 * Set up a signal frame.
977 */
978
979 /* XXX: save x87 state */
980 static void setup_sigcontext(struct target_sigcontext *sc,
981 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
982 abi_ulong fpstate_addr)
983 {
984 CPUState *cs = CPU(x86_env_get_cpu(env));
985 #ifndef TARGET_X86_64
986 uint16_t magic;
987
988 /* already locked in setup_frame() */
989 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
990 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
991 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
992 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
993 __put_user(env->regs[R_EDI], &sc->edi);
994 __put_user(env->regs[R_ESI], &sc->esi);
995 __put_user(env->regs[R_EBP], &sc->ebp);
996 __put_user(env->regs[R_ESP], &sc->esp);
997 __put_user(env->regs[R_EBX], &sc->ebx);
998 __put_user(env->regs[R_EDX], &sc->edx);
999 __put_user(env->regs[R_ECX], &sc->ecx);
1000 __put_user(env->regs[R_EAX], &sc->eax);
1001 __put_user(cs->exception_index, &sc->trapno);
1002 __put_user(env->error_code, &sc->err);
1003 __put_user(env->eip, &sc->eip);
1004 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1005 __put_user(env->eflags, &sc->eflags);
1006 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1007 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1008
1009 cpu_x86_fsave(env, fpstate_addr, 1);
1010 fpstate->status = fpstate->sw;
1011 magic = 0xffff;
1012 __put_user(magic, &fpstate->magic);
1013 __put_user(fpstate_addr, &sc->fpstate);
1014
1015 /* non-iBCS2 extensions.. */
1016 __put_user(mask, &sc->oldmask);
1017 __put_user(env->cr[2], &sc->cr2);
1018 #else
1019 __put_user(env->regs[R_EDI], &sc->rdi);
1020 __put_user(env->regs[R_ESI], &sc->rsi);
1021 __put_user(env->regs[R_EBP], &sc->rbp);
1022 __put_user(env->regs[R_ESP], &sc->rsp);
1023 __put_user(env->regs[R_EBX], &sc->rbx);
1024 __put_user(env->regs[R_EDX], &sc->rdx);
1025 __put_user(env->regs[R_ECX], &sc->rcx);
1026 __put_user(env->regs[R_EAX], &sc->rax);
1027
1028 __put_user(env->regs[8], &sc->r8);
1029 __put_user(env->regs[9], &sc->r9);
1030 __put_user(env->regs[10], &sc->r10);
1031 __put_user(env->regs[11], &sc->r11);
1032 __put_user(env->regs[12], &sc->r12);
1033 __put_user(env->regs[13], &sc->r13);
1034 __put_user(env->regs[14], &sc->r14);
1035 __put_user(env->regs[15], &sc->r15);
1036
1037 __put_user(cs->exception_index, &sc->trapno);
1038 __put_user(env->error_code, &sc->err);
1039 __put_user(env->eip, &sc->rip);
1040
1041 __put_user(env->eflags, &sc->eflags);
1042 __put_user(env->segs[R_CS].selector, &sc->cs);
1043 __put_user((uint16_t)0, &sc->gs);
1044 __put_user((uint16_t)0, &sc->fs);
1045 __put_user(env->segs[R_SS].selector, &sc->ss);
1046
1047 __put_user(mask, &sc->oldmask);
1048 __put_user(env->cr[2], &sc->cr2);
1049
1050 /* fpstate_addr must be 16 byte aligned for fxsave */
1051 assert(!(fpstate_addr & 0xf));
1052
1053 cpu_x86_fxsave(env, fpstate_addr);
1054 __put_user(fpstate_addr, &sc->fpstate);
1055 #endif
1056 }
1057
1058 /*
1059  * Determine which stack to use.
1060 */
1061
1062 static inline abi_ulong
1063 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1064 {
1065 unsigned long esp;
1066
1067 /* Default to using normal stack */
1068 esp = env->regs[R_ESP];
1069 #ifdef TARGET_X86_64
1070 esp -= 128; /* this is the redzone */
1071 #endif
1072
1073 /* This is the X/Open sanctioned signal stack switching. */
1074 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1075 if (sas_ss_flags(esp) == 0) {
1076 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1077 }
1078 } else {
1079 #ifndef TARGET_X86_64
1080 /* This is the legacy signal stack switching. */
1081 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1082 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1083 ka->sa_restorer) {
1084 esp = (unsigned long) ka->sa_restorer;
1085 }
1086 #endif
1087 }
1088
1089 #ifndef TARGET_X86_64
1090 return (esp - frame_size) & -8ul;
1091 #else
1092 return ((esp - frame_size) & (~15ul)) - 8;
1093 #endif
1094 }
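/* Worked example for the 64-bit branch above: with esp == 0x8000 and
 * frame_size == 0x468, (0x8000 - 0x468) & ~15ul == 0x7b90, and
 * subtracting 8 gives 0x7b88. The result is 8 mod 16, i.e. the stack
 * looks exactly as it would just after a CALL, which is what the x86-64
 * ABI expects on handler entry and what lets the fpstate alignment
 * assertion in setup_sigcontext() hold.
 */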
1095
1096 #ifndef TARGET_X86_64
1097 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1098 static void setup_frame(int sig, struct target_sigaction *ka,
1099 target_sigset_t *set, CPUX86State *env)
1100 {
1101 abi_ulong frame_addr;
1102 struct sigframe *frame;
1103 int i;
1104
1105 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1106 trace_user_setup_frame(env, frame_addr);
1107
1108 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1109 goto give_sigsegv;
1110
1111 __put_user(sig, &frame->sig);
1112
1113 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1114 frame_addr + offsetof(struct sigframe, fpstate));
1115
1116     for (i = 1; i < TARGET_NSIG_WORDS; i++) {
1117 __put_user(set->sig[i], &frame->extramask[i - 1]);
1118 }
1119
1120 /* Set up to return from userspace. If provided, use a stub
1121 already in userspace. */
1122 if (ka->sa_flags & TARGET_SA_RESTORER) {
1123 __put_user(ka->sa_restorer, &frame->pretcode);
1124 } else {
1125 uint16_t val16;
1126 abi_ulong retcode_addr;
1127 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1128 __put_user(retcode_addr, &frame->pretcode);
1129 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1130 val16 = 0xb858;
1131 __put_user(val16, (uint16_t *)(frame->retcode+0));
1132 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1133 val16 = 0x80cd;
1134 __put_user(val16, (uint16_t *)(frame->retcode+6));
1135 }
1136
1137 /* Set up registers for signal handler */
1138 env->regs[R_ESP] = frame_addr;
1139 env->eip = ka->_sa_handler;
1140
1141 cpu_x86_load_seg(env, R_DS, __USER_DS);
1142 cpu_x86_load_seg(env, R_ES, __USER_DS);
1143 cpu_x86_load_seg(env, R_SS, __USER_DS);
1144 cpu_x86_load_seg(env, R_CS, __USER_CS);
1145 env->eflags &= ~TF_MASK;
1146
1147 unlock_user_struct(frame, frame_addr, 1);
1148
1149 return;
1150
1151 give_sigsegv:
1152 force_sigsegv(sig);
1153 }
1154 #endif
1155
1156 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1157 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1158 target_siginfo_t *info,
1159 target_sigset_t *set, CPUX86State *env)
1160 {
1161 abi_ulong frame_addr;
1162 #ifndef TARGET_X86_64
1163 abi_ulong addr;
1164 #endif
1165 struct rt_sigframe *frame;
1166 int i;
1167
1168 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1169 trace_user_setup_rt_frame(env, frame_addr);
1170
1171 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1172 goto give_sigsegv;
1173
1174 /* These fields are only in rt_sigframe on 32 bit */
1175 #ifndef TARGET_X86_64
1176 __put_user(sig, &frame->sig);
1177 addr = frame_addr + offsetof(struct rt_sigframe, info);
1178 __put_user(addr, &frame->pinfo);
1179 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1180 __put_user(addr, &frame->puc);
1181 #endif
1182 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1183 tswap_siginfo(&frame->info, info);
1184 }
1185
1186 /* Create the ucontext. */
1187 __put_user(0, &frame->uc.tuc_flags);
1188 __put_user(0, &frame->uc.tuc_link);
1189 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1190 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1191 &frame->uc.tuc_stack.ss_flags);
1192 __put_user(target_sigaltstack_used.ss_size,
1193 &frame->uc.tuc_stack.ss_size);
1194 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1195 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1196
1197     for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1198 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1199 }
1200
1201 /* Set up to return from userspace. If provided, use a stub
1202 already in userspace. */
1203 #ifndef TARGET_X86_64
1204 if (ka->sa_flags & TARGET_SA_RESTORER) {
1205 __put_user(ka->sa_restorer, &frame->pretcode);
1206 } else {
1207 uint16_t val16;
1208 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1209 __put_user(addr, &frame->pretcode);
1210 /* This is movl $,%eax ; int $0x80 */
1211 __put_user(0xb8, (char *)(frame->retcode+0));
1212 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1213 val16 = 0x80cd;
1214 __put_user(val16, (uint16_t *)(frame->retcode+5));
1215 }
1216 #else
1217 /* XXX: Would be slightly better to return -EFAULT here if test fails
1218 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1219 __put_user(ka->sa_restorer, &frame->pretcode);
1220 #endif
1221
1222 /* Set up registers for signal handler */
1223 env->regs[R_ESP] = frame_addr;
1224 env->eip = ka->_sa_handler;
1225
1226 #ifndef TARGET_X86_64
1227 env->regs[R_EAX] = sig;
1228 env->regs[R_EDX] = (unsigned long)&frame->info;
1229 env->regs[R_ECX] = (unsigned long)&frame->uc;
1230 #else
1231 env->regs[R_EAX] = 0;
1232 env->regs[R_EDI] = sig;
1233 env->regs[R_ESI] = (unsigned long)&frame->info;
1234 env->regs[R_EDX] = (unsigned long)&frame->uc;
1235 #endif
1236
1237 cpu_x86_load_seg(env, R_DS, __USER_DS);
1238 cpu_x86_load_seg(env, R_ES, __USER_DS);
1239 cpu_x86_load_seg(env, R_CS, __USER_CS);
1240 cpu_x86_load_seg(env, R_SS, __USER_DS);
1241 env->eflags &= ~TF_MASK;
1242
1243 unlock_user_struct(frame, frame_addr, 1);
1244
1245 return;
1246
1247 give_sigsegv:
1248 force_sigsegv(sig);
1249 }
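/* Decode of the two i386 return stubs written above (bytes as stored in
 * memory; x86 is little-endian, so the 16-bit constants appear
 * byte-swapped):
 *
 *   sigreturn stub:    58           popl %eax
 *                      b8 <imm32>   movl $TARGET_NR_sigreturn, %eax
 *                      cd 80        int  $0x80
 *
 *   rt_sigreturn stub: b8 <imm32>   movl $TARGET_NR_rt_sigreturn, %eax
 *                      cd 80        int  $0x80
 *
 * Hence val16 0xb858 emits "58 b8" and 0x80cd emits "cd 80".
 */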
1250
1251 static int
1252 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1253 {
1254 unsigned int err = 0;
1255 abi_ulong fpstate_addr;
1256 unsigned int tmpflags;
1257
1258 #ifndef TARGET_X86_64
1259 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1260 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1261 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1262 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1263
1264 env->regs[R_EDI] = tswapl(sc->edi);
1265 env->regs[R_ESI] = tswapl(sc->esi);
1266 env->regs[R_EBP] = tswapl(sc->ebp);
1267 env->regs[R_ESP] = tswapl(sc->esp);
1268 env->regs[R_EBX] = tswapl(sc->ebx);
1269 env->regs[R_EDX] = tswapl(sc->edx);
1270 env->regs[R_ECX] = tswapl(sc->ecx);
1271 env->regs[R_EAX] = tswapl(sc->eax);
1272
1273 env->eip = tswapl(sc->eip);
1274 #else
1275 env->regs[8] = tswapl(sc->r8);
1276 env->regs[9] = tswapl(sc->r9);
1277 env->regs[10] = tswapl(sc->r10);
1278 env->regs[11] = tswapl(sc->r11);
1279 env->regs[12] = tswapl(sc->r12);
1280 env->regs[13] = tswapl(sc->r13);
1281 env->regs[14] = tswapl(sc->r14);
1282 env->regs[15] = tswapl(sc->r15);
1283
1284 env->regs[R_EDI] = tswapl(sc->rdi);
1285 env->regs[R_ESI] = tswapl(sc->rsi);
1286 env->regs[R_EBP] = tswapl(sc->rbp);
1287 env->regs[R_EBX] = tswapl(sc->rbx);
1288 env->regs[R_EDX] = tswapl(sc->rdx);
1289 env->regs[R_EAX] = tswapl(sc->rax);
1290 env->regs[R_ECX] = tswapl(sc->rcx);
1291 env->regs[R_ESP] = tswapl(sc->rsp);
1292
1293 env->eip = tswapl(sc->rip);
1294 #endif
1295
1296 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1297 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1298
1299 tmpflags = tswapl(sc->eflags);
1300 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
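    /* 0x40DD5 covers the flags userspace may restore:
     * CF|PF|AF|ZF|SF|TF|DF|OF plus AC (bit 18); privileged bits such as
     * IF and IOPL are deliberately kept from the current eflags. */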
1301 // regs->orig_eax = -1; /* disable syscall checks */
1302
1303 fpstate_addr = tswapl(sc->fpstate);
1304 if (fpstate_addr != 0) {
1305 if (!access_ok(VERIFY_READ, fpstate_addr,
1306 sizeof(struct target_fpstate)))
1307 goto badframe;
1308 #ifndef TARGET_X86_64
1309 cpu_x86_frstor(env, fpstate_addr, 1);
1310 #else
1311 cpu_x86_fxrstor(env, fpstate_addr);
1312 #endif
1313 }
1314
1315 return err;
1316 badframe:
1317 return 1;
1318 }
1319
1320 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1321 #ifndef TARGET_X86_64
1322 long do_sigreturn(CPUX86State *env)
1323 {
1324 struct sigframe *frame;
1325 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1326 target_sigset_t target_set;
1327 sigset_t set;
1328 int i;
1329
1330 trace_user_do_sigreturn(env, frame_addr);
1331 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1332 goto badframe;
1333 /* set blocked signals */
1334 __get_user(target_set.sig[0], &frame->sc.oldmask);
1335     for (i = 1; i < TARGET_NSIG_WORDS; i++) {
1336 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1337 }
1338
1339 target_to_host_sigset_internal(&set, &target_set);
1340 set_sigmask(&set);
1341
1342 /* restore registers */
1343 if (restore_sigcontext(env, &frame->sc))
1344 goto badframe;
1345 unlock_user_struct(frame, frame_addr, 0);
1346 return -TARGET_QEMU_ESIGRETURN;
1347
1348 badframe:
1349 unlock_user_struct(frame, frame_addr, 0);
1350 force_sig(TARGET_SIGSEGV);
1351 return -TARGET_QEMU_ESIGRETURN;
1352 }
1353 #endif
1354
1355 long do_rt_sigreturn(CPUX86State *env)
1356 {
1357 abi_ulong frame_addr;
1358 struct rt_sigframe *frame;
1359 sigset_t set;
1360
1361 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1362 trace_user_do_rt_sigreturn(env, frame_addr);
1363 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1364 goto badframe;
1365 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1366 set_sigmask(&set);
1367
1368 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1369 goto badframe;
1370 }
1371
1372 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1373 get_sp_from_cpustate(env)) == -EFAULT) {
1374 goto badframe;
1375 }
1376
1377 unlock_user_struct(frame, frame_addr, 0);
1378 return -TARGET_QEMU_ESIGRETURN;
1379
1380 badframe:
1381 unlock_user_struct(frame, frame_addr, 0);
1382 force_sig(TARGET_SIGSEGV);
1383 return -TARGET_QEMU_ESIGRETURN;
1384 }
1385
1386 #elif defined(TARGET_SPARC)
1387
1388 #define __SUNOS_MAXWIN 31
1389
1390 /* This is what SunOS does, so shall I. */
1391 struct target_sigcontext {
1392 abi_ulong sigc_onstack; /* state to restore */
1393
1394 abi_ulong sigc_mask; /* sigmask to restore */
1395 abi_ulong sigc_sp; /* stack pointer */
1396 abi_ulong sigc_pc; /* program counter */
1397 abi_ulong sigc_npc; /* next program counter */
1398 abi_ulong sigc_psr; /* for condition codes etc */
1399 abi_ulong sigc_g1; /* User uses these two registers */
1400 abi_ulong sigc_o0; /* within the trampoline code. */
1401
1402     /* Now comes information regarding the user's window set
1403 * at the time of the signal.
1404 */
1405 abi_ulong sigc_oswins; /* outstanding windows */
1406
1407 /* stack ptrs for each regwin buf */
1408 char *sigc_spbuf[__SUNOS_MAXWIN];
1409
1410 /* Windows to restore after signal */
1411 struct {
1412 abi_ulong locals[8];
1413 abi_ulong ins[8];
1414 } sigc_wbuf[__SUNOS_MAXWIN];
1415 };
1416 /* A Sparc stack frame */
1417 struct sparc_stackf {
1418 abi_ulong locals[8];
1419 abi_ulong ins[8];
1420 /* It's simpler to treat fp and callers_pc as elements of ins[]
1421 * since we never need to access them ourselves.
1422 */
1423 char *structptr;
1424 abi_ulong xargs[6];
1425 abi_ulong xxargs[1];
1426 };
1427
1428 typedef struct {
1429 struct {
1430 abi_ulong psr;
1431 abi_ulong pc;
1432 abi_ulong npc;
1433 abi_ulong y;
1434 abi_ulong u_regs[16]; /* globals and ins */
1435 } si_regs;
1436 int si_mask;
1437 } __siginfo_t;
1438
1439 typedef struct {
1440 abi_ulong si_float_regs[32];
1441 unsigned long si_fsr;
1442 unsigned long si_fpqdepth;
1443 struct {
1444 unsigned long *insn_addr;
1445 unsigned long insn;
1446 } si_fpqueue [16];
1447 } qemu_siginfo_fpu_t;
1448
1449
1450 struct target_signal_frame {
1451 struct sparc_stackf ss;
1452 __siginfo_t info;
1453 abi_ulong fpu_save;
1454 abi_ulong insns[2] __attribute__ ((aligned (8)));
1455 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
1456 abi_ulong extra_size; /* Should be 0 */
1457 qemu_siginfo_fpu_t fpu_state;
1458 };
1459 struct target_rt_signal_frame {
1460 struct sparc_stackf ss;
1461 siginfo_t info;
1462 abi_ulong regs[20];
1463 sigset_t mask;
1464 abi_ulong fpu_save;
1465 unsigned int insns[2];
1466 stack_t stack;
1467 unsigned int extra_size; /* Should be 0 */
1468 qemu_siginfo_fpu_t fpu_state;
1469 };
1470
1471 #define UREG_O0 16
1472 #define UREG_O6 22
1473 #define UREG_I0 0
1474 #define UREG_I1 1
1475 #define UREG_I2 2
1476 #define UREG_I3 3
1477 #define UREG_I4 4
1478 #define UREG_I5 5
1479 #define UREG_I6 6
1480 #define UREG_I7 7
1481 #define UREG_L0 8
1482 #define UREG_FP UREG_I6
1483 #define UREG_SP UREG_O6
1484
1485 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
1486 CPUSPARCState *env,
1487 unsigned long framesize)
1488 {
1489 abi_ulong sp;
1490
1491 sp = env->regwptr[UREG_FP];
1492
1493 /* This is the X/Open sanctioned signal stack switching. */
1494 if (sa->sa_flags & TARGET_SA_ONSTACK) {
1495 if (!on_sig_stack(sp)
1496 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
1497 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1498 }
1499 }
1500 return sp - framesize;
1501 }
1502
1503 static int
1504 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
1505 {
1506 int err = 0, i;
1507
1508 __put_user(env->psr, &si->si_regs.psr);
1509 __put_user(env->pc, &si->si_regs.pc);
1510 __put_user(env->npc, &si->si_regs.npc);
1511 __put_user(env->y, &si->si_regs.y);
1512     for (i = 0; i < 8; i++) {
1513 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
1514 }
1515     for (i = 0; i < 8; i++) {
1516 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
1517 }
1518 __put_user(mask, &si->si_mask);
1519 return err;
1520 }
1521
1522 #if 0
1523 static int
1524 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1525 CPUSPARCState *env, unsigned long mask)
1526 {
1527 int err = 0;
1528
1529 __put_user(mask, &sc->sigc_mask);
1530 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
1531 __put_user(env->pc, &sc->sigc_pc);
1532 __put_user(env->npc, &sc->sigc_npc);
1533 __put_user(env->psr, &sc->sigc_psr);
1534 __put_user(env->gregs[1], &sc->sigc_g1);
1535 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
1536
1537 return err;
1538 }
1539 #endif
1540 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
1541
1542 static void setup_frame(int sig, struct target_sigaction *ka,
1543 target_sigset_t *set, CPUSPARCState *env)
1544 {
1545 abi_ulong sf_addr;
1546 struct target_signal_frame *sf;
1547 int sigframe_size, err, i;
1548
1549 /* 1. Make sure everything is clean */
1550 //synchronize_user_stack();
1551
1552 sigframe_size = NF_ALIGNEDSZ;
1553 sf_addr = get_sigframe(ka, env, sigframe_size);
1554 trace_user_setup_frame(env, sf_addr);
1555
1556 sf = lock_user(VERIFY_WRITE, sf_addr,
1557 sizeof(struct target_signal_frame), 0);
1558 if (!sf) {
1559 goto sigsegv;
1560 }
1561 #if 0
1562 if (invalid_frame_pointer(sf, sigframe_size))
1563 goto sigill_and_return;
1564 #endif
1565 /* 2. Save the current process state */
1566 err = setup___siginfo(&sf->info, env, set->sig[0]);
1567 __put_user(0, &sf->extra_size);
1568
1569 //save_fpu_state(regs, &sf->fpu_state);
1570 //__put_user(&sf->fpu_state, &sf->fpu_save);
1571
1572 __put_user(set->sig[0], &sf->info.si_mask);
1573 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
1574 __put_user(set->sig[i + 1], &sf->extramask[i]);
1575 }
1576
1577 for (i = 0; i < 8; i++) {
1578 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
1579 }
1580 for (i = 0; i < 8; i++) {
1581 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
1582 }
1583 if (err)
1584 goto sigsegv;
1585
1586 /* 3. signal handler back-trampoline and parameters */
1587 env->regwptr[UREG_FP] = sf_addr;
1588 env->regwptr[UREG_I0] = sig;
1589 env->regwptr[UREG_I1] = sf_addr +
1590 offsetof(struct target_signal_frame, info);
1591 env->regwptr[UREG_I2] = sf_addr +
1592 offsetof(struct target_signal_frame, info);
1593
1594 /* 4. signal handler */
1595 env->pc = ka->_sa_handler;
1596 env->npc = (env->pc + 4);
1597 /* 5. return to kernel instructions */
1598 if (ka->ka_restorer) {
1599 env->regwptr[UREG_I7] = ka->ka_restorer;
1600 } else {
1601 uint32_t val32;
1602
1603 env->regwptr[UREG_I7] = sf_addr +
1604 offsetof(struct target_signal_frame, insns) - 2 * 4;
1605
1606 /* mov __NR_sigreturn, %g1 */
1607 val32 = 0x821020d8;
1608 __put_user(val32, &sf->insns[0]);
1609
1610 /* t 0x10 */
1611 val32 = 0x91d02010;
1612 __put_user(val32, &sf->insns[1]);
1613 if (err)
1614 goto sigsegv;
1615
1616 /* Flush instruction space. */
1617 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
1618 // tb_flush(env);
1619 }
1620 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
1621 return;
1622 #if 0
1623 sigill_and_return:
1624 force_sig(TARGET_SIGILL);
1625 #endif
1626 sigsegv:
1627 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
1628 force_sigsegv(sig);
1629 }
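/* Decode of the synthesized trampoline above (SPARC V8 encodings, given
 * here as a best-effort reading):
 *   0x821020d8   or %g0, 0xd8, %g1   ; mov __NR_sigreturn (216), %g1
 *   0x91d02010   ta 0x10             ; trap always: Linux syscall trap
 * UREG_I7 is set 8 bytes before insns[] because the handler's return
 * sequence jumps to %i7 + 8, i.e. to insns[0].
 */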
1630
1631 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1632 target_siginfo_t *info,
1633 target_sigset_t *set, CPUSPARCState *env)
1634 {
1635 fprintf(stderr, "setup_rt_frame: not implemented\n");
1636 }
1637
1638 long do_sigreturn(CPUSPARCState *env)
1639 {
1640 abi_ulong sf_addr;
1641 struct target_signal_frame *sf;
1642 uint32_t up_psr, pc, npc;
1643 target_sigset_t set;
1644 sigset_t host_set;
1645     int err = 0, i;
1646
1647 sf_addr = env->regwptr[UREG_FP];
1648 trace_user_do_sigreturn(env, sf_addr);
1649 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
1650 goto segv_and_exit;
1651 }
1652
1653 /* 1. Make sure we are not getting garbage from the user */
1654
1655 if (sf_addr & 3)
1656 goto segv_and_exit;
1657
1658 __get_user(pc, &sf->info.si_regs.pc);
1659 __get_user(npc, &sf->info.si_regs.npc);
1660
1661 if ((pc | npc) & 3) {
1662 goto segv_and_exit;
1663 }
1664
1665 /* 2. Restore the state */
1666 __get_user(up_psr, &sf->info.si_regs.psr);
1667
1668 /* User can only change condition codes and FPU enabling in %psr. */
1669 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
1670 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
1671
1672 env->pc = pc;
1673 env->npc = npc;
1674 __get_user(env->y, &sf->info.si_regs.y);
1675     for (i = 0; i < 8; i++) {
1676 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
1677 }
1678     for (i = 0; i < 8; i++) {
1679 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
1680 }
1681
1682 /* FIXME: implement FPU save/restore:
1683 * __get_user(fpu_save, &sf->fpu_save);
1684 * if (fpu_save)
1685 * err |= restore_fpu_state(env, fpu_save);
1686 */
1687
1688     /* This is pretty much atomic; no amount of locking would prevent
1689      * the races which exist anyway.
1690 */
1691 __get_user(set.sig[0], &sf->info.si_mask);
1692     for (i = 1; i < TARGET_NSIG_WORDS; i++) {
1693 __get_user(set.sig[i], &sf->extramask[i - 1]);
1694 }
1695
1696 target_to_host_sigset_internal(&host_set, &set);
1697 set_sigmask(&host_set);
1698
1699 if (err) {
1700 goto segv_and_exit;
1701 }
1702 unlock_user_struct(sf, sf_addr, 0);
1703 return -TARGET_QEMU_ESIGRETURN;
1704
1705 segv_and_exit:
1706 unlock_user_struct(sf, sf_addr, 0);
1707 force_sig(TARGET_SIGSEGV);
1708 return -TARGET_QEMU_ESIGRETURN;
1709 }
1710
1711 long do_rt_sigreturn(CPUSPARCState *env)
1712 {
1713 trace_user_do_rt_sigreturn(env, 0);
1714 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
1715 return -TARGET_ENOSYS;
1716 }
1717
1718 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1719 #define SPARC_MC_TSTATE 0
1720 #define SPARC_MC_PC 1
1721 #define SPARC_MC_NPC 2
1722 #define SPARC_MC_Y 3
1723 #define SPARC_MC_G1 4
1724 #define SPARC_MC_G2 5
1725 #define SPARC_MC_G3 6
1726 #define SPARC_MC_G4 7
1727 #define SPARC_MC_G5 8
1728 #define SPARC_MC_G6 9
1729 #define SPARC_MC_G7 10
1730 #define SPARC_MC_O0 11
1731 #define SPARC_MC_O1 12
1732 #define SPARC_MC_O2 13
1733 #define SPARC_MC_O3 14
1734 #define SPARC_MC_O4 15
1735 #define SPARC_MC_O5 16
1736 #define SPARC_MC_O6 17
1737 #define SPARC_MC_O7 18
1738 #define SPARC_MC_NGREG 19
1739
1740 typedef abi_ulong target_mc_greg_t;
1741 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
1742
1743 struct target_mc_fq {
1744 abi_ulong *mcfq_addr;
1745 uint32_t mcfq_insn;
1746 };
1747
1748 struct target_mc_fpu {
1749 union {
1750 uint32_t sregs[32];
1751 uint64_t dregs[32];
1752 //uint128_t qregs[16];
1753 } mcfpu_fregs;
1754 abi_ulong mcfpu_fsr;
1755 abi_ulong mcfpu_fprs;
1756 abi_ulong mcfpu_gsr;
1757 struct target_mc_fq *mcfpu_fq;
1758 unsigned char mcfpu_qcnt;
1759 unsigned char mcfpu_qentsz;
1760 unsigned char mcfpu_enab;
1761 };
1762 typedef struct target_mc_fpu target_mc_fpu_t;
1763
1764 typedef struct {
1765 target_mc_gregset_t mc_gregs;
1766 target_mc_greg_t mc_fp;
1767 target_mc_greg_t mc_i7;
1768 target_mc_fpu_t mc_fpregs;
1769 } target_mcontext_t;
1770
1771 struct target_ucontext {
1772 struct target_ucontext *tuc_link;
1773 abi_ulong tuc_flags;
1774 target_sigset_t tuc_sigmask;
1775 target_mcontext_t tuc_mcontext;
1776 };
1777
1778 /* A V9 register window */
1779 struct target_reg_window {
1780 abi_ulong locals[8];
1781 abi_ulong ins[8];
1782 };
1783
1784 #define TARGET_STACK_BIAS 2047
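/* SPARC V9 ABI note: the in-register stack and frame pointers are
 * biased by 2047 bytes; the real register-window save area lives at
 * %fp + TARGET_STACK_BIAS, which is why the window accesses below add
 * the bias to regwptr[UREG_I6] before dereferencing.
 */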
1785
1786 /* {set, get}context() needed for 64-bit SparcLinux userland. */
1787 void sparc64_set_context(CPUSPARCState *env)
1788 {
1789 abi_ulong ucp_addr;
1790 struct target_ucontext *ucp;
1791 target_mc_gregset_t *grp;
1792 abi_ulong pc, npc, tstate;
1793 abi_ulong fp, i7, w_addr;
1794 unsigned int i;
1795
1796 ucp_addr = env->regwptr[UREG_I0];
1797 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
1798 goto do_sigsegv;
1799 }
1800 grp = &ucp->tuc_mcontext.mc_gregs;
1801 __get_user(pc, &((*grp)[SPARC_MC_PC]));
1802 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
1803 if ((pc | npc) & 3) {
1804 goto do_sigsegv;
1805 }
1806 if (env->regwptr[UREG_I1]) {
1807 target_sigset_t target_set;
1808 sigset_t set;
1809
1810 if (TARGET_NSIG_WORDS == 1) {
1811 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
1812 } else {
1813 abi_ulong *src, *dst;
1814 src = ucp->tuc_sigmask.sig;
1815 dst = target_set.sig;
1816 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
1817 __get_user(*dst, src);
1818 }
1819 }
1820 target_to_host_sigset_internal(&set, &target_set);
1821 set_sigmask(&set);
1822 }
1823 env->pc = pc;
1824 env->npc = npc;
1825 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
1826 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
1827 env->asi = (tstate >> 24) & 0xff;
1828 cpu_put_ccr(env, tstate >> 32);
1829 cpu_put_cwp64(env, tstate & 0x1f);
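    /* %tstate layout unpacked above (per the SPARC V9 spec): CCR in
     * bits 39:32, ASI in bits 31:24, CWP in bits 4:0. */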
1830 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
1831 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
1832 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
1833 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
1834 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
1835 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
1836 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
1837 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
1838 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
1839 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
1840 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
1841 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
1842 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
1843 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
1844 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
1845
1846 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
1847 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
1848
1849 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
1850 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
1851 abi_ulong) != 0) {
1852 goto do_sigsegv;
1853 }
1854 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
1855 abi_ulong) != 0) {
1856 goto do_sigsegv;
1857 }
1858 /* FIXME this does not match how the kernel handles the FPU in
1859 * its sparc64_set_context implementation. In particular the FPU
1860 * is only restored if fenab is non-zero in:
1861 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
1862 */
1863 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
1864 {
1865 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
1866 for (i = 0; i < 64; i++, src++) {
1867 if (i & 1) {
1868 __get_user(env->fpr[i/2].l.lower, src);
1869 } else {
1870 __get_user(env->fpr[i/2].l.upper, src);
1871 }
1872 }
1873 }
1874 __get_user(env->fsr,
1875 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
1876 __get_user(env->gsr,
1877 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
1878 unlock_user_struct(ucp, ucp_addr, 0);
1879 return;
1880 do_sigsegv:
1881 unlock_user_struct(ucp, ucp_addr, 0);
1882 force_sig(TARGET_SIGSEGV);
1883 }
1884
1885 void sparc64_get_context(CPUSPARCState *env)
1886 {
1887 abi_ulong ucp_addr;
1888 struct target_ucontext *ucp;
1889 target_mc_gregset_t *grp;
1890 target_mcontext_t *mcp;
1891 abi_ulong fp, i7, w_addr;
1892 int err;
1893 unsigned int i;
1894 target_sigset_t target_set;
1895 sigset_t set;
1896
1897 ucp_addr = env->regwptr[UREG_I0];
1898 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
1899 goto do_sigsegv;
1900 }
1901
1902 mcp = &ucp->tuc_mcontext;
1903 grp = &mcp->mc_gregs;
1904
1905 /* Skip over the trap instruction, first. */
1906 env->pc = env->npc;
1907 env->npc += 4;
1908
1909 /* If we're only reading the signal mask then do_sigprocmask()
1910 * is guaranteed not to fail, which is important because we don't
1911 * have any way to signal a failure or restart this operation since
1912 * this is not a normal syscall.
1913 */
1914 err = do_sigprocmask(0, NULL, &set);
1915 assert(err == 0);
1916 host_to_target_sigset_internal(&target_set, &set);
1917 if (TARGET_NSIG_WORDS == 1) {
1918 __put_user(target_set.sig[0],
1919 (abi_ulong *)&ucp->tuc_sigmask);
1920 } else {
1921 abi_ulong *src, *dst;
1922 src = target_set.sig;
1923 dst = ucp->tuc_sigmask.sig;
1924 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
1925 __put_user(*src, dst);
1926 }
1929 }
1930
1931 /* XXX: tstate must be saved properly */
1932 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
1933 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
1934 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
1935 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
1936 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
1937 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
1938 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
1939 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
1940 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
1941 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
1942 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
1943 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
1944 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
1945 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
1946 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
1947 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
1948 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
1949 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
1950 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
1951
1952 w_addr = TARGET_STACK_BIAS + env->regwptr[UREG_I6];
1953 fp = i7 = 0;
1954 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
1955 abi_ulong) != 0) {
1956 goto do_sigsegv;
1957 }
1958 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
1959 abi_ulong) != 0) {
1960 goto do_sigsegv;
1961 }
1962 __put_user(fp, &(mcp->mc_fp));
1963 __put_user(i7, &(mcp->mc_i7));
1964
1965 {
1966 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
1967 for (i = 0; i < 64; i++, dst++) {
1968 if (i & 1) {
1969 __put_user(env->fpr[i/2].l.lower, dst);
1970 } else {
1971 __put_user(env->fpr[i/2].l.upper, dst);
1972 }
1973 }
1974 }
1975 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
1976 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
1977 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
1978
1981 unlock_user_struct(ucp, ucp_addr, 1);
1982 return;
1983 do_sigsegv:
1984 unlock_user_struct(ucp, ucp_addr, 1);
1985 force_sig(TARGET_SIGSEGV);
1986 }
1987 #endif
1988 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
1989
1990 # if defined(TARGET_ABI_MIPSO32)
1991 struct target_sigcontext {
1992 uint32_t sc_regmask; /* Unused */
1993 uint32_t sc_status;
1994 uint64_t sc_pc;
1995 uint64_t sc_regs[32];
1996 uint64_t sc_fpregs[32];
1997 uint32_t sc_ownedfp; /* Unused */
1998 uint32_t sc_fpc_csr;
1999 uint32_t sc_fpc_eir; /* Unused */
2000 uint32_t sc_used_math;
2001 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2002 uint32_t pad0;
2003 uint64_t sc_mdhi;
2004 uint64_t sc_mdlo;
2005 target_ulong sc_hi1; /* Was sc_cause */
2006 target_ulong sc_lo1; /* Was sc_badvaddr */
2007 target_ulong sc_hi2; /* Was sc_sigset[4] */
2008 target_ulong sc_lo2;
2009 target_ulong sc_hi3;
2010 target_ulong sc_lo3;
2011 };
2012 # else /* N32 || N64 */
2013 struct target_sigcontext {
2014 uint64_t sc_regs[32];
2015 uint64_t sc_fpregs[32];
2016 uint64_t sc_mdhi;
2017 uint64_t sc_hi1;
2018 uint64_t sc_hi2;
2019 uint64_t sc_hi3;
2020 uint64_t sc_mdlo;
2021 uint64_t sc_lo1;
2022 uint64_t sc_lo2;
2023 uint64_t sc_lo3;
2024 uint64_t sc_pc;
2025 uint32_t sc_fpc_csr;
2026 uint32_t sc_used_math;
2027 uint32_t sc_dsp;
2028 uint32_t sc_reserved;
2029 };
2030 # endif /* O32 */
2031
2032 struct sigframe {
2033 uint32_t sf_ass[4]; /* argument save space for o32 */
2034 uint32_t sf_code[2]; /* signal trampoline */
2035 struct target_sigcontext sf_sc;
2036 target_sigset_t sf_mask;
2037 };
2038
2039 struct target_ucontext {
2040 target_ulong tuc_flags;
2041 target_ulong tuc_link;
2042 target_stack_t tuc_stack;
2043 target_ulong pad0;
2044 struct target_sigcontext tuc_mcontext;
2045 target_sigset_t tuc_sigmask;
2046 };
2047
2048 struct target_rt_sigframe {
2049 uint32_t rs_ass[4]; /* argument save space for o32 */
2050 uint32_t rs_code[2]; /* signal trampoline */
2051 struct target_siginfo rs_info;
2052 struct target_ucontext rs_uc;
2053 };
2054
2055 /* Install trampoline to jump back from signal handler */
2056 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2057 {
2058 int err = 0;
2059
2060 /*
2061 * Set up the return code ...
2062 *
2063 * li v0, __NR__foo_sigreturn
2064 * syscall
2065 */
2066
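/* 0x24020000 is "addiu $v0, $zero, imm16" with the syscall number in
 * the low 16 bits; 0x0000000c is the MIPS "syscall" instruction. */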
2067 __put_user(0x24020000 + syscall, tramp + 0);
2068 __put_user(0x0000000c , tramp + 1);
2069 return err;
2070 }
2071
2072 static inline void setup_sigcontext(CPUMIPSState *regs,
2073 struct target_sigcontext *sc)
2074 {
2075 int i;
2076
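/* exception_resume_pc() yields the correct restart PC even when we
 * stopped in a branch delay slot; once it is saved, the pending
 * branch state in hflags can be discarded. */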
2077 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2078 regs->hflags &= ~MIPS_HFLAG_BMASK;
2079
2080 __put_user(0, &sc->sc_regs[0]);
2081 for (i = 1; i < 32; ++i) {
2082 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2083 }
2084
2085 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2086 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2087
2088 /* Rather than checking for dsp existence, always copy. The storage
2089 would just be garbage otherwise. */
2090 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2091 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2092 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2093 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2094 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2095 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2096 {
2097 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2098 __put_user(dsp, &sc->sc_dsp);
2099 }
2100
2101 __put_user(1, &sc->sc_used_math);
2102
2103 for (i = 0; i < 32; ++i) {
2104 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2105 }
2106 }
2107
2108 static inline void
2109 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2110 {
2111 int i;
2112
2113 __get_user(regs->CP0_EPC, &sc->sc_pc);
2114
2115 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2116 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2117
2118 for (i = 1; i < 32; ++i) {
2119 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2120 }
2121
2122 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2123 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2124 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2125 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2126 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2127 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2128 {
2129 uint32_t dsp;
2130 __get_user(dsp, &sc->sc_dsp);
2131 cpu_wrdsp(dsp, 0x3ff, regs);
2132 }
2133
2134 for (i = 0; i < 32; ++i) {
2135 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2136 }
2137 }
2138
2139 /*
2140 * Determine which stack to use.
2141 */
2142 static inline abi_ulong
2143 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2144 {
2145 unsigned long sp;
2146
2147 /* Default to using normal stack */
2148 sp = regs->active_tc.gpr[29];
2149
2150 /*
2151 * FPU emulator may have its own trampoline active just
2152 * above the user stack, 16 bytes before the next lowest
2153 * 16-byte boundary. Try to avoid trashing it.
2154 */
2155 sp -= 32;
2156
2157 /* This is the X/Open sanctioned signal stack switching. */
2158 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
2159 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2160 }
2161
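/* Keep the frame at least 8-byte aligned. */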
2162 return (sp - frame_size) & ~7;
2163 }
2164
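/* On CPUs with MIPS16 or microMIPS, bit 0 of the PC selects the
 * compressed ISA mode; mirror it into hflags and strip it from the
 * architectural PC. */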
2165 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2166 {
2167 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2168 env->hflags &= ~MIPS_HFLAG_M16;
2169 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2170 env->active_tc.PC &= ~(target_ulong) 1;
2171 }
2172 }
2173
2174 # if defined(TARGET_ABI_MIPSO32)
2175 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2176 static void setup_frame(int sig, struct target_sigaction * ka,
2177 target_sigset_t *set, CPUMIPSState *regs)
2178 {
2179 struct sigframe *frame;
2180 abi_ulong frame_addr;
2181 int i;
2182
2183 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2184 trace_user_setup_frame(regs, frame_addr);
2185 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2186 goto give_sigsegv;
2187 }
2188
2189 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2190
2191 setup_sigcontext(regs, &frame->sf_sc);
2192
2193 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2194 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2195 }
2196
2197 /*
2198 * Arguments to signal handler:
2199 *
2200 * a0 = signal number
2201 * a1 = 0 (should be cause)
2202 * a2 = pointer to struct sigcontext
2203 *
2204 * $25 and PC point to the signal handler, $29 points to the
2205 * struct sigframe.
2206 */
2207 regs->active_tc.gpr[ 4] = sig;
2208 regs->active_tc.gpr[ 5] = 0;
2209 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2210 regs->active_tc.gpr[29] = frame_addr;
2211 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2212 /* The original kernel code sets CP0_EPC to the handler,
2213 * since it returns to userland using eret.
2214 * We cannot do that here, so we must set PC directly. */
2215 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2216 mips_set_hflags_isa_mode_from_pc(regs);
2217 unlock_user_struct(frame, frame_addr, 1);
2218 return;
2219
2220 give_sigsegv:
2221 force_sigsegv(sig);
2222 }
2223
2224 long do_sigreturn(CPUMIPSState *regs)
2225 {
2226 struct sigframe *frame;
2227 abi_ulong frame_addr;
2228 sigset_t blocked;
2229 target_sigset_t target_set;
2230 int i;
2231
2232 frame_addr = regs->active_tc.gpr[29];
2233 trace_user_do_sigreturn(regs, frame_addr);
2234 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2235 goto badframe;
2236
2237 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2238 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2239 }
2240
2241 target_to_host_sigset_internal(&blocked, &target_set);
2242 set_sigmask(&blocked);
2243
2244 restore_sigcontext(regs, &frame->sf_sc);
2245
2246 #if 0
2247 /*
2248 * Don't let your children do this ...
2249 */
2250 __asm__ __volatile__(
2251 "move\t$29, %0\n\t"
2252 "j\tsyscall_exit"
2253 :/* no outputs */
2254 :"r" (&regs));
2255 /* Unreached */
2256 #endif
2257
2258 regs->active_tc.PC = regs->CP0_EPC;
2259 mips_set_hflags_isa_mode_from_pc(regs);
2260 /* I am not sure this is right, but it seems to work.
2261 * Maybe a problem with nested signals? */
2262 regs->CP0_EPC = 0;
2263 return -TARGET_QEMU_ESIGRETURN;
2264
2265 badframe:
2266 force_sig(TARGET_SIGSEGV);
2267 return -TARGET_QEMU_ESIGRETURN;
2268 }
2269 # endif /* O32 */
2270
2271 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2272 target_siginfo_t *info,
2273 target_sigset_t *set, CPUMIPSState *env)
2274 {
2275 struct target_rt_sigframe *frame;
2276 abi_ulong frame_addr;
2277 int i;
2278
2279 frame_addr = get_sigframe(ka, env, sizeof(*frame));
2280 trace_user_setup_rt_frame(env, frame_addr);
2281 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2282 goto give_sigsegv;
2283 }
2284
2285 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
2286
2287 tswap_siginfo(&frame->rs_info, info);
2288
2289 __put_user(0, &frame->rs_uc.tuc_flags);
2290 __put_user(0, &frame->rs_uc.tuc_link);
2291 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
2292 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
2293 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
2294 &frame->rs_uc.tuc_stack.ss_flags);
2295
2296 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
2297
2298 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2299 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
2300 }
2301
2302 /*
2303 * Arguments to signal handler:
2304 *
2305 * a0 = signal number
2306 * a1 = pointer to siginfo_t
2307 * a2 = pointer to ucontext_t
2308 *
2309 * $25 and PC point to the signal handler, $29 points to the
2310 * struct sigframe.
2311 */
2312 env->active_tc.gpr[ 4] = sig;
2313 env->active_tc.gpr[ 5] = frame_addr
2314 + offsetof(struct target_rt_sigframe, rs_info);
2315 env->active_tc.gpr[ 6] = frame_addr
2316 + offsetof(struct target_rt_sigframe, rs_uc);
2317 env->active_tc.gpr[29] = frame_addr;
2318 env->active_tc.gpr[31] = frame_addr
2319 + offsetof(struct target_rt_sigframe, rs_code);
2320 /* The original kernel code sets CP0_EPC to the handler,
2321 * since it returns to userland using eret.
2322 * We cannot do that here, so we must set PC directly. */
2323 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
2324 mips_set_hflags_isa_mode_from_pc(env);
2325 unlock_user_struct(frame, frame_addr, 1);
2326 return;
2327
2328 give_sigsegv:
2329 unlock_user_struct(frame, frame_addr, 1);
2330 force_sigsegv(sig);
2331 }
2332
2333 long do_rt_sigreturn(CPUMIPSState *env)
2334 {
2335 struct target_rt_sigframe *frame;
2336 abi_ulong frame_addr;
2337 sigset_t blocked;
2338
2339 frame_addr = env->active_tc.gpr[29];
2340 trace_user_do_rt_sigreturn(env, frame_addr);
2341 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2342 goto badframe;
2343 }
2344
2345 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
2346 set_sigmask(&blocked);
2347
2348 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
2349
2350 if (do_sigaltstack(frame_addr +
2351 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
2352 0, get_sp_from_cpustate(env)) == -EFAULT)
2353 goto badframe;
2354
2355 env->active_tc.PC = env->CP0_EPC;
2356 mips_set_hflags_isa_mode_from_pc(env);
2357 /* I am not sure this is right, but it seems to work.
2358 * Maybe a problem with nested signals? */
2359 env->CP0_EPC = 0;
2360 return -TARGET_QEMU_ESIGRETURN;
2361
2362 badframe:
2363 force_sig(TARGET_SIGSEGV);
2364 return -TARGET_QEMU_ESIGRETURN;
2365 }
2366
2367 #elif defined(TARGET_PPC)
2368
2369 /* Size of dummy stack frame allocated when calling signal handler.
2370 See arch/powerpc/include/asm/ptrace.h. */
2371 #if defined(TARGET_PPC64)
2372 #define SIGNAL_FRAMESIZE 128
2373 #else
2374 #define SIGNAL_FRAMESIZE 64
2375 #endif
2376
2377 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
2378 on 64-bit PPC, sigcontext and mcontext are one and the same. */
2379 struct target_mcontext {
2380 target_ulong mc_gregs[48];
2381 /* Includes fpscr. */
2382 uint64_t mc_fregs[33];
2383 #if defined(TARGET_PPC64)
2384 /* Pointer to the vector regs */
2385 target_ulong v_regs;
2386 #else
2387 target_ulong mc_pad[2];
2388 #endif
2389 /* We need to handle Altivec and SPE at the same time, which no
2390 kernel needs to do. Fortunately, the kernel defines this bit to
2391 be Altivec-register-large all the time, rather than trying to
2392 twiddle it based on the specific platform. */
2393 union {
2394 /* SPE vector registers. One extra for SPEFSCR. */
2395 uint32_t spe[33];
2396 /* Altivec vector registers. The packing of VSCR and VRSAVE
2397 varies depending on whether we're PPC64 or not: PPC64 splits
2398 them apart; PPC32 stuffs them together.
2399 We also need to account for the VSX registers on PPC64.
2400 */
2401 #if defined(TARGET_PPC64)
2402 #define QEMU_NVRREG (34 + 16)
2403 /* On ppc64, this mcontext structure is naturally *unaligned*,
2404 * or rather it is aligned on an 8-byte boundary but not on
2405 * a 16-byte one. This pad fixes it up. This is also why the
2406 * vector regs are referenced by the v_regs pointer above, so
2407 * any amount of padding can be added here.
2408 */
2409 target_ulong pad;
2410 #else
2411 /* On ppc32, we are already aligned to 16 bytes */
2412 #define QEMU_NVRREG 33
2413 #endif
2414 /* We cannot use ppc_avr_t here as we do *not* want the implied
2415 * 16-bytes alignment that would result from it. This would have
2416 * the effect of making the whole struct target_mcontext aligned
2417 * which breaks the layout of struct target_ucontext on ppc64.
2418 */
2419 uint64_t altivec[QEMU_NVRREG][2];
2420 #undef QEMU_NVRREG
2421 } mc_vregs;
2422 };
2423
2424 /* See arch/powerpc/include/asm/sigcontext.h. */
2425 struct target_sigcontext {
2426 target_ulong _unused[4];
2427 int32_t signal;
2428 #if defined(TARGET_PPC64)
2429 int32_t pad0;
2430 #endif
2431 target_ulong handler;
2432 target_ulong oldmask;
2433 target_ulong regs; /* struct pt_regs __user * */
2434 #if defined(TARGET_PPC64)
2435 struct target_mcontext mcontext;
2436 #endif
2437 };
2438
2439 /* Indices for target_mcontext.mc_gregs, below.
2440 See arch/powerpc/include/asm/ptrace.h for details. */
2441 enum {
2442 TARGET_PT_R0 = 0,
2443 TARGET_PT_R1 = 1,
2444 TARGET_PT_R2 = 2,
2445 TARGET_PT_R3 = 3,
2446 TARGET_PT_R4 = 4,
2447 TARGET_PT_R5 = 5,
2448 TARGET_PT_R6 = 6,
2449 TARGET_PT_R7 = 7,
2450 TARGET_PT_R8 = 8,
2451 TARGET_PT_R9 = 9,
2452 TARGET_PT_R10 = 10,
2453 TARGET_PT_R11 = 11,
2454 TARGET_PT_R12 = 12,
2455 TARGET_PT_R13 = 13,
2456 TARGET_PT_R14 = 14,
2457 TARGET_PT_R15 = 15,
2458 TARGET_PT_R16 = 16,
2459 TARGET_PT_R17 = 17,
2460 TARGET_PT_R18 = 18,
2461 TARGET_PT_R19 = 19,
2462 TARGET_PT_R20 = 20,
2463 TARGET_PT_R21 = 21,
2464 TARGET_PT_R22 = 22,
2465 TARGET_PT_R23 = 23,
2466 TARGET_PT_R24 = 24,
2467 TARGET_PT_R25 = 25,
2468 TARGET_PT_R26 = 26,
2469 TARGET_PT_R27 = 27,
2470 TARGET_PT_R28 = 28,
2471 TARGET_PT_R29 = 29,
2472 TARGET_PT_R30 = 30,
2473 TARGET_PT_R31 = 31,
2474 TARGET_PT_NIP = 32,
2475 TARGET_PT_MSR = 33,
2476 TARGET_PT_ORIG_R3 = 34,
2477 TARGET_PT_CTR = 35,
2478 TARGET_PT_LNK = 36,
2479 TARGET_PT_XER = 37,
2480 TARGET_PT_CCR = 38,
2481 /* Yes, there are two registers with #39. One is 64-bit only. */
2482 TARGET_PT_MQ = 39,
2483 TARGET_PT_SOFTE = 39,
2484 TARGET_PT_TRAP = 40,
2485 TARGET_PT_DAR = 41,
2486 TARGET_PT_DSISR = 42,
2487 TARGET_PT_RESULT = 43,
2488 TARGET_PT_REGS_COUNT = 44
2489 };
2490
2491
2492 struct target_ucontext {
2493 target_ulong tuc_flags;
2494 target_ulong tuc_link; /* ucontext_t __user * */
2495 struct target_sigaltstack tuc_stack;
2496 #if !defined(TARGET_PPC64)
2497 int32_t tuc_pad[7];
2498 target_ulong tuc_regs; /* struct mcontext __user *
2499 points to uc_mcontext field */
2500 #endif
2501 target_sigset_t tuc_sigmask;
2502 #if defined(TARGET_PPC64)
2503 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
2504 struct target_sigcontext tuc_sigcontext;
2505 #else
2506 int32_t tuc_maskext[30];
2507 int32_t tuc_pad2[3];
2508 struct target_mcontext tuc_mcontext;
2509 #endif
2510 };
2511
2512 /* See arch/powerpc/kernel/signal_32.c. */
2513 struct target_sigframe {
2514 struct target_sigcontext sctx;
2515 struct target_mcontext mctx;
2516 int32_t abigap[56];
2517 };
2518
2519 #if defined(TARGET_PPC64)
2520
2521 #define TARGET_TRAMP_SIZE 6
2522
2523 struct target_rt_sigframe {
2524 /* sys_rt_sigreturn requires the ucontext be the first field */
2525 struct target_ucontext uc;
2526 target_ulong _unused[2];
2527 uint32_t trampoline[TARGET_TRAMP_SIZE];
2528 target_ulong pinfo; /* struct siginfo __user * */
2529 target_ulong puc; /* void __user * */
2530 struct target_siginfo info;
2531 /* The 64-bit ABI allows for 288 bytes below sp before decrementing it. */
2532 char abigap[288];
2533 } __attribute__((aligned(16)));
2534
2535 #else
2536
2537 struct target_rt_sigframe {
2538 struct target_siginfo info;
2539 struct target_ucontext uc;
2540 int32_t abigap[56];
2541 };
2542
2543 #endif
2544
2545 #if defined(TARGET_PPC64)
2546
2547 struct target_func_ptr {
2548 target_ulong entry;
2549 target_ulong toc;
2550 };
2551
2552 #endif
2553
2554 /* We use the mc_pad field for the signal return trampoline. */
2555 #define tramp mc_pad
2556
2557 /* See arch/powerpc/kernel/signal.c. */
2558 static target_ulong get_sigframe(struct target_sigaction *ka,
2559 CPUPPCState *env,
2560 int frame_size)
2561 {
2562 target_ulong oldsp;
2563
2564 oldsp = env->gpr[1];
2565
2566 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
2567 (sas_ss_flags(oldsp) == 0)) {
2568 oldsp = (target_sigaltstack_used.ss_sp
2569 + target_sigaltstack_used.ss_size);
2570 }
2571
2572 return (oldsp - frame_size) & ~0xFUL;
2573 }
2574
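/* Choose which u64[] half of the host-order ppc_avr_t corresponds to
 * each doubleword of the target's vector image; the halves must be
 * swapped when host and target endianness differ. */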
2575 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
2576 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
2577 #define PPC_VEC_HI 0
2578 #define PPC_VEC_LO 1
2579 #else
2580 #define PPC_VEC_HI 1
2581 #define PPC_VEC_LO 0
2582 #endif
2583
2584
2585 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
2586 {
2587 target_ulong msr = env->msr;
2588 int i;
2589 target_ulong ccr = 0;
2590
2591 /* In general, the kernel attempts to be intelligent about what it
2592 needs to save for Altivec/FP/SPE registers. We don't care that
2593 much, so we just go ahead and save everything. */
2594
2595 /* Save general registers. */
2596 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2597 __put_user(env->gpr[i], &frame->mc_gregs[i]);
2598 }
2599 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
2600 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
2601 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
2602 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
2603
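/* Pack the eight 4-bit CR fields into one 32-bit word, CR0 in the
 * most significant nibble. */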
2604 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
2605 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
2606 }
2607 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
2608
2609 /* Save Altivec registers if necessary. */
2610 if (env->insns_flags & PPC_ALTIVEC) {
2611 uint32_t *vrsave;
2612 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
2613 ppc_avr_t *avr = &env->avr[i];
2614 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
2615
2616 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
2617 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
2618 }
2619 /* Set MSR_VR in the saved MSR value to indicate that
2620 frame->mc_vregs contains valid data. */
2621 msr |= MSR_VR;
2622 #if defined(TARGET_PPC64)
2623 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
2624 /* 64-bit needs to put a pointer to the vectors in the frame */
2625 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
2626 #else
2627 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
2628 #endif
2629 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
2630 }
2631
2632 /* Save VSX second halves */
2633 if (env->insns_flags2 & PPC2_VSX) {
2634 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
2635 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
2636 __put_user(env->vsr[i], &vsregs[i]);
2637 }
2638 }
2639
2640 /* Save floating point registers. */
2641 if (env->insns_flags & PPC_FLOAT) {
2642 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
2643 __put_user(env->fpr[i], &frame->mc_fregs[i]);
2644 }
2645 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
2646 }
2647
2648 /* Save SPE registers. The kernel only saves the high half. */
2649 if (env->insns_flags & PPC_SPE) {
2650 #if defined(TARGET_PPC64)
2651 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2652 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
2653 }
2654 #else
2655 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
2656 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
2657 }
2658 #endif
2659 /* Set MSR_SPE in the saved MSR value to indicate that
2660 frame->mc_vregs contains valid data. */
2661 msr |= MSR_SPE;
2662 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
2663 }
2664
2665 /* Store MSR. */
2666 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
2667 }
2668
2669 static void encode_trampoline(int sigret, uint32_t *tramp)
2670 {
2671 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
2672 if (sigret) {
2673 __put_user(0x38000000 | sigret, &tramp[0]);
2674 __put_user(0x44000002, &tramp[1]);
2675 }
2676 }
2677
2678 static void restore_user_regs(CPUPPCState *env,
2679 struct target_mcontext *frame, int sig)
2680 {
2681 target_ulong save_r2 = 0;
2682 target_ulong msr;
2683 target_ulong ccr;
2684
2685 int i;
2686
2687 if (!sig) {
2688 save_r2 = env->gpr[2];
2689 }
2690
2691 /* Restore general registers. */
2692 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2693 __get_user(env->gpr[i], &frame->mc_gregs[i]);
2694 }
2695 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
2696 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
2697 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
2698 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
2699 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
2700
2701 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
2702 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
2703 }
2704
2705 if (!sig) {
2706 env->gpr[2] = save_r2;
2707 }
2708 /* Restore MSR. */
2709 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
2710
2711 /* If doing signal return, restore the previous little-endian mode. */
2712 if (sig)
2713 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
2714
2715 /* Restore Altivec registers if necessary. */
2716 if (env->insns_flags & PPC_ALTIVEC) {
2717 ppc_avr_t *v_regs;
2718 uint32_t *vrsave;
2719 #if defined(TARGET_PPC64)
2720 uint64_t v_addr;
2721 /* 64-bit needs to recover the pointer to the vectors from the frame */
2722 __get_user(v_addr, &frame->v_regs);
2723 v_regs = g2h(v_addr);
2724 #else
2725 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
2726 #endif
2727 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
2728 ppc_avr_t *avr = &env->avr[i];
2729 ppc_avr_t *vreg = &v_regs[i];
2730
2731 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
2732 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
2733 }
2734 /* Recover VRSAVE; its slot differs between ppc64 (v_regs[33])
2735 and ppc32 (v_regs[32]). */
2736 #if defined(TARGET_PPC64)
2737 vrsave = (uint32_t *)&v_regs[33];
2738 #else
2739 vrsave = (uint32_t *)&v_regs[32];
2740 #endif
2741 __get_user(env->spr[SPR_VRSAVE], vrsave);
2742 }
2743
2744 /* Restore VSX second halves */
2745 if (env->insns_flags2 & PPC2_VSX) {
2746 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
2747 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
2748 __get_user(env->vsr[i], &vsregs[i]);
2749 }
2750 }
2751
2752 /* Restore floating point registers. */
2753 if (env->insns_flags & PPC_FLOAT) {
2754 uint64_t fpscr;
2755 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
2756 __get_user(env->fpr[i], &frame->mc_fregs[i]);
2757 }
2758 __get_user(fpscr, &frame->mc_fregs[32]);
2759 env->fpscr = (uint32_t) fpscr;
2760 }
2761
2762 /* Restore SPE registers. The kernel only saves the high half. */
2763 if (env->insns_flags & PPC_SPE) {
2764 #if defined(TARGET_PPC64)
2765 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2766 uint32_t hi;
2767
2768 __get_user(hi, &frame->mc_vregs.spe[i]);
2769 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
2770 }
2771 #else
2772 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
2773 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
2774 }
2775 #endif
2776 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
2777 }
2778 }
2779
2780 #if !defined(TARGET_PPC64)
2781 static void setup_frame(int sig, struct target_sigaction *ka,
2782 target_sigset_t *set, CPUPPCState *env)
2783 {
2784 struct target_sigframe *frame;
2785 struct target_sigcontext *sc;
2786 target_ulong frame_addr, newsp;
2787 int err = 0;
2788
2789 frame_addr = get_sigframe(ka, env, sizeof(*frame));
2790 trace_user_setup_frame(env, frame_addr);
2791 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
2792 goto sigsegv;
2793 sc = &frame->sctx;
2794
2795 __put_user(ka->_sa_handler, &sc->handler);
2796 __put_user(set->sig[0], &sc->oldmask);
2797 __put_user(set->sig[1], &sc->_unused[3]);
2798 __put_user(h2g(&frame->mctx), &sc->regs);
2799 __put_user(sig, &sc->signal);
2800
2801 /* Save user regs. */
2802 save_user_regs(env, &frame->mctx);
2803
2804 /* Construct the trampoline code on the stack. */
2805 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
2806
2807 /* The kernel checks for the presence of a VDSO here. We don't
2808 emulate a vdso, so use a sigreturn system call. */
2809 env->lr = (target_ulong) h2g(frame->mctx.tramp);
2810
2811 /* Turn off all fp exceptions. */
2812 env->fpscr = 0;
2813
2814 /* Create a stack frame for the caller of the handler. */
2815 newsp = frame_addr - SIGNAL_FRAMESIZE;
2816 err |= put_user(env->gpr[1], newsp, target_ulong);
2817
2818 if (err)
2819 goto sigsegv;
2820
2821 /* Set up registers for signal handler. */
2822 env->gpr[1] = newsp;
2823 env->gpr[3] = sig;
2824 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
2825
2826 env->nip = (target_ulong) ka->_sa_handler;
2827
2828 /* Signal handlers are entered in big-endian mode. */
2829 env->msr &= ~(1ull << MSR_LE);
2830
2831 unlock_user_struct(frame, frame_addr, 1);
2832 return;
2833
2834 sigsegv:
2835 unlock_user_struct(frame, frame_addr, 1);
2836 force_sigsegv(sig);
2837 }
2838 #endif /* !defined(TARGET_PPC64) */
2839
2840 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2841 target_siginfo_t *info,
2842 target_sigset_t *set, CPUPPCState *env)
2843 {
2844 struct target_rt_sigframe *rt_sf;
2845 uint32_t *trampptr = 0;
2846 struct target_mcontext *mctx = 0;
2847 target_ulong rt_sf_addr, newsp = 0;
2848 int i, err = 0;
2849 #if defined(TARGET_PPC64)
2850 struct target_sigcontext *sc = 0;
2851 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
2852 #endif
2853
2854 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
2855 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
2856 goto sigsegv;
2857
2858 tswap_siginfo(&rt_sf->info, info);
2859
2860 __put_user(0, &rt_sf->uc.tuc_flags);
2861 __put_user(0, &rt_sf->uc.tuc_link);
2862 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
2863 &rt_sf->uc.tuc_stack.ss_sp);
2864 __put_user(sas_ss_flags(env->gpr[1]),
2865 &rt_sf->uc.tuc_stack.ss_flags);
2866 __put_user(target_sigaltstack_used.ss_size,
2867 &rt_sf->uc.tuc_stack.ss_size);
2868 #if !defined(TARGET_PPC64)
2869 __put_user(h2g(&rt_sf->uc.tuc_mcontext),
2870 &rt_sf->uc.tuc_regs);
2871 #endif
2872 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2873 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
2874 }
2875
2876 #if defined(TARGET_PPC64)
2877 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
2878 trampptr = &rt_sf->trampoline[0];
2879
2880 sc = &rt_sf->uc.tuc_sigcontext;
2881 __put_user(h2g(mctx), &sc->regs);
2882 __put_user(sig, &sc->signal);
2883 #else
2884 mctx = &rt_sf->uc.tuc_mcontext;
2885 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
2886 #endif
2887
2888 save_user_regs(env, mctx);
2889 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
2890
2891 /* The kernel checks for the presence of a VDSO here. We don't
2892 emulate a vdso, so use a sigreturn system call. */
2893 env->lr = (target_ulong) h2g(trampptr);
2894
2895 /* Turn off all fp exceptions. */
2896 env->fpscr = 0;
2897
2898 /* Create a stack frame for the caller of the handler. */
2899 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
2900 err |= put_user(env->gpr[1], newsp, target_ulong);
2901
2902 if (err)
2903 goto sigsegv;
2904
2905 /* Set up registers for signal handler. */
2906 env->gpr[1] = newsp;
2907 env->gpr[3] = (target_ulong) sig;
2908 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
2909 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
2910 env->gpr[6] = (target_ulong) h2g(rt_sf);
2911
2912 #if defined(TARGET_PPC64)
2913 if (get_ppc64_abi(image) < 2) {
2914 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
2915 struct target_func_ptr *handler =
2916 (struct target_func_ptr *)g2h(ka->_sa_handler);
2917 env->nip = tswapl(handler->entry);
2918 env->gpr[2] = tswapl(handler->toc);
2919 } else {
2920 /* ELFv2 PPC64 function pointers are entry points, but R12
2921 * must also be set */
2922 env->nip = tswapl((target_ulong) ka->_sa_handler);
2923 env->gpr[12] = env->nip;
2924 }
2925 #else
2926 env->nip = (target_ulong) ka->_sa_handler;
2927 #endif
2928
2929 /* Signal handlers are entered in big-endian mode. */
2930 env->msr &= ~(1ull << MSR_LE);
2931
2932 unlock_user_struct(rt_sf, rt_sf_addr, 1);
2933 return;
2934
2935 sigsegv:
2936 unlock_user_struct(rt_sf, rt_sf_addr, 1);
2937 force_sigsegv(sig);
2938
2939 }
2940
2941 #if !defined(TARGET_PPC64)
2942 long do_sigreturn(CPUPPCState *env)
2943 {
2944 struct target_sigcontext *sc = NULL;
2945 struct target_mcontext *sr = NULL;
2946 target_ulong sr_addr = 0, sc_addr;
2947 sigset_t blocked;
2948 target_sigset_t set;
2949
2950 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
2951 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
2952 goto sigsegv;
2953
2954 #if defined(TARGET_PPC64)
2955 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
2956 #else
2957 __get_user(set.sig[0], &sc->oldmask);
2958 __get_user(set.sig[1], &sc->_unused[3]);
2959 #endif
2960 target_to_host_sigset_internal(&blocked, &set);
2961 set_sigmask(&blocked);
2962
2963 __get_user(sr_addr, &sc->regs);
2964 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
2965 goto sigsegv;
2966 restore_user_regs(env, sr, 1);
2967
2968 unlock_user_struct(sr, sr_addr, 1);
2969 unlock_user_struct(sc, sc_addr, 1);
2970 return -TARGET_QEMU_ESIGRETURN;
2971
2972 sigsegv:
2973 unlock_user_struct(sr, sr_addr, 1);
2974 unlock_user_struct(sc, sc_addr, 1);
2975 force_sig(TARGET_SIGSEGV);
2976 return -TARGET_QEMU_ESIGRETURN;
2977 }
2978 #endif /* !defined(TARGET_PPC64) */
2979
2980 /* See arch/powerpc/kernel/signal_32.c. */
2981 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
2982 {
2983 struct target_mcontext *mcp;
2984 target_ulong mcp_addr;
2985 sigset_t blocked;
2986 target_sigset_t set;
2987
2988 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
2989 sizeof (set)))
2990 return 1;
2991
2992 #if defined(TARGET_PPC64)
2993 mcp_addr = h2g(ucp) +
2994 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
2995 #else
2996 __get_user(mcp_addr, &ucp->tuc_regs);
2997 #endif
2998
2999 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
3000 return 1;
3001
3002 target_to_host_sigset_internal(&blocked, &set);
3003 set_sigmask(&blocked);
3004 restore_user_regs(env, mcp, sig);
3005
3006 unlock_user_struct(mcp, mcp_addr, 1);
3007 return 0;
3008 }
3009
3010 long do_rt_sigreturn(CPUPPCState *env)
3011 {
3012 struct target_rt_sigframe *rt_sf = NULL;
3013 target_ulong rt_sf_addr;
3014
3015 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
3016 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
3017 goto sigsegv;
3018
3019 if (do_setcontext(&rt_sf->uc, env, 1))
3020 goto sigsegv;
3021
3022 do_sigaltstack(rt_sf_addr
3023 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
3024 0, env->gpr[1]);
3025
3026 unlock_user_struct(rt_sf, rt_sf_addr, 1);
3027 return -TARGET_QEMU_ESIGRETURN;
3028
3029 sigsegv:
3030 unlock_user_struct(rt_sf, rt_sf_addr, 1);
3031 force_sig(TARGET_SIGSEGV);
3032 return -TARGET_QEMU_ESIGRETURN;
3033 }
3034
3035 #elif defined(TARGET_RISCV)
3036
3037 /* Signal handler invocation must be transparent for the code being
3038 interrupted. Complete CPU (hart) state is saved on entry and restored
3039 before returning from the handler. Process sigmask is also saved to block
3040 signals while the handler is running. The handler gets its own stack,
3041 which also doubles as storage for the CPU state and sigmask.
3042
3043 The code below is a QEMU re-implementation of arch/riscv/kernel/signal.c. */
3044
3045 struct target_sigcontext {
3046 abi_long pc;
3047 abi_long gpr[31]; /* x0 is not stored, so all indices are offset by -1 */
3048 uint64_t fpr[32];
3049 uint32_t fcsr;
3050 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
3051
3052 struct target_ucontext {
3053 unsigned long uc_flags;
3054 struct target_ucontext *uc_link;
3055 target_stack_t uc_stack;
3056 struct target_sigcontext uc_mcontext;
3057 target_sigset_t uc_sigmask;
3058 };
3059
3060 struct target_rt_sigframe {
3061 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
3062 struct target_siginfo info;
3063 struct target_ucontext uc;
3064 };
3065
3066 static abi_ulong get_sigframe(struct target_sigaction *ka,
3067 CPURISCVState *regs, size_t framesize)
3068 {
3069 abi_ulong sp = regs->gpr[xSP];
3070 int onsigstack = on_sig_stack(sp);
3071
3072 /* redzone */
3073 /* This is the X/Open sanctioned signal stack switching. */
3074 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3075 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3076 }
3077
3078 sp -= framesize;
3079 sp &= ~3UL; /* align sp on 4-byte boundary */
3080
3081 /* If we are on the alternate signal stack and would overflow it, don't.
3082 Return an always-bogus address instead so we will die with SIGSEGV. */
3083 if (onsigstack && !likely(on_sig_stack(sp))) {
3084 return -1L;
3085 }
3086
3087 return sp;
3088 }
3089
3090 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
3091 {
3092 int i;
3093
3094 __put_user(env->pc, &sc->pc);
3095
3096 for (i = 1; i < 32; i++) {
3097 __put_user(env->gpr[i], &sc->gpr[i - 1]);
3098 }
3099 for (i = 0; i < 32; i++) {
3100 __put_user(env->fpr[i], &sc->fpr[i]);
3101 }
3102
3103 uint32_t fcsr = csr_read_helper(env, CSR_FCSR);
3104 __put_user(fcsr, &sc->fcsr);
3105 }
3106
3107 static void setup_ucontext(struct target_ucontext *uc,
3108 CPURISCVState *env, target_sigset_t *set)
3109 {
3110 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
3111 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
3112 abi_ulong ss_size = target_sigaltstack_used.ss_size;
3113
3114 __put_user(0, &(uc->uc_flags));
3115 __put_user(0, &(uc->uc_link));
3116
3117 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
3118 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
3119 __put_user(ss_size, &(uc->uc_stack.ss_size));
3120
3121 int i;
3122 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
3123 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
3124 }
3125
3126 setup_sigcontext(&uc->uc_mcontext, env);
3127 }
3128
3129 static inline void install_sigtramp(uint32_t *tramp)
3130 {
3131 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
3132 __put_user(0x00000073, tramp + 1); /* ecall */
3133 }
3134
3135 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3136 target_siginfo_t *info,
3137 target_sigset_t *set, CPURISCVState *env)
3138 {
3139 abi_ulong frame_addr;
3140 struct target_rt_sigframe *frame;
3141
3142 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3143 trace_user_setup_rt_frame(env, frame_addr);
3144
3145 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3146 goto badframe;
3147 }
3148
3149 setup_ucontext(&frame->uc, env, set);
3150 tswap_siginfo(&frame->info, info);
3151 install_sigtramp(frame->tramp);
3152
3153 env->pc = ka->_sa_handler;
3154 env->gpr[xSP] = frame_addr;
3155 env->gpr[xA0] = sig;
3156 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
3157 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
3158 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
3159
unlock_user_struct(frame, frame_addr, 1);
3160 return;
3161
3162 badframe:
3163 unlock_user_struct(frame, frame_addr, 1);
3164 if (sig == TARGET_SIGSEGV) {
3165 ka->_sa_handler = TARGET_SIG_DFL;
3166 }
3167 force_sig(TARGET_SIGSEGV);
3168 }
3169
3170 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
3171 {
3172 int i;
3173
3174 __get_user(env->pc, &sc->pc);
3175
3176 for (i = 1; i < 32; ++i) {
3177 __get_user(env->gpr[i], &sc->gpr[i - 1]);
3178 }
3179 for (i = 0; i < 32; ++i) {
3180 __get_user(env->fpr[i], &sc->fpr[i]);
3181 }
3182
3183 uint32_t fcsr;
3184 __get_user(fcsr, &sc->fcsr);
3185 csr_write_helper(env, fcsr, CSR_FCSR);
3186 }
3187
3188 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
3189 {
3190 sigset_t blocked;
3191 target_sigset_t target_set;
3192 int i;
3193
3194 target_sigemptyset(&target_set);
3195 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
3196 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
3197 }
3198
3199 target_to_host_sigset_internal(&blocked, &target_set);
3200 set_sigmask(&blocked);
3201
3202 restore_sigcontext(env, &uc->uc_mcontext);
3203 }
3204
3205 long do_rt_sigreturn(CPURISCVState *env)
3206 {
3207 struct target_rt_sigframe *frame;
3208 abi_ulong frame_addr;
3209
3210 frame_addr = env->gpr[xSP];
3211 trace_user_do_sigreturn(env, frame_addr);
3212 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3213 goto badframe;
3214 }
3215
3216 restore_ucontext(env, &frame->uc);
3217
3218 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
3219 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
3220 goto badframe;
3221 }
3222
3223 unlock_user_struct(frame, frame_addr, 0);
3224 return -TARGET_QEMU_ESIGRETURN;
3225
3226 badframe:
3227 unlock_user_struct(frame, frame_addr, 0);
3228 force_sig(TARGET_SIGSEGV);
3229 return -TARGET_QEMU_ESIGRETURN;
3230 }
3231
3232 #elif defined(TARGET_HPPA)
3233
3234 struct target_sigcontext {
3235 abi_ulong sc_flags;
3236 abi_ulong sc_gr[32];
3237 uint64_t sc_fr[32];
3238 abi_ulong sc_iasq[2];
3239 abi_ulong sc_iaoq[2];
3240 abi_ulong sc_sar;
3241 };
3242
3243 struct target_ucontext {
3244 abi_uint tuc_flags;
3245 abi_ulong tuc_link;
3246 target_stack_t tuc_stack;
3247 abi_uint pad[1];
3248 struct target_sigcontext tuc_mcontext;
3249 target_sigset_t tuc_sigmask;
3250 };
3251
3252 struct target_rt_sigframe {
3253 abi_uint tramp[9];
3254 target_siginfo_t info;
3255 struct target_ucontext uc;
3256 /* hidden location of upper halves of pa2.0 64-bit gregs */
3257 };
3258
3259 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
3260 {
3261 int flags = 0;
3262 int i;
3263
3264 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
3265
3266 if (env->iaoq_f < TARGET_PAGE_SIZE) {
3267 /* In the gateway page, executing a syscall. */
3268 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
3269 __put_user(env->gr[31], &sc->sc_iaoq[0]);
3270 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
3271 } else {
3272 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
3273 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
3274 }
3275 __put_user(0, &sc->sc_iasq[0]);
3276 __put_user(0, &sc->sc_iasq[1]);
3277 __put_user(flags, &sc->sc_flags);
3278
3279 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
3280 for (i = 1; i < 32; ++i) {
3281 __put_user(env->gr[i], &sc->sc_gr[i]);
3282 }
3283
3284 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
3285 for (i = 1; i < 32; ++i) {
3286 __put_user(env->fr[i], &sc->sc_fr[i]);
3287 }
3288
3289 __put_user(env->cr[CR_SAR], &sc->sc_sar);
3290 }
3291
3292 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
3293 {
3294 target_ulong psw;
3295 int i;
3296
3297 __get_user(psw, &sc->sc_gr[0]);
3298 cpu_hppa_put_psw(env, psw);
3299
3300 for (i = 1; i < 32; ++i) {
3301 __get_user(env->gr[i], &sc->sc_gr[i]);
3302 }
3303 for (i = 0; i < 32; ++i) {
3304 __get_user(env->fr[i], &sc->sc_fr[i]);
3305 }
3306 cpu_hppa_loaded_fr0(env);
3307
3308 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
3309 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
3310 __get_user(env->cr[CR_SAR], &sc->sc_sar);
3311 }
3312
3313 /* No, this doesn't look right, but it's copied straight from the kernel. */
3314 #define PARISC_RT_SIGFRAME_SIZE32 \
3315 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
3316
3317 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3318 target_siginfo_t *info,
3319 target_sigset_t *set, CPUArchState *env)
3320 {
3321 abi_ulong frame_addr, sp, haddr;
3322 struct target_rt_sigframe *frame;
3323 int i;
3324
3325 sp = env->gr[30];
3326 if (ka->sa_flags & TARGET_SA_ONSTACK) {
3327 if (sas_ss_flags(sp) == 0) {
3328 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
3329 }
3330 }
3331 frame_addr = QEMU_ALIGN_UP(sp, 64);
3332 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
3333
3334 trace_user_setup_rt_frame(env, frame_addr);
3335
3336 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3337 goto give_sigsegv;
3338 }
3339
3340 tswap_siginfo(&frame->info, info);
3341 frame->uc.tuc_flags = 0;
3342 frame->uc.tuc_link = 0;
3343
3344 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
3345 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3346 &frame->uc.tuc_stack.ss_flags);
3347 __put_user(target_sigaltstack_used.ss_size,
3348 &frame->uc.tuc_stack.ss_size);
3349
3350 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
3351 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3352 }
3353
3354 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3355
3356 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
3357 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
3358 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
3359 __put_user(0x08000240, frame->tramp + 3); /* nop */
3360
3361 unlock_user_struct(frame, frame_addr, 1);
3362
3363 env->gr[2] = h2g(frame->tramp);
3364 env->gr[30] = sp;
3365 env->gr[26] = sig;
3366 env->gr[25] = h2g(&frame->info);
3367 env->gr[24] = h2g(&frame->uc);
3368
3369 haddr = ka->_sa_handler;
3370 if (haddr & 2) {
3371 /* Function descriptor. */
3372 target_ulong *fdesc, dest;
3373
3374 haddr &= -4;
3375 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
3376 goto give_sigsegv;
3377 }
3378 __get_user(dest, fdesc);
3379 __get_user(env->gr[19], fdesc + 1);
3380 unlock_user_struct(fdesc, haddr, 1);
3381 haddr = dest;
3382 }
3383 env->iaoq_f = haddr;
3384 env->iaoq_b = haddr + 4;
3385 return;
3386
3387 give_sigsegv:
3388 force_sigsegv(sig);
3389 }
3390
3391 long do_rt_sigreturn(CPUArchState *env)
3392 {
3393 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
3394 struct target_rt_sigframe *frame;
3395 sigset_t set;
3396
3397 trace_user_do_rt_sigreturn(env, frame_addr);
3398 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3399 goto badframe;
3400 }
3401 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3402 set_sigmask(&set);
3403
3404 restore_sigcontext(env, &frame->uc.tuc_mcontext);
3405 unlock_user_struct(frame, frame_addr, 0);
3406
3407 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
3408 uc.tuc_stack),
3409 0, env->gr[30]) == -EFAULT) {
3410 goto badframe;
3411 }
3412
3413 unlock_user_struct(frame, frame_addr, 0);
3414 return -TARGET_QEMU_ESIGRETURN;
3415
3416 badframe:
3417 force_sig(TARGET_SIGSEGV);
3418 return -TARGET_QEMU_ESIGRETURN;
3419 }
3420
3421 #elif defined(TARGET_XTENSA)
3422
3423 struct target_sigcontext {
3424 abi_ulong sc_pc;
3425 abi_ulong sc_ps;
3426 abi_ulong sc_lbeg;
3427 abi_ulong sc_lend;
3428 abi_ulong sc_lcount;
3429 abi_ulong sc_sar;
3430 abi_ulong sc_acclo;
3431 abi_ulong sc_acchi;
3432 abi_ulong sc_a[16];
3433 abi_ulong sc_xtregs;
3434 };
3435
3436 struct target_ucontext {
3437 abi_ulong tuc_flags;
3438 abi_ulong tuc_link;
3439 target_stack_t tuc_stack;
3440 struct target_sigcontext tuc_mcontext;
3441 target_sigset_t tuc_sigmask;
3442 };
3443
3444 struct target_rt_sigframe {
3445 target_siginfo_t info;
3446 struct target_ucontext uc;
3447 /* TODO: xtregs */
3448 uint8_t retcode[6];
3449 abi_ulong window[4];
3450 };
3451
3452 static abi_ulong get_sigframe(struct target_sigaction *sa,
3453 CPUXtensaState *env,
3454 unsigned long framesize)
3455 {
3456 abi_ulong sp = env->regs[1];
3457
3458 /* This is the X/Open sanctioned signal stack switching. */
3459 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
3460 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3461 }
3462 return (sp - framesize) & -16;
3463 }
3464
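/* Spill all live register windows to their stack save areas, as the
 * hardware window-overflow handlers would, so that a coherent a0..a15
 * view can then be copied into the sigcontext. */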
3465 static int flush_window_regs(CPUXtensaState *env)
3466 {
3467 uint32_t wb = env->sregs[WINDOW_BASE];
3468 uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1);
3469 unsigned d = ctz32(ws) + 1;
3470 unsigned i;
3471 int ret = 0;
3472
3473 for (i = d; i < env->config->nareg / 4; i += d) {
3474 uint32_t ssp, osp;
3475 unsigned j;
3476
3477 ws >>= d;
3478 xtensa_rotate_window(env, d);
3479
3480 if (ws & 0x1) {
3481 ssp = env->regs[5];
3482 d = 1;
3483 } else if (ws & 0x2) {
3484 ssp = env->regs[9];
3485 ret |= get_user_ual(osp, env->regs[1] - 12);
3486 osp -= 32;
3487 d = 2;
3488 } else if (ws & 0x4) {
3489 ssp = env->regs[13];
3490 ret |= get_user_ual(osp, env->regs[1] - 12);
3491 osp -= 48;
3492 d = 3;
3493 } else {
3494 g_assert_not_reached();
3495 }
3496
3497 for (j = 0; j < 4; ++j) {
3498 ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4);
3499 }
3500 for (j = 4; j < d * 4; ++j) {
3501 ret |= put_user_ual(env->regs[j], osp - 16 + j * 4);
3502 }
3503 }
3504 xtensa_rotate_window(env, d);
3505 g_assert(env->sregs[WINDOW_BASE] == wb);
3506 return ret == 0;
3507 }
3508
3509 static int setup_sigcontext(struct target_rt_sigframe *frame,
3510 CPUXtensaState *env)
3511 {
3512 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
3513 int i;
3514
3515 __put_user(env->pc, &sc->sc_pc);
3516 __put_user(env->sregs[PS], &sc->sc_ps);
3517 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
3518 __put_user(env->sregs[LEND], &sc->sc_lend);
3519 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
3520 if (!flush_window_regs(env)) {
3521 return 0;
3522 }
3523 for (i = 0; i < 16; ++i) {
3524 __put_user(env->regs[i], sc->sc_a + i);
3525 }
3526 __put_user(0, &sc->sc_xtregs);
3527 /* TODO: xtregs */
3528 return 1;
3529 }
3530
3531 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3532 target_siginfo_t *info,
3533 target_sigset_t *set, CPUXtensaState *env)
3534 {
3535 abi_ulong frame_addr;
3536 struct target_rt_sigframe *frame;
3537 uint32_t ra;
3538 int i;
3539
3540 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3541 trace_user_setup_rt_frame(env, frame_addr);
3542
3543 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3544 goto give_sigsegv;
3545 }
3546
3547 if (ka->sa_flags & TARGET_SA_SIGINFO) {
3548 tswap_siginfo(&frame->info, info);
3549 }
3550
3551 __put_user(0, &frame->uc.tuc_flags);
3552 __put_user(0, &frame->uc.tuc_link);
3553 __put_user(target_sigaltstack_used.ss_sp,
3554 &frame->uc.tuc_stack.ss_sp);
3555 __put_user(sas_ss_flags(env->regs[1]),
3556 &frame->uc.tuc_stack.ss_flags);
3557 __put_user(target_sigaltstack_used.ss_size,
3558 &frame->uc.tuc_stack.ss_size);
3559 if (!setup_sigcontext(frame, env)) {
3560 unlock_user_struct(frame, frame_addr, 0);
3561 goto give_sigsegv;
3562 }
3563 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
3564 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3565 }
3566
3567 if (ka->sa_flags & TARGET_SA_RESTORER) {
3568 ra = ka->sa_restorer;
3569 } else {
3570 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
3571 #ifdef TARGET_WORDS_BIGENDIAN
3572 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
3573 __put_user(0x22, &frame->retcode[0]);
3574 __put_user(0x0a, &frame->retcode[1]);
3575 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
3576 /* Generate instruction: SYSCALL */
3577 __put_user(0x00, &frame->retcode[3]);
3578 __put_user(0x05, &frame->retcode[4]);
3579 __put_user(0x00, &frame->retcode[5]);
3580 #else
3581 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
3582 __put_user(0x22, &frame->retcode[0]);
3583 __put_user(0xa0, &frame->retcode[1]);
3584 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
3585 /* Generate instruction: SYSCALL */
3586 __put_user(0x00, &frame->retcode[3]);
3587 __put_user(0x50, &frame->retcode[4]);
3588 __put_user(0x00, &frame->retcode[5]);
3589 #endif
3590 }
3591 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
3592 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
3593 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
3594 }
3595 memset(env->regs, 0, sizeof(env->regs));
3596 env->pc = ka->_sa_handler;
3597 env->regs[1] = frame_addr;
3598 env->sregs[WINDOW_BASE] = 0;
3599 env->sregs[WINDOW_START] = 1;
3600
3601 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
3602 env->regs[6] = sig;
3603 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
3604 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
3605 unlock_user_struct(frame, frame_addr, 1);
3606 return;
3607
3608 give_sigsegv:
3609 force_sigsegv(sig);
3610 return;
3611 }
3612
3613 static void restore_sigcontext(CPUXtensaState *env,
3614 struct target_rt_sigframe *frame)
3615 {
3616 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
3617 uint32_t ps;
3618 int i;
3619
3620 __get_user(env->pc, &sc->sc_pc);
3621 __get_user(ps, &sc->sc_ps);
3622 __get_user(env->sregs[LBEG], &sc->sc_lbeg);
3623 __get_user(env->sregs[LEND], &sc->sc_lend);
3624 __get_user(env->sregs[LCOUNT], &sc->sc_lcount);
3625
3626 env->sregs[WINDOW_BASE] = 0;
3627 env->sregs[WINDOW_START] = 1;
3628 env->sregs[PS] = deposit32(env->sregs[PS],
3629 PS_CALLINC_SHIFT,
3630 PS_CALLINC_LEN,
3631 extract32(ps, PS_CALLINC_SHIFT,
3632 PS_CALLINC_LEN));
3633 for (i = 0; i < 16; ++i) {
3634 __get_user(env->regs[i], sc->sc_a + i);
3635 }
3636 /* TODO: xtregs */
3637 }
3638
3639 long do_rt_sigreturn(CPUXtensaState *env)
3640 {
3641 abi_ulong frame_addr = env->regs[1];
3642 struct target_rt_sigframe *frame;
3643 sigset_t set;
3644
3645 trace_user_do_rt_sigreturn(env, frame_addr);
3646 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3647 goto badframe;
3648 }
3649 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3650 set_sigmask(&set);
3651
3652 restore_sigcontext(env, frame);
3653
3654 if (do_sigaltstack(frame_addr +
3655 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3656 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
3657 goto badframe;
3658 }
3659 unlock_user_struct(frame, frame_addr, 0);
3660 return -TARGET_QEMU_ESIGRETURN;
3661
3662 badframe:
3663 unlock_user_struct(frame, frame_addr, 0);
3664 force_sig(TARGET_SIGSEGV);
3665 return -TARGET_QEMU_ESIGRETURN;
3666 }
3667 #endif
3668
3669 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
3670 struct emulated_sigtable *k)
3671 {
3672 CPUState *cpu = ENV_GET_CPU(cpu_env);
3673 abi_ulong handler;
3674 sigset_t set;
3675 target_sigset_t target_old_set;
3676 struct target_sigaction *sa;
3677 TaskState *ts = cpu->opaque;
3678
3679 trace_user_handle_signal(cpu_env, sig);
3680 /* dequeue signal */
3681 k->pending = 0;
3682
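/* Offer the signal to an attached debugger first: gdb_handlesig()
 * returns 0 if gdb consumed it, in which case it is treated as ignored. */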
3683 sig = gdb_handlesig(cpu, sig);
3684 if (!sig) {
3685 sa = NULL;
3686 handler = TARGET_SIG_IGN;
3687 } else {
3688 sa = &sigact_table[sig - 1];
3689 handler = sa->_sa_handler;
3690 }
3691
3692 if (do_strace) {
3693 print_taken_signal(sig, &k->info);
3694 }
3695
3696 if (handler == TARGET_SIG_DFL) {
3697 /* default handler: ignore some signals; the others are job control or fatal */
3698 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
3699 kill(getpid(),SIGSTOP);
3700 } else if (sig != TARGET_SIGCHLD &&
3701 sig != TARGET_SIGURG &&
3702 sig != TARGET_SIGWINCH &&
3703 sig != TARGET_SIGCONT) {
3704 dump_core_and_abort(sig);
3705 }
3706 } else if (handler == TARGET_SIG_IGN) {
3707 /* ignore sig */
3708 } else if (handler == TARGET_SIG_ERR) {
3709 dump_core_and_abort(sig);
3710 } else {
3711 /* compute the blocked signals during the handler execution */
3712 sigset_t *blocked_set;
3713
3714 target_to_host_sigset(&set, &sa->sa_mask);
3715 /* SA_NODEFER indicates that the current signal should not be
3716 blocked during the handler */
3717 if (!(sa->sa_flags & TARGET_SA_NODEFER))
3718 sigaddset(&set, target_to_host_signal(sig));
3719
3720 /* save the previous blocked signal state to restore it at the
3721 end of the signal execution (see do_sigreturn) */
3722 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
3723
3724 /* block signals in the handler */
3725 blocked_set = ts->in_sigsuspend ?
3726 &ts->sigsuspend_mask : &ts->signal_mask;
3727 sigorset(&ts->signal_mask, blocked_set, &set);
3728 ts->in_sigsuspend = 0;
3729
3730 /* if the CPU is in VM86 mode, we restore the 32 bit values */
3731 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
3732 {
3733 CPUX86State *env = cpu_env;
3734 if (env->eflags & VM_MASK)
3735 save_v86_state(env);
3736 }
3737 #endif
3738 /* prepare the stack frame of the virtual CPU */
3739 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
3740 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
3741 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
3742 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
3743 || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
3744 /* These targets do not have traditional signals. */
3745 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
3746 #else
3747 if (sa->sa_flags & TARGET_SA_SIGINFO)
3748 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
3749 else
3750 setup_frame(sig, sa, &target_old_set, cpu_env);
3751 #endif
3752 if (sa->sa_flags & TARGET_SA_RESETHAND) {
3753 sa->_sa_handler = TARGET_SIG_DFL;
3754 }
3755 }
3756 }
3757
3758 void process_pending_signals(CPUArchState *cpu_env)
3759 {
3760 CPUState *cpu = ENV_GET_CPU(cpu_env);
3761 int sig;
3762 TaskState *ts = cpu->opaque;
3763 sigset_t set;
3764 sigset_t *blocked_set;
3765
3766 while (atomic_read(&ts->signal_pending)) {
3767 /* FIXME: This is not threadsafe. */
3768 sigfillset(&set);
3769 sigprocmask(SIG_SETMASK, &set, 0);
3770
3771 restart_scan:
3772 sig = ts->sync_signal.pending;
3773 if (sig) {
3774 /* Synchronous signals are forced,
3775 * see force_sig_info() and callers in Linux
3776 * Note that not all of our queue_signal() calls in QEMU correspond
3777 * to force_sig_info() calls in Linux (some are send_sig_info()).
3778 * However it seems like a kernel bug to me to allow the process
3779 * to block a synchronous signal since it could then just end up
3780 * looping round and round indefinitely.
3781 */
3782 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
3783 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
3784 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
3785 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
3786 }
3787
3788 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
3789 }
3790
3791 for (sig = 1; sig <= TARGET_NSIG; sig++) {
3792 blocked_set = ts->in_sigsuspend ?
3793 &ts->sigsuspend_mask : &ts->signal_mask;
3794
3795 if (ts->sigtab[sig - 1].pending &&
3796 (!sigismember(blocked_set,
3797 target_to_host_signal_table[sig]))) {
3798 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
3799 /* Restart scan from the beginning, as handle_pending_signal
3800 * might have resulted in a new synchronous signal (eg SIGSEGV).
3801 */
3802 goto restart_scan;
3803 }
3804 }
3805
3806 /* if no signal is pending, unblock signals and recheck (the act
3807 * of unblocking might cause us to take another host signal which
3808 * will set signal_pending again).
3809 */
3810 atomic_set(&ts->signal_pending, 0);
3811 ts->in_sigsuspend = 0;
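/* Never block host SIGSEGV or SIGBUS: QEMU needs to receive them
 * synchronously in order to detect guest memory faults. */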
3812 set = ts->signal_mask;
3813 sigdelset(&set, SIGSEGV);
3814 sigdelset(&set, SIGBUS);
3815 sigprocmask(SIG_SETMASK, &set, 0);
3816 }
3817 ts->in_sigsuspend = 0;
3818 }