/* linux-user/signal.c (QEMU linux-user signal emulation) */
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28 #include "signal-common.h"
29
/* The guest's current sigaltstack() registration; starts out with no
 * alternate signal stack installed (TARGET_SS_DISABLE).
 */
struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};
35
/* Guest sigaction state, indexed by (target signal number - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

/* Forward declaration: installed as the host-side handler for every
 * signal QEMU traps on behalf of the guest.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
40
/* Host -> guest signal number mapping. Entries left zero here are
 * filled in as identity mappings by signal_init().
 */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; built at startup by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
85
86 int host_to_target_signal(int sig)
87 {
88 if (sig < 0 || sig >= _NSIG)
89 return sig;
90 return host_to_target_signal_table[sig];
91 }
92
93 int target_to_host_signal(int sig)
94 {
95 if (sig < 0 || sig >= _NSIG)
96 return sig;
97 return target_to_host_signal_table[sig];
98 }
99
100 static inline void target_sigaddset(target_sigset_t *set, int signum)
101 {
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105 }
106
107 static inline int target_sigismember(const target_sigset_t *set, int signum)
108 {
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112 }
113
114 void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
116 {
117 int i;
118 target_sigemptyset(d);
119 for (i = 1; i <= TARGET_NSIG; i++) {
120 if (sigismember(s, i)) {
121 target_sigaddset(d, host_to_target_signal(i));
122 }
123 }
124 }
125
126 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
127 {
128 target_sigset_t d1;
129 int i;
130
131 host_to_target_sigset_internal(&d1, s);
132 for(i = 0;i < TARGET_NSIG_WORDS; i++)
133 d->sig[i] = tswapal(d1.sig[i]);
134 }
135
136 void target_to_host_sigset_internal(sigset_t *d,
137 const target_sigset_t *s)
138 {
139 int i;
140 sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (target_sigismember(s, i)) {
143 sigaddset(d, target_to_host_signal(i));
144 }
145 }
146 }
147
148 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
149 {
150 target_sigset_t s1;
151 int i;
152
153 for(i = 0;i < TARGET_NSIG_WORDS; i++)
154 s1.sig[i] = tswapal(s->sig[i]);
155 target_to_host_sigset_internal(d, &s1);
156 }
157
158 void host_to_target_old_sigset(abi_ulong *old_sigset,
159 const sigset_t *sigset)
160 {
161 target_sigset_t d;
162 host_to_target_sigset(&d, sigset);
163 *old_sigset = d.sig[0];
164 }
165
166 void target_to_host_old_sigset(sigset_t *sigset,
167 const abi_ulong *old_sigset)
168 {
169 target_sigset_t d;
170 int i;
171
172 d.sig[0] = *old_sigset;
173 for(i = 1;i < TARGET_NSIG_WORDS; i++)
174 d.sig[i] = 0;
175 target_to_host_sigset(sigset, &d);
176 }
177
178 int block_signals(void)
179 {
180 TaskState *ts = (TaskState *)thread_cpu->opaque;
181 sigset_t set;
182
183 /* It's OK to block everything including SIGSEGV, because we won't
184 * run any further guest code before unblocking signals in
185 * process_pending_signals().
186 */
187 sigfillset(&set);
188 sigprocmask(SIG_SETMASK, &set, 0);
189
190 return atomic_xchg(&ts->signal_pending, 1);
191 }
192
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Block all signals so the update of ts->signal_mask cannot
         * race against a concurrently delivered host signal.
         */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
239
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242 * caller is assumed to have called block_signals() already.
243 */
244 void set_sigmask(const sigset_t *set)
245 {
246 TaskState *ts = (TaskState *)thread_cpu->opaque;
247
248 ts->signal_mask = *set;
249 }
250 #endif
251
252 /* siginfo conversion */
253
/* Convert a host siginfo_t to a target_siginfo_t with all fields still
 * in host byte order; tswap_siginfo() performs the byte swapping later.
 * The guessed QEMU_SI_* union discriminator is recorded in the top 16
 * bits of si_code for tswap_siginfo() to consume.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    /* NOTE(review): this si_code store looks redundant -- it is replaced
     * by the deposit32() at the bottom of this function.
     */
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    /* Record the union discriminator in the (kernel-reserved) high bits. */
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
330
/* Byte-swap a target_siginfo_t into guest byte order, using the
 * QEMU_SI_* marker stashed in the top 16 bits of si_code by
 * host_to_target_siginfo_noswap() to select the valid union members.
 * The marker is stripped (and si_code sign-extended) before writing.
 */
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
388
389 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
390 {
391 target_siginfo_t tgt_tmp;
392 host_to_target_siginfo_noswap(&tgt_tmp, info);
393 tswap_siginfo(tinfo, &tgt_tmp);
394 }
395
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert a guest siginfo to a host siginfo_t. */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    /* Truncation here on 64-bit guests is the "XXX" noted above. */
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
413
414 static int fatal_signal (int sig)
415 {
416 switch (sig) {
417 case TARGET_SIGCHLD:
418 case TARGET_SIGURG:
419 case TARGET_SIGWINCH:
420 /* Ignored by default. */
421 return 0;
422 case TARGET_SIGCONT:
423 case TARGET_SIGSTOP:
424 case TARGET_SIGTSTP:
425 case TARGET_SIGTTIN:
426 case TARGET_SIGTTOU:
427 /* Job control signals. */
428 return 0;
429 default:
430 return 1;
431 }
432 }
433
434 /* returns 1 if given signal should dump core if not handled */
435 static int core_dump_signal(int sig)
436 {
437 switch (sig) {
438 case TARGET_SIGABRT:
439 case TARGET_SIGFPE:
440 case TARGET_SIGILL:
441 case TARGET_SIGQUIT:
442 case TARGET_SIGSEGV:
443 case TARGET_SIGTRAP:
444 case TARGET_SIGBUS:
445 return (1);
446 default:
447 return (0);
448 }
449 }
450
/* One-time signal setup: build the host<->guest signal translation
 * tables, snapshot the host signal mask, and install our host handler
 * for every default-fatal signal.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Query the inherited disposition so the guest sees the same
         * IGN/DFL state the host process started with.
         */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
497
498 /* Force a synchronously taken signal. The kernel force_sig() function
499 * also forces the signal to "not blocked, not ignored", but for QEMU
500 * that work is done in process_pending_signals().
501 */
502 void force_sig(int sig)
503 {
504 CPUState *cpu = thread_cpu;
505 CPUArchState *env = cpu->env_ptr;
506 target_siginfo_t info;
507
508 info.si_signo = sig;
509 info.si_errno = 0;
510 info.si_code = TARGET_SI_KERNEL;
511 info._sifields._kill._pid = 0;
512 info._sifields._kill._uid = 0;
513 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
514 }
515
/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif
533
/* abort execution with signal */
/* Dump a guest core (if the binfmt supports it), then kill QEMU itself
 * with the corresponding host signal so the parent sees the correct
 * wait status. Never returns.
 */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
    of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
587
588 /* queue a signal so that it will be send to the virtual CPU as soon
589 as possible */
590 int queue_signal(CPUArchState *env, int sig, int si_type,
591 target_siginfo_t *info)
592 {
593 CPUState *cpu = ENV_GET_CPU(env);
594 TaskState *ts = cpu->opaque;
595
596 trace_user_queue_signal(env, sig);
597
598 info->si_code = deposit32(info->si_code, 16, 16, si_type);
599
600 ts->sync_signal.info = *info;
601 ts->sync_signal.pending = sig;
602 /* signal that a new signal is pending */
603 atomic_set(&ts->signal_pending, 1);
604 return 1; /* indicates that the signal was queued */
605 }
606
#ifndef HAVE_SAFE_SYSCALL
/* Fallback when the target has no safe-syscall support: nothing to
 * rewind, so this is a no-op.
 */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
613
/* Host-side handler for all trapped signals: forward CPU exceptions to
 * the emulator, otherwise record the signal in the per-task sigtab and
 * kick the virtual CPU so process_pending_signals() delivers it.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
669
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    /* Snapshot the current state first, so the old stack is reported
     * even when the same call also installs a new one.
     */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Changing the stack while executing on it is forbidden. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
742
/* do_sigaction() return target values and host errnos */
/* Emulate the sigaction syscall: update the guest's sigact_table and
 * mirror the disposition into the host so default-fatal/ignored
 * signals behave correctly for syscall interruption.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        /* SIGSEGV/SIGBUS stay trapped by QEMU itself for exception
         * detection, so their host disposition is never changed here.
         */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
805
806 #if defined(TARGET_I386)
807 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
808
/* One 80-bit x87 register as stored in the legacy 32-bit FPU frame. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* One x87 register in the FXSR frame (padded to 16 bytes). */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

/* One 128-bit SSE register. */
struct target_xmmreg {
    uint32_t element[4];
};

struct target_fpstate_32 {
    /* Regular FPU environment */
    uint32_t cw;
    uint32_t sw;
    uint32_t tag;
    uint32_t ipoff;
    uint32_t cssel;
    uint32_t dataoff;
    uint32_t datasel;
    struct target_fpreg st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    uint32_t _fxsr_env[6];   /* FXSR FPU env is ignored */
    uint32_t mxcsr;
    uint32_t reserved;
    struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg xmm[8];
    uint32_t padding[56];
};

struct target_fpstate_64 {
    /* FXSAVE format */
    uint16_t cw;
    uint16_t sw;
    uint16_t twd;
    uint16_t fop;
    uint64_t rip;
    uint64_t rdp;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint32_t st_space[32];
    uint32_t xmm_space[64];
    uint32_t reserved[24];
};

#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
#else
# define target_fpstate target_fpstate_64
#endif

struct target_sigcontext_32 {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    uint32_t edi;
    uint32_t esi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t ebx;
    uint32_t edx;
    uint32_t ecx;
    uint32_t eax;
    uint32_t trapno;
    uint32_t err;
    uint32_t eip;
    uint16_t cs, __csh;
    uint32_t eflags;
    uint32_t esp_at_signal;
    uint16_t ss, __ssh;
    uint32_t fpstate; /* pointer */
    uint32_t oldmask;
    uint32_t cr2;
};

struct target_sigcontext_64 {
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rdi;
    uint64_t rsi;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rdx;
    uint64_t rax;
    uint64_t rcx;
    uint64_t rsp;
    uint64_t rip;

    uint64_t eflags;

    uint16_t cs;
    uint16_t gs;
    uint16_t fs;
    uint16_t ss;

    uint64_t err;
    uint64_t trapno;
    uint64_t oldmask;
    uint64_t cr2;

    uint64_t fpstate; /* pointer */
    uint64_t padding[8];
};

#ifndef TARGET_X86_64
# define target_sigcontext target_sigcontext_32
#else
# define target_sigcontext target_sigcontext_64
#endif

/* see Linux/include/uapi/asm-generic/ucontext.h */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

#ifndef TARGET_X86_64
/* Legacy (non-RT) 32-bit signal frame layout on the guest stack. */
struct sigframe {
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

/* RT 32-bit signal frame layout on the guest stack. */
struct rt_sigframe {
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

#else

/* RT 64-bit signal frame layout on the guest stack. */
struct rt_sigframe {
    abi_ulong pretcode;
    struct target_ucontext uc;
    struct target_siginfo info;
    struct target_fpstate fpstate;
};

#endif
974
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
/* Fill in the guest sigcontext (and FPU save area at fpstate_addr)
 * from the current CPU state; 'mask' is the saved guest signal mask.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
#ifndef TARGET_X86_64
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    /* 0xffff marks "regular FPU data only" in the legacy frame. */
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
#else
    __put_user(env->regs[R_EDI], &sc->rdi);
    __put_user(env->regs[R_ESI], &sc->rsi);
    __put_user(env->regs[R_EBP], &sc->rbp);
    __put_user(env->regs[R_ESP], &sc->rsp);
    __put_user(env->regs[R_EBX], &sc->rbx);
    __put_user(env->regs[R_EDX], &sc->rdx);
    __put_user(env->regs[R_ECX], &sc->rcx);
    __put_user(env->regs[R_EAX], &sc->rax);

    __put_user(env->regs[8], &sc->r8);
    __put_user(env->regs[9], &sc->r9);
    __put_user(env->regs[10], &sc->r10);
    __put_user(env->regs[11], &sc->r11);
    __put_user(env->regs[12], &sc->r12);
    __put_user(env->regs[13], &sc->r13);
    __put_user(env->regs[14], &sc->r14);
    __put_user(env->regs[15], &sc->r15);

    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->rip);

    __put_user(env->eflags, &sc->eflags);
    __put_user(env->segs[R_CS].selector, &sc->cs);
    __put_user((uint16_t)0, &sc->gs);
    __put_user((uint16_t)0, &sc->fs);
    __put_user(env->segs[R_SS].selector, &sc->ss);

    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);

    /* fpstate_addr must be 16 byte aligned for fxsave */
    assert(!(fpstate_addr & 0xf));

    cpu_x86_fxsave(env, fpstate_addr);
    __put_user(fpstate_addr, &sc->fpstate);
#endif
}
1057
/*
 * Determine which stack to use..
 */
/* Return the guest stack address at which to build the signal frame,
 * honouring SA_ONSTACK / the registered sigaltstack and the ABI's
 * alignment requirements.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
#ifdef TARGET_X86_64
    esp -= 128; /* this is the redzone */
#endif

    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {
#ifndef TARGET_X86_64
        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
                !(ka->sa_flags & TARGET_SA_RESTORER) &&
                ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
#endif
    }

#ifndef TARGET_X86_64
    /* i386 frames are 8-byte aligned. */
    return (esp - frame_size) & -8ul;
#else
    /* x86-64: 16-byte alignment, minus 8 for the return-address slot. */
    return ((esp - frame_size) & (~15ul)) - 8;
#endif
}
1095
1096 #ifndef TARGET_X86_64
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/*
 * Build a legacy (non-RT) i386 signal frame on the guest stack and
 * redirect the CPU to the handler.  On SIGSEGV while writing the frame,
 * force a SIGSEGV instead.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* Signal number is the handler's on-stack argument. */
    __put_user(sig, &frame->sig);

    /* CPU state plus the first word of the blocked mask go into sc. */
    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* Remaining mask words are stored in extramask[]. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    /* Flat user-mode segments; clear the trap flag for the handler. */
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1154 #endif
1155
/* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
/*
 * Build an RT signal frame (siginfo + ucontext) on the guest stack and
 * redirect the CPU to the handler.  Layout and register conventions
 * differ between i386 and x86-64; see the #ifdefs below.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
#ifndef TARGET_X86_64
    abi_ulong addr;
#endif
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* These fields are only in rt_sigframe on 32 bit */
#ifndef TARGET_X86_64
    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
#endif
    /* siginfo is only copied out for SA_SIGINFO handlers. */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        tswap_siginfo(&frame->info, info);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    /* Full blocked-signal mask; restored by do_rt_sigreturn(). */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
#ifndef TARGET_X86_64
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }
#else
    /* XXX: Would be slightly better to return -EFAULT here if test fails
       assert(ka->sa_flags & TARGET_SA_RESTORER); */
    __put_user(ka->sa_restorer, &frame->pretcode);
#endif

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    /* Handler arguments: i386 mirrors sig/&info/&uc in eax/edx/ecx (the
       on-stack copies above are the ABI-visible ones); x86-64 passes
       them in rdi/rsi/rdx per the SysV calling convention. */
#ifndef TARGET_X86_64
    env->regs[R_EAX] = sig;
    env->regs[R_EDX] = (unsigned long)&frame->info;
    env->regs[R_ECX] = (unsigned long)&frame->uc;
#else
    env->regs[R_EAX] = 0;
    env->regs[R_EDI] = sig;
    env->regs[R_ESI] = (unsigned long)&frame->info;
    env->regs[R_EDX] = (unsigned long)&frame->uc;
#endif

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1250
/*
 * Restore guest CPU state from a sigcontext written by
 * setup_sigcontext().  Returns 0 on success, 1 if the saved FP state
 * pointer is not readable.
 */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

#ifndef TARGET_X86_64
    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);

    env->eip = tswapl(sc->eip);
#else
    env->regs[8] = tswapl(sc->r8);
    env->regs[9] = tswapl(sc->r9);
    env->regs[10] = tswapl(sc->r10);
    env->regs[11] = tswapl(sc->r11);
    env->regs[12] = tswapl(sc->r12);
    env->regs[13] = tswapl(sc->r13);
    env->regs[14] = tswapl(sc->r14);
    env->regs[15] = tswapl(sc->r15);

    env->regs[R_EDI] = tswapl(sc->rdi);
    env->regs[R_ESI] = tswapl(sc->rsi);
    env->regs[R_EBP] = tswapl(sc->rbp);
    env->regs[R_EBX] = tswapl(sc->rbx);
    env->regs[R_EDX] = tswapl(sc->rdx);
    env->regs[R_EAX] = tswapl(sc->rax);
    env->regs[R_ECX] = tswapl(sc->rcx);
    env->regs[R_ESP] = tswapl(sc->rsp);

    env->eip = tswapl(sc->rip);
#endif

    /* Force RPL 3 on the reloaded selectors: the context came from,
       and returns to, user mode. */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* 0x40DD5 = AC|OF|DF|TF|SF|ZF|AF|PF|CF: only these flags may be
       taken from the saved context; all others keep their live value. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    /* Reload FP state only if the frame carries a saved area. */
    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
#ifndef TARGET_X86_64
        cpu_x86_frstor(env, fpstate_addr, 1);
#else
        cpu_x86_fxrstor(env, fpstate_addr);
#endif
    }

    return err;
badframe:
    return 1;
}
1319
1320 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1321 #ifndef TARGET_X86_64
/*
 * sigreturn(2) for i386: restore the blocked-signal mask and the CPU
 * state saved by setup_frame().  The frame lies 8 bytes below the
 * current ESP: the handler's "ret" popped pretcode and the
 * trampoline's "popl %eax" popped the sig argument.
 */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals: first word from sc, rest from extramask[] */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    /* NOTE(review): -TARGET_QEMU_ESIGRETURN appears to tell the syscall
       layer not to overwrite the restored return register — confirm. */
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1353 #endif
1354
/*
 * rt_sigreturn(2): restore the signal mask, CPU state and sigaltstack
 * settings from the rt frame.  Only pretcode has been popped by the
 * handler's return, so the frame is one word below the stack pointer.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Re-apply the sigaltstack configuration stored in the ucontext. */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1385
1386 #elif defined(TARGET_SPARC)
1387
1388 #define __SUNOS_MAXWIN 31
1389
/* This is what SunOS does, so shall I. */
/* SunOS-compatible sigcontext.  NOTE(review): appears unused in this
 * file — only the #if 0'd setup_sigcontext() below refers to it. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
/* Embedded at the base of the signal frames below (member "ss"). */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};
1427
/* Register dump placed in the signal frame by setup___siginfo():
 * globals live in u_regs[0..7], in-registers in u_regs[8..15]. */
typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;          /* first word of the blocked-signal mask */
} __siginfo_t;
1438
/* FPU state block inside the signal frame.  Currently neither saved
 * nor restored — see the commented-out save_fpu_state() calls and the
 * FIXME in do_sigreturn(). */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;
    unsigned long si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;
1448
1449
/* Non-RT signal frame laid out on the guest stack by setup_frame(). */
struct target_signal_frame {
    struct sparc_stackf ss;     /* caller's register window (locals/ins) */
    __siginfo_t info;           /* register dump + first mask word */
    abi_ulong fpu_save;
    abi_ulong insns[2] __attribute__ ((aligned (8)));  /* trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
/* RT signal frame.  NOTE(review): this uses *host* siginfo_t /
 * sigset_t / stack_t rather than target types — harmless only because
 * setup_rt_frame() is unimplemented for this target; fix before
 * implementing it. */
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
1470
1471 #define UREG_O0 16
1472 #define UREG_O6 22
1473 #define UREG_I0 0
1474 #define UREG_I1 1
1475 #define UREG_I2 2
1476 #define UREG_I3 3
1477 #define UREG_I4 4
1478 #define UREG_I5 5
1479 #define UREG_I6 6
1480 #define UREG_I7 7
1481 #define UREG_L0 8
1482 #define UREG_FP UREG_I6
1483 #define UREG_SP UREG_O6
1484
/*
 * Pick the address for a new SPARC signal frame: the current frame
 * pointer (%fp), or — for SA_ONSTACK handlers not already running on
 * the alternate stack — the top of the sigaltstack, provided that top
 * is 8-byte aligned.  NOTE(review): a misaligned sigaltstack top is
 * silently ignored here rather than aligned — confirm intent.
 */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching.  */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        if (!on_sig_stack(sp)
            && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
1502
1503 static int
1504 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
1505 {
1506 int err = 0, i;
1507
1508 __put_user(env->psr, &si->si_regs.psr);
1509 __put_user(env->pc, &si->si_regs.pc);
1510 __put_user(env->npc, &si->si_regs.npc);
1511 __put_user(env->y, &si->si_regs.y);
1512 for (i=0; i < 8; i++) {
1513 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
1514 }
1515 for (i=0; i < 8; i++) {
1516 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
1517 }
1518 __put_user(mask, &si->si_mask);
1519 return err;
1520 }
1521
#if 0
/* Disabled SunOS-style sigcontext writer; kept only as a reference for
 * the target_sigcontext layout above.  NOTE(review): dead code —
 * candidate for removal. */
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#endif
1540 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
1541
/*
 * Deliver a non-RT signal on 32-bit SPARC: build a target_signal_frame
 * below the current frame pointer, save the register window, dump and
 * mask, then point pc/npc at the handler.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    /* First mask word in the info block, the rest in extramask[]. */
    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Spill the caller's register window (locals + ins) into the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->ka_restorer) {
        env->regwptr[UREG_I7] = ka->ka_restorer;
    } else {
        uint32_t val32;

        /* Point %i7 two instructions before insns[] so the handler's
           return (to %i7 + 8) lands on insns[0]. */
        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sigsegv(sig);
}
1630
/*
 * RT signal delivery is not implemented for 32-bit SPARC: this only
 * prints a diagnostic and leaves the CPU state untouched.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
1637
/*
 * sigreturn for 32-bit SPARC: the frame address is the current %fp.
 * Validate frame and pc/npc alignment, then restore registers,
 * condition codes and the blocked-signal mask.
 */
long do_sigreturn(CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    uint32_t up_psr, pc, npc;
    target_sigset_t set;
    sigset_t host_set;
    int err=0, i;

    sf_addr = env->regwptr[UREG_FP];
    trace_user_do_sigreturn(env, sf_addr);
    if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
        goto segv_and_exit;
    }

    /* 1. Make sure we are not getting garbage from the user */

    if (sf_addr & 3)
        goto segv_and_exit;

    __get_user(pc, &sf->info.si_regs.pc);
    __get_user(npc, &sf->info.si_regs.npc);

    /* pc and npc must be word aligned */
    if ((pc | npc) & 3) {
        goto segv_and_exit;
    }

    /* 2. Restore the state */
    __get_user(up_psr, &sf->info.si_regs.psr);

    /* User can only change condition codes and FPU enabling in %psr. */
    env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
            | (env->psr & ~(PSR_ICC /* | PSR_EF */));

    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &sf->info.si_regs.y);
    /* Globals from u_regs[0..7], in-registers from u_regs[8..15]. */
    for (i=0; i < 8; i++) {
        __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
    }

    /* FIXME: implement FPU save/restore:
     * __get_user(fpu_save, &sf->fpu_save);
     * if (fpu_save)
     *     err |= restore_fpu_state(env, fpu_save);
     */

    /* This is pretty much atomic, no amount locking would prevent
     * the races which exist anyways.
     */
    __get_user(set.sig[0], &sf->info.si_mask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &sf->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (err) {
        goto segv_and_exit;
    }
    unlock_user_struct(sf, sf_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

segv_and_exit:
    unlock_user_struct(sf, sf_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1710
/* rt_sigreturn is not implemented for 32-bit SPARC; fail with ENOSYS. */
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
1717
1718 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1719 #define SPARC_MC_TSTATE 0
1720 #define SPARC_MC_PC 1
1721 #define SPARC_MC_NPC 2
1722 #define SPARC_MC_Y 3
1723 #define SPARC_MC_G1 4
1724 #define SPARC_MC_G2 5
1725 #define SPARC_MC_G3 6
1726 #define SPARC_MC_G4 7
1727 #define SPARC_MC_G5 8
1728 #define SPARC_MC_G6 9
1729 #define SPARC_MC_G7 10
1730 #define SPARC_MC_O0 11
1731 #define SPARC_MC_O1 12
1732 #define SPARC_MC_O2 13
1733 #define SPARC_MC_O3 14
1734 #define SPARC_MC_O4 15
1735 #define SPARC_MC_O5 16
1736 #define SPARC_MC_O6 17
1737 #define SPARC_MC_O7 18
1738 #define SPARC_MC_NGREG 19
1739
1740 typedef abi_ulong target_mc_greg_t;
1741 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
1742
/* One entry of the floating-point trap queue in the mcontext. */
struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};
1747
/* FPU state inside the sparc64 mcontext: the 32 single/double register
 * views share storage via the union, plus %fsr, %fprs, %gsr and the
 * FP trap queue (the queue fields are not touched by the set/get
 * context code below). */
struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;
1763
/* sparc64 machine context: general registers plus the saved %fp/%i7
 * (spilled window copies) and the FPU block above. */
typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;
1770
/* Guest ucontext consumed/produced by sparc64_{set,get}_context(). */
struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};
1777
/* A V9 register window */
/* As spilled on the stack at %sp + TARGET_STACK_BIAS: locals then ins;
 * ins[6]/ins[7] are the saved %fp/%i7. */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};
1783
1784 #define TARGET_STACK_BIAS 2047
1785
/* {set, get}context() needed for 64-bit SparcLinux userland. */
/*
 * Emulate the setcontext trap: load pc/npc, optionally the signal mask
 * (requested via %o1 != 0), the general registers, the saved %fp/%i7
 * (written back into the register window spilled on the stack) and the
 * FPU state from the guest ucontext pointed to by %o0.
 */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
        goto do_sigsegv;
    }
    grp = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[SPARC_MC_PC]));
    __get_user(npc, &((*grp)[SPARC_MC_NPC]));
    /* pc and npc must be word aligned */
    if ((pc | npc) & 3) {
        goto do_sigsegv;
    }
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        set_sigmask(&set);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[SPARC_MC_Y]));
    /* tstate packs asi in bits 24..31, ccr in 32..39, cwp in 0..4. */
    __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
    __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    /* Saved %fp/%i7 go back into the window spilled at %sp + bias. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *     __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        /* sregs is viewed as 64 singles: even indices are the upper
           half of each fpr, odd indices the lower half. */
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i/2].l.lower, src);
            } else {
                __get_user(env->fpr[i/2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
1884
/*
 * Emulate the getcontext trap: advance the saved pc past the trap
 * instruction, then write the current signal mask, the general
 * registers, %fp/%i7 (read back from the register window spilled at
 * %sp + bias) and the FPU state into the guest ucontext at %o0.
 */
void sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
        goto do_sigsegv;
    }

    mcp = &ucp->tuc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    /* If we're only reading the signal mask then do_sigprocmask()
     * is guaranteed not to fail, which is important because we don't
     * have any way to signal a failure or restart this operation since
     * this is not a normal syscall.
     */
    err = do_sigprocmask(0, NULL, &set);
    assert(err == 0);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        __put_user(target_set.sig[0],
                   (abi_ulong *)&ucp->tuc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->tuc_sigmask.sig;
        for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
            __put_user(*src, dst);
        }
        /* NOTE(review): err is known to be 0 here (asserted above), so
           this check can never fire. */
        if (err)
            goto do_sigsegv;
    }

    /* XXX: tstate must be saved properly */
    // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
    __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
    __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
    __put_user(env->y, &((*grp)[SPARC_MC_Y]));
    __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
    __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
    __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
    __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
    __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
    __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
    __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
    __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
    __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
    __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
    __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
    __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
    __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
    __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
    __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));

    /* %fp/%i7 are fetched from the spilled register window. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    fp = i7 = 0;
    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    __put_user(fp, &(mcp->mc_fp));
    __put_user(i7, &(mcp->mc_i7));

    {
        /* sregs viewed as 64 singles: even index = upper half of the
           fpr, odd index = lower half (mirrors sparc64_set_context). */
        uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, dst++) {
            if (i & 1) {
                __put_user(env->fpr[i/2].l.lower, dst);
            } else {
                __put_user(env->fpr[i/2].l.upper, dst);
            }
        }
    }
    __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    if (err)
        goto do_sigsegv;
    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
1987 #endif
1988 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
1989
1990 # if defined(TARGET_ABI_MIPSO32)
/* O32 guest sigcontext; register slots are 64-bit even on o32. */
struct target_sigcontext {
    uint32_t sc_regmask;     /* Unused */
    uint32_t sc_status;
    uint64_t sc_pc;
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint32_t sc_ownedfp;     /* Unused */
    uint32_t sc_fpc_csr;
    uint32_t sc_fpc_eir;     /* Unused */
    uint32_t sc_used_math;
    uint32_t sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t pad0;
    uint64_t sc_mdhi;
    uint64_t sc_mdlo;
    target_ulong sc_hi1;     /* Was sc_cause */
    target_ulong sc_lo1;     /* Was sc_badvaddr */
    target_ulong sc_hi2;     /* Was sc_sigset[4] */
    target_ulong sc_lo2;
    target_ulong sc_hi3;
    target_ulong sc_lo3;
};
2012 # else /* N32 || N64 */
/* Guest sigcontext variant used for the N32 and N64 ABIs. */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
2030 # endif /* O32 */
2031
/* Legacy (non-RT) MIPS signal frame: o32 argument save area, two-word
 * return trampoline, saved context and the full blocked mask. */
struct sigframe {
    uint32_t sf_ass[4];      /* argument save space for o32 */
    uint32_t sf_code[2];     /* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};
2038
/* Guest ucontext embedded in the MIPS RT signal frame. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};
2047
/* MIPS RT signal frame: o32 argument save area, return trampoline,
 * the swapped siginfo and the full ucontext. */
struct target_rt_sigframe {
    uint32_t rs_ass[4];      /* argument save space for o32 */
    uint32_t rs_code[2];     /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
2054
/* Install trampoline to jump back from signal handler */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    int err = 0;

    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     */

    /* 0x24020000 | imm == addiu v0, zero, <syscall number> */
    __put_user(0x24020000 + syscall, tramp + 0);
    /* 0x0000000c == syscall */
    __put_user(0x0000000c , tramp + 1);
    /* err is always 0; the error-code shape is kept for the callers. */
    return err;
}
2071
/*
 * Save guest CPU state into a MIPS sigcontext: resume PC, GPRs
 * (gpr[0] stored as 0), hi/lo pairs including the DSP accumulators,
 * the DSP control word, and all 32 FP registers.
 */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    /* NOTE(review): clears MIPS_HFLAG_BMASK (branch state) once the
       resume PC has been captured. */
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    /* gpr[0] is architecturally zero; store 0 explicitly. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    /* FP state is saved unconditionally, so mark it as in use. */
    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2107
/*
 * Inverse of setup_sigcontext(): reload EPC, GPRs 1..31, the hi/lo and
 * DSP accumulators, the DSP control word and the FP registers from a
 * guest sigcontext.
 */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* gpr[0] is architecturally zero and is not restored. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2138
/*
 * Determine which stack to use..
 *
 * Returns the (8-byte aligned) guest address where a signal frame of
 * frame_size bytes should be placed: on the alternate signal stack if
 * SA_ONSTACK is set and it is not already in use, otherwise just below
 * the current $sp.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
{
    unsigned long sp;

    /* Default to using normal stack */
    sp = regs->active_tc.gpr[29];

    /*
     * FPU emulator may have its own trampoline active just
     * above the user stack, 16-bytes before the next lowest
     * 16 byte boundary.  Try to avoid trashing it.
     */
    sp -= 32;

    /* This is the X/Open sanctioned signal stack switching.  */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    /* o32 signal frames are 8-byte aligned.  */
    return (sp - frame_size) & ~7;
}
2164
/*
 * On CPUs with MIPS16/microMIPS, bit 0 of a code address selects the
 * compressed ISA mode.  Move that bit from active_tc.PC into the
 * MIPS_HFLAG_M16 hflag and clear it from the PC itself.
 */
static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
{
    if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
        env->hflags &= ~MIPS_HFLAG_M16;
        env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
        env->active_tc.PC &= ~(target_ulong) 1;
    }
}
2173
2174 # if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
/*
 * Build an o32 non-RT signal frame on the guest stack (trampoline,
 * saved sigcontext, old signal mask) and redirect the CPU to the
 * handler.  On failure to map the frame, force a SIGSEGV instead.
 */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* "li v0, __NR_sigreturn; syscall" return stub in the frame.  */
    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    /* Save the signal mask that was in effect when the signal hit.  */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = 0 (should be cause)
     *   a2 = pointer to struct sigcontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    /* $ra points at the trampoline so the handler "returns" into it.  */
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}
2223
2224 long do_sigreturn(CPUMIPSState *regs)
2225 {
2226 struct sigframe *frame;
2227 abi_ulong frame_addr;
2228 sigset_t blocked;
2229 target_sigset_t target_set;
2230 int i;
2231
2232 frame_addr = regs->active_tc.gpr[29];
2233 trace_user_do_sigreturn(regs, frame_addr);
2234 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2235 goto badframe;
2236
2237 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2238 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2239 }
2240
2241 target_to_host_sigset_internal(&blocked, &target_set);
2242 set_sigmask(&blocked);
2243
2244 restore_sigcontext(regs, &frame->sf_sc);
2245
2246 #if 0
2247 /*
2248 * Don't let your children do this ...
2249 */
2250 __asm__ __volatile__(
2251 "move\t$29, %0\n\t"
2252 "j\tsyscall_exit"
2253 :/* no outputs */
2254 :"r" (&regs));
2255 /* Unreached */
2256 #endif
2257
2258 regs->active_tc.PC = regs->CP0_EPC;
2259 mips_set_hflags_isa_mode_from_pc(regs);
2260 /* I am not sure this is right, but it seems to work
2261 * maybe a problem with nested signals ? */
2262 regs->CP0_EPC = 0;
2263 return -TARGET_QEMU_ESIGRETURN;
2264
2265 badframe:
2266 force_sig(TARGET_SIGSEGV);
2267 return -TARGET_QEMU_ESIGRETURN;
2268 }
2269 # endif /* O32 */
2270
/*
 * Build a MIPS RT signal frame (trampoline, swapped siginfo, ucontext
 * with sigaltstack info, sigcontext and full signal mask) and redirect
 * the CPU to the handler.  Compare the kernel's setup_rt_frame().
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* "li v0, __NR_rt_sigreturn; syscall" return stub in the frame.  */
    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = pointer to siginfo_t
     *   a2 = pointer to ucontext_t
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    /* $ra points at the trampoline so the handler "returns" into it.  */
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
2332
/*
 * Handle the MIPS rt_sigreturn syscall: restore the signal mask, CPU
 * state and sigaltstack settings from the RT frame built by
 * setup_rt_frame(), then resume the interrupted code.
 */
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    /* $sp still points at the RT sigframe.  */
    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
     * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2366
2367 #elif defined(TARGET_PPC)
2368
/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif

/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same. */
struct target_mcontext {
    /* GPRs plus NIP/MSR/CTR/LNK/XER/CCR etc., indexed by TARGET_PT_*.  */
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    /* Reused as the sigreturn trampoline via "#define tramp mc_pad".  */
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do. Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform. */
    union {
        /* SPE vector registers. One extra for SPEFSCR. */
        uint32_t spe[33];
        /* Altivec vector registers. The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64
        */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};

/* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
    target_ulong _unused[4];    /* _unused[3] holds the mask's high word */
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;       /* low word of the saved signal mask */
    target_ulong regs;          /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};
2438
/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

/* Guest ucontext layout; the 32-bit and 64-bit kernels differ in where
   the machine context lives (inline vs. inside tuc_sigcontext).  */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15];  /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c. */
/* Non-RT (32-bit only) signal frame pushed on the guest stack.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};
2518
#if defined(TARGET_PPC64)

/* Number of 32-bit words reserved for the sigreturn trampoline.  */
#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc; /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

/* 32-bit RT signal frame: siginfo first, then the ucontext.  */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

/* ELFv1 function descriptor (OPD entry): code address plus TOC.  */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad
2556
/* See arch/powerpc/kernel/signal.c. */
/*
 * Pick the guest address for a PPC signal frame: the alternate signal
 * stack if SA_ONSTACK is requested and not already active, otherwise
 * the current r1.  The result is moved down by frame_size and kept
 * 16-byte aligned.
 */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = env->gpr[1];

    if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
        (sas_ss_flags(oldsp) == 0)) {
        oldsp = (target_sigaltstack_used.ss_sp
                 + target_sigaltstack_used.ss_size);
    }

    return (oldsp - frame_size) & ~0xFUL;
}
2574
/* Select which half of a saved AltiVec doubleword pair is "high":
   in order when host and target endianness match, swapped otherwise.  */
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif
2583
2584
2585 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
2586 {
2587 target_ulong msr = env->msr;
2588 int i;
2589 target_ulong ccr = 0;
2590
2591 /* In general, the kernel attempts to be intelligent about what it
2592 needs to save for Altivec/FP/SPE registers. We don't care that
2593 much, so we just go ahead and save everything. */
2594
2595 /* Save general registers. */
2596 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2597 __put_user(env->gpr[i], &frame->mc_gregs[i]);
2598 }
2599 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
2600 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
2601 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
2602 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
2603
2604 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
2605 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
2606 }
2607 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
2608
2609 /* Save Altivec registers if necessary. */
2610 if (env->insns_flags & PPC_ALTIVEC) {
2611 uint32_t *vrsave;
2612 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
2613 ppc_avr_t *avr = &env->avr[i];
2614 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
2615
2616 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
2617 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
2618 }
2619 /* Set MSR_VR in the saved MSR value to indicate that
2620 frame->mc_vregs contains valid data. */
2621 msr |= MSR_VR;
2622 #if defined(TARGET_PPC64)
2623 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
2624 /* 64-bit needs to put a pointer to the vectors in the frame */
2625 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
2626 #else
2627 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
2628 #endif
2629 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
2630 }
2631
2632 /* Save VSX second halves */
2633 if (env->insns_flags2 & PPC2_VSX) {
2634 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
2635 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
2636 __put_user(env->vsr[i], &vsregs[i]);
2637 }
2638 }
2639
2640 /* Save floating point registers. */
2641 if (env->insns_flags & PPC_FLOAT) {
2642 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
2643 __put_user(env->fpr[i], &frame->mc_fregs[i]);
2644 }
2645 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
2646 }
2647
2648 /* Save SPE registers. The kernel only saves the high half. */
2649 if (env->insns_flags & PPC_SPE) {
2650 #if defined(TARGET_PPC64)
2651 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2652 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
2653 }
2654 #else
2655 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
2656 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
2657 }
2658 #endif
2659 /* Set MSR_SPE in the saved MSR value to indicate that
2660 frame->mc_vregs contains valid data. */
2661 msr |= MSR_SPE;
2662 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
2663 }
2664
2665 /* Store MSR. */
2666 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
2667 }
2668
2669 static void encode_trampoline(int sigret, uint32_t *tramp)
2670 {
2671 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
2672 if (sigret) {
2673 __put_user(0x38000000 | sigret, &tramp[0]);
2674 __put_user(0x44000002, &tramp[1]);
2675 }
2676 }
2677
/*
 * Restore guest CPU state from a target_mcontext — the inverse of
 * save_user_regs().  When SIG is non-zero (signal return) the saved
 * MSR_LE bit is restored; when SIG is zero, r2 (TOC) is preserved
 * across the restore instead.  Only MSR_LE is taken from the saved
 * MSR image.
 */
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers. */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Unpack the 32-bit CCR image back into the eight 4-bit CR fields. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR. */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode. */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary. */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* VRSAVE lives after the vector registers; its slot differs
           between ppc64 (v_regs[33]) and ppc32 (v_regs[32]), matching
           save_user_regs(). */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __get_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Restore floating point registers. */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  Only the high halves were saved. */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}
2779
2780 #if !defined(TARGET_PPC64)
/*
 * Build a 32-bit PPC non-RT signal frame: sigcontext + mcontext with
 * the trampoline stored in mc_pad, plus a dummy stack frame for the
 * handler's caller.  Redirects NIP to the handler.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    /* The 64-bit mask is split across oldmask and _unused[3].  */
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
2838 #endif /* !defined(TARGET_PPC64) */
2839
/*
 * Build a PPC RT signal frame (swapped siginfo, ucontext, machine
 * context, trampoline) and redirect the CPU to the handler.  On ppc64
 * the entry point depends on the ELF ABI: ELFv1 dereferences a
 * function descriptor, ELFv2 also loads r12 with the entry address.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    __put_user((target_ulong)target_sigaltstack_used.ss_sp,
               &rt_sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]),
               &rt_sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &rt_sf->uc.tuc_stack.ss_size);
#if !defined(TARGET_PPC64)
    /* 32-bit: tuc_regs points at the inline mcontext.  */
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    /* 64-bit: the mcontext lives inside tuc_sigcontext and the
       trampoline has its own field in the frame.  */
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    /* 32-bit: trampoline is stored in the mc_pad words (see #define
       tramp mc_pad above).  */
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);

}
2940
2941 #if !defined(TARGET_PPC64)
2942 long do_sigreturn(CPUPPCState *env)
2943 {
2944 struct target_sigcontext *sc = NULL;
2945 struct target_mcontext *sr = NULL;
2946 target_ulong sr_addr = 0, sc_addr;
2947 sigset_t blocked;
2948 target_sigset_t set;
2949
2950 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
2951 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
2952 goto sigsegv;
2953
2954 #if defined(TARGET_PPC64)
2955 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
2956 #else
2957 __get_user(set.sig[0], &sc->oldmask);
2958 __get_user(set.sig[1], &sc->_unused[3]);
2959 #endif
2960 target_to_host_sigset_internal(&blocked, &set);
2961 set_sigmask(&blocked);
2962
2963 __get_user(sr_addr, &sc->regs);
2964 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
2965 goto sigsegv;
2966 restore_user_regs(env, sr, 1);
2967
2968 unlock_user_struct(sr, sr_addr, 1);
2969 unlock_user_struct(sc, sc_addr, 1);
2970 return -TARGET_QEMU_ESIGRETURN;
2971
2972 sigsegv:
2973 unlock_user_struct(sr, sr_addr, 1);
2974 unlock_user_struct(sc, sc_addr, 1);
2975 force_sig(TARGET_SIGSEGV);
2976 return -TARGET_QEMU_ESIGRETURN;
2977 }
2978 #endif /* !defined(TARGET_PPC64) */
2979
/* See arch/powerpc/kernel/signal_32.c. */
/*
 * Restore signal mask and CPU state from a guest ucontext.  Returns 0
 * on success, 1 if any guest memory access fails.  On ppc64 the
 * mcontext is embedded in tuc_sigcontext; on ppc32 it is found through
 * the tuc_regs pointer.
 */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
3009
/*
 * Handle the PPC rt_sigreturn syscall: locate the RT frame above the
 * dummy stack frame pushed by setup_rt_frame(), restore mask and CPU
 * state via do_setcontext(), and restore the sigaltstack settings.
 * NOTE(review): unlike the MIPS path, the do_sigaltstack() result is
 * not checked here.
 */
long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
3034
3035 #elif defined(TARGET_M68K)
3036
/* m68k non-RT sigcontext: holds the old mask low word, the scratch
   registers saved by setup_sigcontext(), SR/CCR and the resume PC.  */
struct target_sigcontext {
    abi_ulong  sc_mask;     /* low word of the blocked-signal mask */
    abi_ulong  sc_usp;      /* user stack pointer (a7) */
    abi_ulong  sc_d0;
    abi_ulong  sc_d1;
    abi_ulong  sc_a0;
    abi_ulong  sc_a1;
    unsigned short sc_sr;
    abi_ulong  sc_pc;
};

/* m68k non-RT signal frame pushed on the guest stack.  */
struct target_sigframe
{
    abi_ulong pretcode;     /* points at retcode below */
    int sig;
    int code;
    abi_ulong psc;          /* points at sc below */
    char retcode[8];        /* "moveq #,d0; trap #0" trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    struct target_sigcontext sc;
};

typedef int target_greg_t;
#define TARGET_NGREG 18
typedef target_greg_t target_gregset_t[TARGET_NGREG];

/* FP control registers plus 8 extended-precision registers
   (3 ints = 96 bits each).  */
typedef struct target_fpregset {
    int f_fpcntl[3];
    int f_fpregs[8*3];
} target_fpregset_t;

struct target_mcontext {
    int version;
    target_gregset_t gregs;
    target_fpregset_t fpregs;
};

#define TARGET_MCONTEXT_VERSION 2

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_mcontext tuc_mcontext;
    abi_long tuc_filler[80];
    target_sigset_t tuc_sigmask;
};

/* m68k RT signal frame.  */
struct target_rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    char retcode[8];
    struct target_siginfo info;
    struct target_ucontext uc;
};
3095
/*
 * Save the m68k scratch state (mask, usp, d0/d1, a0/a1, SR+CCR, PC)
 * into a target_sigcontext record in the signal frame.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
                             abi_ulong mask)
{
    /* Combine the system byte of SR with the current CCR flags.  */
    uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
    __put_user(mask, &sc->sc_mask);
    __put_user(env->aregs[7], &sc->sc_usp);
    __put_user(env->dregs[0], &sc->sc_d0);
    __put_user(env->dregs[1], &sc->sc_d1);
    __put_user(env->aregs[0], &sc->sc_a0);
    __put_user(env->aregs[1], &sc->sc_a1);
    __put_user(sr, &sc->sc_sr);
    __put_user(env->pc, &sc->sc_pc);
}
3109
/*
 * Restore the m68k scratch state from a target_sigcontext — the
 * inverse of setup_sigcontext().  Only the CCR portion of SR is
 * restored, via cpu_m68k_set_ccr().
 */
static void
restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
{
    int temp;

    __get_user(env->aregs[7], &sc->sc_usp);
    __get_user(env->dregs[0], &sc->sc_d0);
    __get_user(env->dregs[1], &sc->sc_d1);
    __get_user(env->aregs[0], &sc->sc_a0);
    __get_user(env->aregs[1], &sc->sc_a1);
    __get_user(env->pc, &sc->sc_pc);
    __get_user(temp, &sc->sc_sr);
    cpu_m68k_set_ccr(env, temp);
}
3124
/*
 * Determine which stack to use..
 *
 * Returns the (8-byte aligned) guest address for a signal frame of
 * frame_size bytes: the alternate signal stack if requested via
 * SA_ONSTACK and not already active, otherwise just below a7.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
             size_t frame_size)
{
    unsigned long sp;

    sp = regs->aregs[7];

    /* This is the X/Open sanctioned signal stack switching.  */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    return ((sp - frame_size) & -8UL);
}
3143
/*
 * Build an m68k non-RT signal frame (signal number, pointer to the
 * sigcontext, return trampoline, extra mask words) on the guest stack
 * and redirect the CPU to the handler.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    abi_ulong retcode_addr;
    abi_ulong sc_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(sig, &frame->sig);

    sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
    __put_user(sc_addr, &frame->psc);

    setup_sigcontext(&frame->sc, env, set->sig[0]);

    /* Mask words beyond the first go into extramask.  */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  */

    retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; trap #0 */

    __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
               (uint32_t *)(frame->retcode));

    /* Set up to return from userspace */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}
3191
/*
 * Copy the emulated FPU state into the rt ucontext.
 *
 * Each 96-bit extended-precision register occupies three 32-bit slots of
 * f_fpregs: the exponent word goes into slot 0 shifted to the upper half,
 * and the 64-bit mantissa is written across slots 1-2 with a single
 * 64-bit store (hence the uint64_t cast).
 */
static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
                                            CPUM68KState *env)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;

    __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
    __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high = env->fregs[i].d.high << 16;
        __put_user(high, &fpregs->f_fpregs[i * 3]);
        __put_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}
3209
3210 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
3211 CPUM68KState *env)
3212 {
3213 target_greg_t *gregs = uc->tuc_mcontext.gregs;
3214 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
3215
3216 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
3217 __put_user(env->dregs[0], &gregs[0]);
3218 __put_user(env->dregs[1], &gregs[1]);
3219 __put_user(env->dregs[2], &gregs[2]);
3220 __put_user(env->dregs[3], &gregs[3]);
3221 __put_user(env->dregs[4], &gregs[4]);
3222 __put_user(env->dregs[5], &gregs[5]);
3223 __put_user(env->dregs[6], &gregs[6]);
3224 __put_user(env->dregs[7], &gregs[7]);
3225 __put_user(env->aregs[0], &gregs[8]);
3226 __put_user(env->aregs[1], &gregs[9]);
3227 __put_user(env->aregs[2], &gregs[10]);
3228 __put_user(env->aregs[3], &gregs[11]);
3229 __put_user(env->aregs[4], &gregs[12]);
3230 __put_user(env->aregs[5], &gregs[13]);
3231 __put_user(env->aregs[6], &gregs[14]);
3232 __put_user(env->aregs[7], &gregs[15]);
3233 __put_user(env->pc, &gregs[16]);
3234 __put_user(sr, &gregs[17]);
3235
3236 target_rt_save_fpu_state(uc, env);
3237
3238 return 0;
3239 }
3240
/*
 * Mirror of target_rt_save_fpu_state: reload FPU control registers and
 * the eight extended-precision registers from the rt ucontext.  fpcr is
 * routed through cpu_m68k_set_fpcr() so the helper sees the new control
 * bits; the 64-bit mantissa is read back from slots 1-2 in one access.
 */
static inline void target_rt_restore_fpu_state(CPUM68KState *env,
                                               struct target_ucontext *uc)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
    uint32_t fpcr;

    __get_user(fpcr, &fpregs->f_fpcntl[0]);
    cpu_m68k_set_fpcr(env, fpcr);
    __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high;
        __get_user(high, &fpregs->f_fpregs[i * 3]);
        env->fregs[i].d.high = high >> 16;
        __get_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}
3261
3262 static inline int target_rt_restore_ucontext(CPUM68KState *env,
3263 struct target_ucontext *uc)
3264 {
3265 int temp;
3266 target_greg_t *gregs = uc->tuc_mcontext.gregs;
3267
3268 __get_user(temp, &uc->tuc_mcontext.version);
3269 if (temp != TARGET_MCONTEXT_VERSION)
3270 goto badframe;
3271
3272 /* restore passed registers */
3273 __get_user(env->dregs[0], &gregs[0]);
3274 __get_user(env->dregs[1], &gregs[1]);
3275 __get_user(env->dregs[2], &gregs[2]);
3276 __get_user(env->dregs[3], &gregs[3]);
3277 __get_user(env->dregs[4], &gregs[4]);
3278 __get_user(env->dregs[5], &gregs[5]);
3279 __get_user(env->dregs[6], &gregs[6]);
3280 __get_user(env->dregs[7], &gregs[7]);
3281 __get_user(env->aregs[0], &gregs[8]);
3282 __get_user(env->aregs[1], &gregs[9]);
3283 __get_user(env->aregs[2], &gregs[10]);
3284 __get_user(env->aregs[3], &gregs[11]);
3285 __get_user(env->aregs[4], &gregs[12]);
3286 __get_user(env->aregs[5], &gregs[13]);
3287 __get_user(env->aregs[6], &gregs[14]);
3288 __get_user(env->aregs[7], &gregs[15]);
3289 __get_user(env->pc, &gregs[16]);
3290 __get_user(temp, &gregs[17]);
3291 cpu_m68k_set_ccr(env, temp);
3292
3293 target_rt_restore_fpu_state(env, uc);
3294
3295 return 0;
3296
3297 badframe:
3298 return 1;
3299 }
3300
3301 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3302 target_siginfo_t *info,
3303 target_sigset_t *set, CPUM68KState *env)
3304 {
3305 struct target_rt_sigframe *frame;
3306 abi_ulong frame_addr;
3307 abi_ulong retcode_addr;
3308 abi_ulong info_addr;
3309 abi_ulong uc_addr;
3310 int err = 0;
3311 int i;
3312
3313 frame_addr = get_sigframe(ka, env, sizeof *frame);
3314 trace_user_setup_rt_frame(env, frame_addr);
3315 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3316 goto give_sigsegv;
3317 }
3318
3319 __put_user(sig, &frame->sig);
3320
3321 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
3322 __put_user(info_addr, &frame->pinfo);
3323
3324 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
3325 __put_user(uc_addr, &frame->puc);
3326
3327 tswap_siginfo(&frame->info, info);
3328
3329 /* Create the ucontext */
3330
3331 __put_user(0, &frame->uc.tuc_flags);
3332 __put_user(0, &frame->uc.tuc_link);
3333 __put_user(target_sigaltstack_used.ss_sp,
3334 &frame->uc.tuc_stack.ss_sp);
3335 __put_user(sas_ss_flags(env->aregs[7]),
3336 &frame->uc.tuc_stack.ss_flags);
3337 __put_user(target_sigaltstack_used.ss_size,
3338 &frame->uc.tuc_stack.ss_size);
3339 err |= target_rt_setup_ucontext(&frame->uc, env);
3340
3341 if (err)
3342 goto give_sigsegv;
3343
3344 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3345 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3346 }
3347
3348 /* Set up to return from userspace. */
3349
3350 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
3351 __put_user(retcode_addr, &frame->pretcode);
3352
3353 /* moveq #,d0; notb d0; trap #0 */
3354
3355 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
3356 (uint32_t *)(frame->retcode + 0));
3357 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
3358
3359 if (err)
3360 goto give_sigsegv;
3361
3362 /* Set up to return from userspace */
3363
3364 env->aregs[7] = frame_addr;
3365 env->pc = ka->_sa_handler;
3366
3367 unlock_user_struct(frame, frame_addr, 1);
3368 return;
3369
3370 give_sigsegv:
3371 unlock_user_struct(frame, frame_addr, 1);
3372 force_sigsegv(sig);
3373 }
3374
3375 long do_sigreturn(CPUM68KState *env)
3376 {
3377 struct target_sigframe *frame;
3378 abi_ulong frame_addr = env->aregs[7] - 4;
3379 target_sigset_t target_set;
3380 sigset_t set;
3381 int i;
3382
3383 trace_user_do_sigreturn(env, frame_addr);
3384 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3385 goto badframe;
3386
3387 /* set blocked signals */
3388
3389 __get_user(target_set.sig[0], &frame->sc.sc_mask);
3390
3391 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3392 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3393 }
3394
3395 target_to_host_sigset_internal(&set, &target_set);
3396 set_sigmask(&set);
3397
3398 /* restore registers */
3399
3400 restore_sigcontext(env, &frame->sc);
3401
3402 unlock_user_struct(frame, frame_addr, 0);
3403 return -TARGET_QEMU_ESIGRETURN;
3404
3405 badframe:
3406 force_sig(TARGET_SIGSEGV);
3407 return -TARGET_QEMU_ESIGRETURN;
3408 }
3409
3410 long do_rt_sigreturn(CPUM68KState *env)
3411 {
3412 struct target_rt_sigframe *frame;
3413 abi_ulong frame_addr = env->aregs[7] - 4;
3414 sigset_t set;
3415
3416 trace_user_do_rt_sigreturn(env, frame_addr);
3417 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3418 goto badframe;
3419
3420 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3421 set_sigmask(&set);
3422
3423 /* restore registers */
3424
3425 if (target_rt_restore_ucontext(env, &frame->uc))
3426 goto badframe;
3427
3428 if (do_sigaltstack(frame_addr +
3429 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3430 0, get_sp_from_cpustate(env)) == -EFAULT)
3431 goto badframe;
3432
3433 unlock_user_struct(frame, frame_addr, 0);
3434 return -TARGET_QEMU_ESIGRETURN;
3435
3436 badframe:
3437 unlock_user_struct(frame, frame_addr, 0);
3438 force_sig(TARGET_SIGSEGV);
3439 return -TARGET_QEMU_ESIGRETURN;
3440 }
3441
3442 #elif defined(TARGET_ALPHA)
3443
/*
 * Alpha struct sigcontext per the OSF/1-compatible Linux ABI.
 * Field order and widths must match the kernel exactly.
 */
struct target_sigcontext {
    abi_long sc_onstack;       /* non-zero when delivered on sigaltstack */
    abi_long sc_mask;          /* first word of the blocked-signal mask */
    abi_long sc_pc;
    abi_long sc_ps;            /* processor status; written as 8 here */
    abi_long sc_regs[32];      /* integer registers; slot 31 written as 0 */
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];    /* FP registers; slot 31 written as 0 */
    abi_ulong sc_fpcr;         /* FP control register */
    abi_ulong sc_fp_control;
    abi_ulong sc_reserved1;
    abi_ulong sc_reserved2;
    abi_ulong sc_ssize;
    abi_ulong sc_sbase;
    abi_ulong sc_traparg_a0;   /* trap args are zero-filled (see FIXME in
                                  setup_sigcontext) */
    abi_ulong sc_traparg_a1;
    abi_ulong sc_traparg_a2;
    abi_ulong sc_fp_trap_pc;
    abi_ulong sc_fp_trigger_sum;
    abi_ulong sc_fp_trigger_inst;
};

/* Alpha ucontext; tuc_osf_sigmask carries the legacy OSF/1 mask word. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    abi_ulong tuc_osf_sigmask;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* Non-RT frame: saved context followed by the return trampoline. */
struct target_sigframe {
    struct target_sigcontext sc;
    unsigned int retcode[3];
};

/* RT frame: siginfo + ucontext followed by the return trampoline. */
struct target_rt_sigframe {
    target_siginfo_t info;
    struct target_ucontext uc;
    unsigned int retcode[3];
};

/* Trampoline encodings: mov $sp,$a0; lda $v0,<nr>; callsys */
#define INSN_MOV_R30_R16 0x47fe0410
#define INSN_LDI_R0 0x201f0000
#define INSN_CALLSYS 0x00000083
3489
3490 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
3491 abi_ulong frame_addr, target_sigset_t *set)
3492 {
3493 int i;
3494
3495 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
3496 __put_user(set->sig[0], &sc->sc_mask);
3497 __put_user(env->pc, &sc->sc_pc);
3498 __put_user(8, &sc->sc_ps);
3499
3500 for (i = 0; i < 31; ++i) {
3501 __put_user(env->ir[i], &sc->sc_regs[i]);
3502 }
3503 __put_user(0, &sc->sc_regs[31]);
3504
3505 for (i = 0; i < 31; ++i) {
3506 __put_user(env->fir[i], &sc->sc_fpregs[i]);
3507 }
3508 __put_user(0, &sc->sc_fpregs[31]);
3509 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
3510
3511 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
3512 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
3513 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
3514 }
3515
3516 static void restore_sigcontext(CPUAlphaState *env,
3517 struct target_sigcontext *sc)
3518 {
3519 uint64_t fpcr;
3520 int i;
3521
3522 __get_user(env->pc, &sc->sc_pc);
3523
3524 for (i = 0; i < 31; ++i) {
3525 __get_user(env->ir[i], &sc->sc_regs[i]);
3526 }
3527 for (i = 0; i < 31; ++i) {
3528 __get_user(env->fir[i], &sc->sc_fpregs[i]);
3529 }
3530
3531 __get_user(fpcr, &sc->sc_fpcr);
3532 cpu_alpha_store_fpcr(env, fpcr);
3533 }
3534
3535 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
3536 CPUAlphaState *env,
3537 unsigned long framesize)
3538 {
3539 abi_ulong sp = env->ir[IR_SP];
3540
3541 /* This is the X/Open sanctioned signal stack switching. */
3542 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
3543 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3544 }
3545 return (sp - framesize) & -32;
3546 }
3547
3548 static void setup_frame(int sig, struct target_sigaction *ka,
3549 target_sigset_t *set, CPUAlphaState *env)
3550 {
3551 abi_ulong frame_addr, r26;
3552 struct target_sigframe *frame;
3553 int err = 0;
3554
3555 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3556 trace_user_setup_frame(env, frame_addr);
3557 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3558 goto give_sigsegv;
3559 }
3560
3561 setup_sigcontext(&frame->sc, env, frame_addr, set);
3562
3563 if (ka->sa_restorer) {
3564 r26 = ka->sa_restorer;
3565 } else {
3566 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
3567 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
3568 &frame->retcode[1]);
3569 __put_user(INSN_CALLSYS, &frame->retcode[2]);
3570 /* imb() */
3571 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
3572 }
3573
3574 unlock_user_struct(frame, frame_addr, 1);
3575
3576 if (err) {
3577 give_sigsegv:
3578 force_sigsegv(sig);
3579 return;
3580 }
3581
3582 env->ir[IR_RA] = r26;
3583 env->ir[IR_PV] = env->pc = ka->_sa_handler;
3584 env->ir[IR_A0] = sig;
3585 env->ir[IR_A1] = 0;
3586 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
3587 env->ir[IR_SP] = frame_addr;
3588 }
3589
3590 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3591 target_siginfo_t *info,
3592 target_sigset_t *set, CPUAlphaState *env)
3593 {
3594 abi_ulong frame_addr, r26;
3595 struct target_rt_sigframe *frame;
3596 int i, err = 0;
3597
3598 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3599 trace_user_setup_rt_frame(env, frame_addr);
3600 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3601 goto give_sigsegv;
3602 }
3603
3604 tswap_siginfo(&frame->info, info);
3605
3606 __put_user(0, &frame->uc.tuc_flags);
3607 __put_user(0, &frame->uc.tuc_link);
3608 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
3609 __put_user(target_sigaltstack_used.ss_sp,
3610 &frame->uc.tuc_stack.ss_sp);
3611 __put_user(sas_ss_flags(env->ir[IR_SP]),
3612 &frame->uc.tuc_stack.ss_flags);
3613 __put_user(target_sigaltstack_used.ss_size,
3614 &frame->uc.tuc_stack.ss_size);
3615 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
3616 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
3617 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3618 }
3619
3620 if (ka->sa_restorer) {
3621 r26 = ka->sa_restorer;
3622 } else {
3623 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
3624 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
3625 &frame->retcode[1]);
3626 __put_user(INSN_CALLSYS, &frame->retcode[2]);
3627 /* imb(); */
3628 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
3629 }
3630
3631 if (err) {
3632 give_sigsegv:
3633 force_sigsegv(sig);
3634 return;
3635 }
3636
3637 env->ir[IR_RA] = r26;
3638 env->ir[IR_PV] = env->pc = ka->_sa_handler;
3639 env->ir[IR_A0] = sig;
3640 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
3641 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
3642 env->ir[IR_SP] = frame_addr;
3643 }
3644
3645 long do_sigreturn(CPUAlphaState *env)
3646 {
3647 struct target_sigcontext *sc;
3648 abi_ulong sc_addr = env->ir[IR_A0];
3649 target_sigset_t target_set;
3650 sigset_t set;
3651
3652 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
3653 goto badframe;
3654 }
3655
3656 target_sigemptyset(&target_set);
3657 __get_user(target_set.sig[0], &sc->sc_mask);
3658
3659 target_to_host_sigset_internal(&set, &target_set);
3660 set_sigmask(&set);
3661
3662 restore_sigcontext(env, sc);
3663 unlock_user_struct(sc, sc_addr, 0);
3664 return -TARGET_QEMU_ESIGRETURN;
3665
3666 badframe:
3667 force_sig(TARGET_SIGSEGV);
3668 return -TARGET_QEMU_ESIGRETURN;
3669 }
3670
3671 long do_rt_sigreturn(CPUAlphaState *env)
3672 {
3673 abi_ulong frame_addr = env->ir[IR_A0];
3674 struct target_rt_sigframe *frame;
3675 sigset_t set;
3676
3677 trace_user_do_rt_sigreturn(env, frame_addr);
3678 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3679 goto badframe;
3680 }
3681 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3682 set_sigmask(&set);
3683
3684 restore_sigcontext(env, &frame->uc.tuc_mcontext);
3685 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
3686 uc.tuc_stack),
3687 0, env->ir[IR_SP]) == -EFAULT) {
3688 goto badframe;
3689 }
3690
3691 unlock_user_struct(frame, frame_addr, 0);
3692 return -TARGET_QEMU_ESIGRETURN;
3693
3694
3695 badframe:
3696 unlock_user_struct(frame, frame_addr, 0);
3697 force_sig(TARGET_SIGSEGV);
3698 return -TARGET_QEMU_ESIGRETURN;
3699 }
3700
3701 #elif defined(TARGET_TILEGX)
3702
/*
 * TILE-Gx sigcontext: the full register file plus pc/ics/faultnum,
 * padded to the kernel layout.  The anonymous struct gives the last
 * three general registers their conventional names.
 */
struct target_sigcontext {
    union {
        /* General-purpose registers. */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp; /* Aliases gregs[TREG_TP]. */
            abi_ulong sp; /* Aliases gregs[TREG_SP]. */
            abi_ulong lr; /* Aliases gregs[TREG_LR]. */
        };
    };
    abi_ulong pc; /* Program counter. */
    abi_ulong ics; /* In Interrupt Critical Section? */
    abi_ulong faultnum; /* Fault number. */
    abi_ulong pad[5];
};

/* TILE-Gx ucontext, mirroring the kernel structure. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask; /* mask last for extensibility */
};

/* RT frame; retcode holds the two-bundle sigreturn trampoline. */
struct target_rt_sigframe {
    unsigned char save_area[16]; /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];
};

/* Trampoline bundles: load __NR_rt_sigreturn (139) into r10, then trap. */
#define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
#define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
3737
3738
3739 static void setup_sigcontext(struct target_sigcontext *sc,
3740 CPUArchState *env, int signo)
3741 {
3742 int i;
3743
3744 for (i = 0; i < TILEGX_R_COUNT; ++i) {
3745 __put_user(env->regs[i], &sc->gregs[i]);
3746 }
3747
3748 __put_user(env->pc, &sc->pc);
3749 __put_user(0, &sc->ics);
3750 __put_user(signo, &sc->faultnum);
3751 }
3752
3753 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
3754 {
3755 int i;
3756
3757 for (i = 0; i < TILEGX_R_COUNT; ++i) {
3758 __get_user(env->regs[i], &sc->gregs[i]);
3759 }
3760
3761 __get_user(env->pc, &sc->pc);
3762 }
3763
3764 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
3765 size_t frame_size)
3766 {
3767 unsigned long sp = env->regs[TILEGX_R_SP];
3768
3769 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
3770 return -1UL;
3771 }
3772
3773 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
3774 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3775 }
3776
3777 sp -= frame_size;
3778 sp &= -16UL;
3779 return sp;
3780 }
3781
3782 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3783 target_siginfo_t *info,
3784 target_sigset_t *set, CPUArchState *env)
3785 {
3786 abi_ulong frame_addr;
3787 struct target_rt_sigframe *frame;
3788 unsigned long restorer;
3789
3790 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3791 trace_user_setup_rt_frame(env, frame_addr);
3792 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3793 goto give_sigsegv;
3794 }
3795
3796 /* Always write at least the signal number for the stack backtracer. */
3797 if (ka->sa_flags & TARGET_SA_SIGINFO) {
3798 /* At sigreturn time, restore the callee-save registers too. */
3799 tswap_siginfo(&frame->info, info);
3800 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
3801 } else {
3802 __put_user(info->si_signo, &frame->info.si_signo);
3803 }
3804
3805 /* Create the ucontext. */
3806 __put_user(0, &frame->uc.tuc_flags);
3807 __put_user(0, &frame->uc.tuc_link);
3808 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
3809 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
3810 &frame->uc.tuc_stack.ss_flags);
3811 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
3812 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
3813
3814 if (ka->sa_flags & TARGET_SA_RESTORER) {
3815 restorer = (unsigned long) ka->sa_restorer;
3816 } else {
3817 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
3818 __put_user(INSN_SWINT1, &frame->retcode[1]);
3819 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
3820 }
3821 env->pc = (unsigned long) ka->_sa_handler;
3822 env->regs[TILEGX_R_SP] = (unsigned long) frame;
3823 env->regs[TILEGX_R_LR] = restorer;
3824 env->regs[0] = (unsigned long) sig;
3825 env->regs[1] = (unsigned long) &frame->info;
3826 env->regs[2] = (unsigned long) &frame->uc;
3827 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
3828
3829 unlock_user_struct(frame, frame_addr, 1);
3830 return;
3831
3832 give_sigsegv:
3833 force_sigsegv(sig);
3834 }
3835
3836 long do_rt_sigreturn(CPUTLGState *env)
3837 {
3838 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
3839 struct target_rt_sigframe *frame;
3840 sigset_t set;
3841
3842 trace_user_do_rt_sigreturn(env, frame_addr);
3843 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3844 goto badframe;
3845 }
3846 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3847 set_sigmask(&set);
3848
3849 restore_sigcontext(env, &frame->uc.tuc_mcontext);
3850 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
3851 uc.tuc_stack),
3852 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
3853 goto badframe;
3854 }
3855
3856 unlock_user_struct(frame, frame_addr, 0);
3857 return -TARGET_QEMU_ESIGRETURN;
3858
3859
3860 badframe:
3861 unlock_user_struct(frame, frame_addr, 0);
3862 force_sig(TARGET_SIGSEGV);
3863 return -TARGET_QEMU_ESIGRETURN;
3864 }
3865
3866 #elif defined(TARGET_RISCV)
3867
3868 /* Signal handler invocation must be transparent for the code being
3869 interrupted. Complete CPU (hart) state is saved on entry and restored
3870 before returning from the handler. Process sigmask is also saved to block
3871 signals while the handler is running. The handler gets its own stack,
3872 which also doubles as storage for the CPU state and sigmask.
3873
   The code below is a QEMU re-implementation of arch/riscv/kernel/signal.c */
3875
/* Machine context stored in the frame; layout follows the kernel's
   struct sigcontext. */
struct target_sigcontext {
    abi_long pc;
    abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
    uint64_t fpr[32];
    uint32_t fcsr;    /* floating-point control/status register */
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */

/* RISC-V ucontext mirroring the kernel structure; uc_link is written
   as 0 at delivery. */
struct target_ucontext {
    unsigned long uc_flags;
    struct target_ucontext *uc_link;
    target_stack_t uc_stack;
    struct target_sigcontext uc_mcontext;
    target_sigset_t uc_sigmask;
};

/* RT frame: trampoline first (the kernel itself uses the VDSO),
   then siginfo and ucontext. */
struct target_rt_sigframe {
    uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
    struct target_siginfo info;
    struct target_ucontext uc;
};
3896
3897 static abi_ulong get_sigframe(struct target_sigaction *ka,
3898 CPURISCVState *regs, size_t framesize)
3899 {
3900 abi_ulong sp = regs->gpr[xSP];
3901 int onsigstack = on_sig_stack(sp);
3902
3903 /* redzone */
3904 /* This is the X/Open sanctioned signal stack switching. */
3905 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3906 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3907 }
3908
3909 sp -= framesize;
3910 sp &= ~3UL; /* align sp on 4-byte boundary */
3911
3912 /* If we are on the alternate signal stack and would overflow it, don't.
3913 Return an always-bogus address instead so we will die with SIGSEGV. */
3914 if (onsigstack && !likely(on_sig_stack(sp))) {
3915 return -1L;
3916 }
3917
3918 return sp;
3919 }
3920
3921 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
3922 {
3923 int i;
3924
3925 __put_user(env->pc, &sc->pc);
3926
3927 for (i = 1; i < 32; i++) {
3928 __put_user(env->gpr[i], &sc->gpr[i - 1]);
3929 }
3930 for (i = 0; i < 32; i++) {
3931 __put_user(env->fpr[i], &sc->fpr[i]);
3932 }
3933
3934 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
3935 __put_user(fcsr, &sc->fcsr);
3936 }
3937
3938 static void setup_ucontext(struct target_ucontext *uc,
3939 CPURISCVState *env, target_sigset_t *set)
3940 {
3941 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
3942 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
3943 abi_ulong ss_size = target_sigaltstack_used.ss_size;
3944
3945 __put_user(0, &(uc->uc_flags));
3946 __put_user(0, &(uc->uc_link));
3947
3948 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
3949 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
3950 __put_user(ss_size, &(uc->uc_stack.ss_size));
3951
3952 int i;
3953 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
3954 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
3955 }
3956
3957 setup_sigcontext(&uc->uc_mcontext, env);
3958 }
3959
/*
 * Plant the two-instruction sigreturn trampoline in the frame
 * (li a7, __NR_rt_sigreturn; ecall).  The kernel itself relies on the
 * VDSO for this; qemu stores it in the frame instead.
 */
static inline void install_sigtramp(uint32_t *tramp)
{
    __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
    __put_user(0x00000073, tramp + 1); /* ecall */
}
3965
3966 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3967 target_siginfo_t *info,
3968 target_sigset_t *set, CPURISCVState *env)
3969 {
3970 abi_ulong frame_addr;
3971 struct target_rt_sigframe *frame;
3972
3973 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3974 trace_user_setup_rt_frame(env, frame_addr);
3975
3976 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3977 goto badframe;
3978 }
3979
3980 setup_ucontext(&frame->uc, env, set);
3981 tswap_siginfo(&frame->info, info);
3982 install_sigtramp(frame->tramp);
3983
3984 env->pc = ka->_sa_handler;
3985 env->gpr[xSP] = frame_addr;
3986 env->gpr[xA0] = sig;
3987 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
3988 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
3989 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
3990
3991 return;
3992
3993 badframe:
3994 unlock_user_struct(frame, frame_addr, 1);
3995 if (sig == TARGET_SIGSEGV) {
3996 ka->_sa_handler = TARGET_SIG_DFL;
3997 }
3998 force_sig(TARGET_SIGSEGV);
3999 }
4000
4001 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
4002 {
4003 int i;
4004
4005 __get_user(env->pc, &sc->pc);
4006
4007 for (i = 1; i < 32; ++i) {
4008 __get_user(env->gpr[i], &sc->gpr[i - 1]);
4009 }
4010 for (i = 0; i < 32; ++i) {
4011 __get_user(env->fpr[i], &sc->fpr[i]);
4012 }
4013
4014 uint32_t fcsr;
4015 __get_user(fcsr, &sc->fcsr);
4016 csr_write_helper(env, fcsr, CSR_FCSR);
4017 }
4018
4019 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
4020 {
4021 sigset_t blocked;
4022 target_sigset_t target_set;
4023 int i;
4024
4025 target_sigemptyset(&target_set);
4026 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4027 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
4028 }
4029
4030 target_to_host_sigset_internal(&blocked, &target_set);
4031 set_sigmask(&blocked);
4032
4033 restore_sigcontext(env, &uc->uc_mcontext);
4034 }
4035
4036 long do_rt_sigreturn(CPURISCVState *env)
4037 {
4038 struct target_rt_sigframe *frame;
4039 abi_ulong frame_addr;
4040
4041 frame_addr = env->gpr[xSP];
4042 trace_user_do_sigreturn(env, frame_addr);
4043 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4044 goto badframe;
4045 }
4046
4047 restore_ucontext(env, &frame->uc);
4048
4049 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
4050 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
4051 goto badframe;
4052 }
4053
4054 unlock_user_struct(frame, frame_addr, 0);
4055 return -TARGET_QEMU_ESIGRETURN;
4056
4057 badframe:
4058 unlock_user_struct(frame, frame_addr, 0);
4059 force_sig(TARGET_SIGSEGV);
4060 return 0;
4061 }
4062
4063 #elif defined(TARGET_HPPA)
4064
/*
 * PA-RISC sigcontext.  Slot 0 of sc_gr carries the PSW (gr0 is the
 * hardwired zero register), and slot 0 of sc_fr carries the shadowed
 * FP status word in its upper half (see setup_sigcontext).
 */
struct target_sigcontext {
    abi_ulong sc_flags;    /* PARISC_SC_FLAG_* bits */
    abi_ulong sc_gr[32];
    uint64_t sc_fr[32];
    abi_ulong sc_iasq[2];  /* instruction address space queue */
    abi_ulong sc_iaoq[2];  /* instruction address offset queue */
    abi_ulong sc_sar;      /* shift amount register */
};

/* PA-RISC ucontext mirroring the kernel layout (note the pad word). */
struct target_ucontext {
    abi_uint tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    abi_uint pad[1];
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* RT frame: trampoline first, then siginfo and ucontext.  Only the
   first 4 tramp slots are filled by setup_rt_frame. */
struct target_rt_sigframe {
    abi_uint tramp[9];
    target_siginfo_t info;
    struct target_ucontext uc;
    /* hidden location of upper halves of pa2.0 64-bit gregs */
};
4089
4090 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
4091 {
4092 int flags = 0;
4093 int i;
4094
4095 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
4096
4097 if (env->iaoq_f < TARGET_PAGE_SIZE) {
4098 /* In the gateway page, executing a syscall. */
4099 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
4100 __put_user(env->gr[31], &sc->sc_iaoq[0]);
4101 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
4102 } else {
4103 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
4104 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
4105 }
4106 __put_user(0, &sc->sc_iasq[0]);
4107 __put_user(0, &sc->sc_iasq[1]);
4108 __put_user(flags, &sc->sc_flags);
4109
4110 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
4111 for (i = 1; i < 32; ++i) {
4112 __put_user(env->gr[i], &sc->sc_gr[i]);
4113 }
4114
4115 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
4116 for (i = 1; i < 32; ++i) {
4117 __put_user(env->fr[i], &sc->sc_fr[i]);
4118 }
4119
4120 __put_user(env->cr[CR_SAR], &sc->sc_sar);
4121 }
4122
4123 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
4124 {
4125 target_ulong psw;
4126 int i;
4127
4128 __get_user(psw, &sc->sc_gr[0]);
4129 cpu_hppa_put_psw(env, psw);
4130
4131 for (i = 1; i < 32; ++i) {
4132 __get_user(env->gr[i], &sc->sc_gr[i]);
4133 }
4134 for (i = 0; i < 32; ++i) {
4135 __get_user(env->fr[i], &sc->sc_fr[i]);
4136 }
4137 cpu_hppa_loaded_fr0(env);
4138
4139 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
4140 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
4141 __get_user(env->cr[CR_SAR], &sc->sc_sar);
4142 }
4143
/* No, this doesn't look right, but it's copied straight from the kernel.
   (Frame size padded by 48+64 bytes and then masked down to a 64-byte
   multiple, matching the arch/parisc compat frame sizing.) */
#define PARISC_RT_SIGFRAME_SIZE32 \
    ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
4147
4148 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4149 target_siginfo_t *info,
4150 target_sigset_t *set, CPUArchState *env)
4151 {
4152 abi_ulong frame_addr, sp, haddr;
4153 struct target_rt_sigframe *frame;
4154 int i;
4155
4156 sp = env->gr[30];
4157 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4158 if (sas_ss_flags(sp) == 0) {
4159 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
4160 }
4161 }
4162 frame_addr = QEMU_ALIGN_UP(sp, 64);
4163 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
4164
4165 trace_user_setup_rt_frame(env, frame_addr);
4166
4167 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4168 goto give_sigsegv;
4169 }
4170
4171 tswap_siginfo(&frame->info, info);
4172 frame->uc.tuc_flags = 0;
4173 frame->uc.tuc_link = 0;
4174
4175 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4176 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4177 &frame->uc.tuc_stack.ss_flags);
4178 __put_user(target_sigaltstack_used.ss_size,
4179 &frame->uc.tuc_stack.ss_size);
4180
4181 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4182 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
4183 }
4184
4185 setup_sigcontext(&frame->uc.tuc_mcontext, env);
4186
4187 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
4188 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
4189 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
4190 __put_user(0x08000240, frame->tramp + 3); /* nop */
4191
4192 unlock_user_struct(frame, frame_addr, 1);
4193
4194 env->gr[2] = h2g(frame->tramp);
4195 env->gr[30] = sp;
4196 env->gr[26] = sig;
4197 env->gr[25] = h2g(&frame->info);
4198 env->gr[24] = h2g(&frame->uc);
4199
4200 haddr = ka->_sa_handler;
4201 if (haddr & 2) {
4202 /* Function descriptor. */
4203 target_ulong *fdesc, dest;
4204
4205 haddr &= -4;
4206 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
4207 goto give_sigsegv;
4208 }
4209 __get_user(dest, fdesc);
4210 __get_user(env->gr[19], fdesc + 1);
4211 unlock_user_struct(fdesc, haddr, 1);
4212 haddr = dest;
4213 }
4214 env->iaoq_f = haddr;
4215 env->iaoq_b = haddr + 4;
4216 return;
4217
4218 give_sigsegv:
4219 force_sigsegv(sig);
4220 }
4221
4222 long do_rt_sigreturn(CPUArchState *env)
4223 {
4224 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
4225 struct target_rt_sigframe *frame;
4226 sigset_t set;
4227
4228 trace_user_do_rt_sigreturn(env, frame_addr);
4229 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4230 goto badframe;
4231 }
4232 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4233 set_sigmask(&set);
4234
4235 restore_sigcontext(env, &frame->uc.tuc_mcontext);
4236 unlock_user_struct(frame, frame_addr, 0);
4237
4238 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
4239 uc.tuc_stack),
4240 0, env->gr[30]) == -EFAULT) {
4241 goto badframe;
4242 }
4243
4244 unlock_user_struct(frame, frame_addr, 0);
4245 return -TARGET_QEMU_ESIGRETURN;
4246
4247 badframe:
4248 force_sig(TARGET_SIGSEGV);
4249 return -TARGET_QEMU_ESIGRETURN;
4250 }
4251
4252 #elif defined(TARGET_XTENSA)
4253
/* Saved CPU state stored in the signal frame.  Layout must match the
 * xtensa kernel's struct sigcontext (guest ABI) — do not reorder.
 * Fields are filled by setup_sigcontext() and read back by
 * restore_sigcontext() below.
 */
struct target_sigcontext {
    abi_ulong sc_pc;        /* program counter (env->pc) */
    abi_ulong sc_ps;        /* processor state register (sregs[PS]) */
    abi_ulong sc_lbeg;      /* zero-overhead loop begin (sregs[LBEG]) */
    abi_ulong sc_lend;      /* zero-overhead loop end (sregs[LEND]) */
    abi_ulong sc_lcount;    /* zero-overhead loop count (sregs[LCOUNT]) */
    abi_ulong sc_sar;       /* shift-amount register; not saved/restored here */
    abi_ulong sc_acclo;     /* MAC16 accumulator low; not saved/restored here */
    abi_ulong sc_acchi;     /* MAC16 accumulator high; not saved/restored here */
    abi_ulong sc_a[16];     /* current window's a0..a15 (env->regs[0..15]) */
    abi_ulong sc_xtregs;    /* extra-regs pointer; written as 0 (TODO: xtregs) */
};
4266
/* Guest-visible ucontext embedded in the rt signal frame; mirrors the
 * kernel's struct ucontext for xtensa.
 */
struct target_ucontext {
    abi_ulong tuc_flags;                    /* written as 0 by setup_rt_frame */
    abi_ulong tuc_link;                     /* next context; written as 0 */
    target_stack_t tuc_stack;               /* signal-stack state (sigaltstack) */
    struct target_sigcontext tuc_mcontext;  /* saved machine context */
    target_sigset_t tuc_sigmask;            /* signal mask to restore on return */
};
4274
/* Complete rt signal frame pushed on the guest stack.  The guest's a1
 * points at this structure while the handler runs.
 */
struct target_rt_sigframe {
    target_siginfo_t info;      /* siginfo passed to the handler (a7) */
    struct target_ucontext uc;  /* ucontext passed to the handler (a8) */
    /* TODO: xtregs */
    uint8_t retcode[6];         /* trampoline: MOVI a2,NR_rt_sigreturn; SYSCALL */
    abi_ulong window[4];        /* spill area for the caller's window */
};
4282
4283 static abi_ulong get_sigframe(struct target_sigaction *sa,
4284 CPUXtensaState *env,
4285 unsigned long framesize)
4286 {
4287 abi_ulong sp = env->regs[1];
4288
4289 /* This is the X/Open sanctioned signal stack switching. */
4290 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
4291 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4292 }
4293 return (sp - framesize) & -16;
4294 }
4295
/* Spill all live register windows beneath the current one to their
 * guest-stack save areas, so that the full call chain's registers are
 * visible in guest memory before a signal frame is built.
 * Returns nonzero on success, 0 if any guest memory access faulted.
 */
static int flush_window_regs(CPUXtensaState *env)
{
    uint32_t wb = env->sregs[WINDOW_BASE];
    /* WINDOWSTART bits, rotated so bit 0 is the window just above the
     * current one (xtensa_replicate_windowstart doubles the mask so the
     * shift never runs off the end).  */
    uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1);
    unsigned d = ctz32(ws) + 1;   /* distance to the next live window */
    unsigned i;
    int ret = 0;                  /* accumulates get/put_user error codes */

    for (i = d; i < env->config->nareg / 4; i += d) {
        uint32_t ssp, osp;
        unsigned j;

        ws >>= d;
        xtensa_rotate_window(env, d);

        /* The call increment of the now-current frame selects the spill
         * locations, per the windowed ABI: a0..a3 go below the stack
         * pointer of the next frame (ssp); call8/call12 frames spill
         * their extra registers (osp) below the caller's frame, found
         * via the saved stack pointer at a1 - 12.
         * NOTE(review): offsets follow the kernel's xtensa window-spill
         * convention — verify against arch/xtensa if touched.  */
        if (ws & 0x1) {
            ssp = env->regs[5];          /* call4 frame */
            d = 1;
        } else if (ws & 0x2) {
            ssp = env->regs[9];          /* call8 frame */
            ret |= get_user_ual(osp, env->regs[1] - 12);
            osp -= 32;
            d = 2;
        } else if (ws & 0x4) {
            ssp = env->regs[13];         /* call12 frame */
            ret |= get_user_ual(osp, env->regs[1] - 12);
            osp -= 48;
            d = 3;
        } else {
            g_assert_not_reached();
        }

        /* Spill a0..a3 of this window ... */
        for (j = 0; j < 4; ++j) {
            ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4);
        }
        /* ... and a4..a(4d-1) for call8/call12 frames. */
        for (j = 4; j < d * 4; ++j) {
            ret |= put_user_ual(env->regs[j], osp - 16 + j * 4);
        }
    }
    /* One final rotation brings WINDOW_BASE back to its entry value. */
    xtensa_rotate_window(env, d);
    g_assert(env->sregs[WINDOW_BASE] == wb);
    return ret == 0;
}
4339
4340 static int setup_sigcontext(struct target_rt_sigframe *frame,
4341 CPUXtensaState *env)
4342 {
4343 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
4344 int i;
4345
4346 __put_user(env->pc, &sc->sc_pc);
4347 __put_user(env->sregs[PS], &sc->sc_ps);
4348 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
4349 __put_user(env->sregs[LEND], &sc->sc_lend);
4350 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
4351 if (!flush_window_regs(env)) {
4352 return 0;
4353 }
4354 for (i = 0; i < 16; ++i) {
4355 __put_user(env->regs[i], sc->sc_a + i);
4356 }
4357 __put_user(0, &sc->sc_xtregs);
4358 /* TODO: xtregs */
4359 return 1;
4360 }
4361
4362 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4363 target_siginfo_t *info,
4364 target_sigset_t *set, CPUXtensaState *env)
4365 {
4366 abi_ulong frame_addr;
4367 struct target_rt_sigframe *frame;
4368 uint32_t ra;
4369 int i;
4370
4371 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4372 trace_user_setup_rt_frame(env, frame_addr);
4373
4374 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4375 goto give_sigsegv;
4376 }
4377
4378 if (ka->sa_flags & SA_SIGINFO) {
4379 tswap_siginfo(&frame->info, info);
4380 }
4381
4382 __put_user(0, &frame->uc.tuc_flags);
4383 __put_user(0, &frame->uc.tuc_link);
4384 __put_user(target_sigaltstack_used.ss_sp,
4385 &frame->uc.tuc_stack.ss_sp);
4386 __put_user(sas_ss_flags(env->regs[1]),
4387 &frame->uc.tuc_stack.ss_flags);
4388 __put_user(target_sigaltstack_used.ss_size,
4389 &frame->uc.tuc_stack.ss_size);
4390 if (!setup_sigcontext(frame, env)) {
4391 unlock_user_struct(frame, frame_addr, 0);
4392 goto give_sigsegv;
4393 }
4394 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
4395 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
4396 }
4397
4398 if (ka->sa_flags & TARGET_SA_RESTORER) {
4399 ra = ka->sa_restorer;
4400 } else {
4401 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4402 #ifdef TARGET_WORDS_BIGENDIAN
4403 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
4404 __put_user(0x22, &frame->retcode[0]);
4405 __put_user(0x0a, &frame->retcode[1]);
4406 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
4407 /* Generate instruction: SYSCALL */
4408 __put_user(0x00, &frame->retcode[3]);
4409 __put_user(0x05, &frame->retcode[4]);
4410 __put_user(0x00, &frame->retcode[5]);
4411 #else
4412 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
4413 __put_user(0x22, &frame->retcode[0]);
4414 __put_user(0xa0, &frame->retcode[1]);
4415 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
4416 /* Generate instruction: SYSCALL */
4417 __put_user(0x00, &frame->retcode[3]);
4418 __put_user(0x50, &frame->retcode[4]);
4419 __put_user(0x00, &frame->retcode[5]);
4420 #endif
4421 }
4422 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
4423 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
4424 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
4425 }
4426 memset(env->regs, 0, sizeof(env->regs));
4427 env->pc = ka->_sa_handler;
4428 env->regs[1] = frame_addr;
4429 env->sregs[WINDOW_BASE] = 0;
4430 env->sregs[WINDOW_START] = 1;
4431
4432 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
4433 env->regs[6] = sig;
4434 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
4435 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
4436 unlock_user_struct(frame, frame_addr, 1);
4437 return;
4438
4439 give_sigsegv:
4440 force_sigsegv(sig);
4441 return;
4442 }
4443
/* Restore CPU state from the signal frame's machine context, the
 * inverse of setup_sigcontext().
 */
static void restore_sigcontext(CPUXtensaState *env,
                               struct target_rt_sigframe *frame)
{
    struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
    uint32_t ps;
    int i;

    __get_user(env->pc, &sc->sc_pc);
    __get_user(ps, &sc->sc_ps);
    __get_user(env->sregs[LBEG], &sc->sc_lbeg);
    __get_user(env->sregs[LEND], &sc->sc_lend);
    __get_user(env->sregs[LCOUNT], &sc->sc_lcount);

    /* Collapse to a single window, as setup_rt_frame did. */
    env->sregs[WINDOW_BASE] = 0;
    env->sregs[WINDOW_START] = 1;
    /* Only the CALLINC field is taken from the saved PS; the rest of
     * PS is kept as-is rather than letting the guest forge it.  */
    env->sregs[PS] = deposit32(env->sregs[PS],
                               PS_CALLINC_SHIFT,
                               PS_CALLINC_LEN,
                               extract32(ps, PS_CALLINC_SHIFT,
                                         PS_CALLINC_LEN));
    for (i = 0; i < 16; ++i) {
        __get_user(env->regs[i], sc->sc_a + i);
    }
    /* TODO: xtregs */
}
4469
4470 long do_rt_sigreturn(CPUXtensaState *env)
4471 {
4472 abi_ulong frame_addr = env->regs[1];
4473 struct target_rt_sigframe *frame;
4474 sigset_t set;
4475
4476 trace_user_do_rt_sigreturn(env, frame_addr);
4477 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4478 goto badframe;
4479 }
4480 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4481 set_sigmask(&set);
4482
4483 restore_sigcontext(env, frame);
4484
4485 if (do_sigaltstack(frame_addr +
4486 offsetof(struct target_rt_sigframe, uc.tuc_stack),
4487 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
4488 goto badframe;
4489 }
4490 unlock_user_struct(frame, frame_addr, 0);
4491 return -TARGET_QEMU_ESIGRETURN;
4492
4493 badframe:
4494 unlock_user_struct(frame, frame_addr, 0);
4495 force_sig(TARGET_SIGSEGV);
4496 return -TARGET_QEMU_ESIGRETURN;
4497 }
4498 #endif
4499
/* Deliver one pending guest signal: consult gdb, apply the default /
 * ignore / handler disposition, and for a real handler compute the new
 * blocked-signal mask and build the target signal frame.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /* gdb may swallow the signal (returns 0) or replace it. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default disposition: job-control stops map to a host SIGSTOP,
         * a few signals are ignored by default, everything else is
         * fatal (dump core and abort).  */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler: the new mask is the union of
         * the currently effective mask (sigsuspend's temporary mask if
         * we are inside sigsuspend) and sa_mask (+ sig itself).  */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
        || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
        || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
        || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
        || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: one-shot handler, revert to default. */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
4588
/* Drain all deliverable pending guest signals for this CPU.  Runs with
 * all host signals blocked so the emulated sigtab is stable; delivers
 * the forced synchronous signal first, then scans the ordinary queue,
 * restarting the scan after each delivery in case the handler setup
 * raised a new synchronous signal (e.g. SIGSEGV).
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            /* If the sync signal is blocked or ignored, force-unblock it
             * and reset its disposition to default before delivery.  */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* Inside sigsuspend, deliverability is judged against the
             * temporary sigsuspend mask rather than the normal mask.  */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS must never be blocked on the host: they carry
         * guest faults that QEMU itself has to catch.  */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}