linux-user: Use both si_code and si_signo when converting siginfo_t
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
33 };
34
35 static struct target_sigaction sigact_table[TARGET_NSIG];
36
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
39
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
82 };
83 static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
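/* Illustrative note (not from the original code): on_sig_stack() relies on
 * unsigned wraparound so a single comparison covers both bounds.  With a
 * hypothetical altstack at ss_sp = 0x1000 and ss_size = 0x2000:
 *
 *   sp = 0x1800: 0x1800 - 0x1000 = 0x0800 <  0x2000   -> on the stack
 *   sp = 0x0800: 0x0800 - 0x1000 wraps to a huge value -> not on it
 *   sp = 0x3000: 0x3000 - 0x1000 = 0x2000, not < 0x2000 -> not on it
 *
 * sas_ss_flags() then reports SS_DISABLE when no altstack is installed
 * (ss_size == 0), SS_ONSTACK while sp is inside it, and 0 otherwise.
 */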
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
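/* Illustrative sketch (not from the original code): once signal_init() below
 * has filled in the identity entries, the two tables are inverses of each
 * other for every value in [1, _NSIG).  The only non-identity mapping is the
 * __SIGRTMIN/__SIGRTMAX swap described above, so roughly:
 */
#if 0
assert(target_to_host_signal(host_to_target_signal(SIGUSR1)) == SIGUSR1);
assert(host_to_target_signal(__SIGRTMIN) == __SIGRTMAX);  /* swapped */
assert(host_to_target_signal(__SIGRTMAX) == __SIGRTMIN);  /* swapped */
#endif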
110
111 static inline void target_sigemptyset(target_sigset_t *set)
112 {
113 memset(set, 0, sizeof(*set));
114 }
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
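/* Illustrative example (not from the original code): the guest sigset is a
 * bitmap of TARGET_NSIG bits split into TARGET_NSIG_BPW-bit words, with
 * signal N stored at bit (N - 1).  For instance, with 32-bit words a
 * hypothetical signal 33 lands in word (33 - 1) / 32 = 1 at bit
 * (33 - 1) % 32 = 0, i.e. it is tested as set->sig[1] & 1.
 */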
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
194 int block_signals(void)
195 {
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
198 int pending;
199
200 /* It's OK to block everything including SIGSEGV, because we won't
201 * run any further guest code before unblocking signals in
202 * process_pending_signals().
203 */
204 sigfillset(&set);
205 sigprocmask(SIG_SETMASK, &set, 0);
206
207 pending = atomic_xchg(&ts->signal_pending, 1);
208
209 return pending;
210 }
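/* Illustrative usage sketch (not from the original code): code that is about
 * to modify per-thread signal state calls block_signals() first and backs
 * out if a guest signal was already pending, as do_sigprocmask() and
 * do_sigaction() below do:
 */
#if 0
if (block_signals()) {
    return -TARGET_ERESTARTSYS;  /* let the pending signal be delivered */
}
/* ...now safe to touch ts->signal_mask or sigact_table... */
#endif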
211
212 /* Wrapper for sigprocmask function
213 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
214 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
215 * a signal was already pending and the syscall must be restarted, or
216 * 0 on success.
217 * If set is NULL, this is guaranteed not to fail.
218 */
219 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
220 {
221 TaskState *ts = (TaskState *)thread_cpu->opaque;
222
223 if (oldset) {
224 *oldset = ts->signal_mask;
225 }
226
227 if (set) {
228 int i;
229
230 if (block_signals()) {
231 return -TARGET_ERESTARTSYS;
232 }
233
234 switch (how) {
235 case SIG_BLOCK:
236 sigorset(&ts->signal_mask, &ts->signal_mask, set);
237 break;
238 case SIG_UNBLOCK:
239 for (i = 1; i <= NSIG; ++i) {
240 if (sigismember(set, i)) {
241 sigdelset(&ts->signal_mask, i);
242 }
243 }
244 break;
245 case SIG_SETMASK:
246 ts->signal_mask = *set;
247 break;
248 default:
249 g_assert_not_reached();
250 }
251
252 /* Silently ignore attempts to change blocking status of KILL or STOP */
253 sigdelset(&ts->signal_mask, SIGKILL);
254 sigdelset(&ts->signal_mask, SIGSTOP);
255 }
256 return 0;
257 }
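/* Illustrative caller sketch (not from the original code): because set and
 * oldset are host-format sigsets, a syscall wrapper converts the guest mask
 * first and converts the old mask back afterwards.  target_set and
 * target_oldset below are hypothetical guest-side buffers:
 */
#if 0
target_sigset_t target_set, target_oldset;  /* hypothetical guest buffers */
sigset_t set, oldset;
int ret;

target_to_host_sigset(&set, &target_set);            /* guest -> host */
ret = do_sigprocmask(SIG_BLOCK, &set, &oldset);
if (ret == 0) {
    host_to_target_sigset(&target_oldset, &oldset);  /* host -> guest */
}
#endif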
258
259 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
260 !defined(TARGET_X86_64)
261 /* Just set the guest's signal mask to the specified value; the
262 * caller is assumed to have called block_signals() already.
263 */
264 static void set_sigmask(const sigset_t *set)
265 {
266 TaskState *ts = (TaskState *)thread_cpu->opaque;
267
268 ts->signal_mask = *set;
269 }
270 #endif
271
272 /* siginfo conversion */
273
274 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
275 const siginfo_t *info)
276 {
277 int sig = host_to_target_signal(info->si_signo);
278 int si_code = info->si_code;
279 int si_type;
280 tinfo->si_signo = sig;
281 tinfo->si_errno = 0;
282 tinfo->si_code = info->si_code;
283
284 /* This is awkward, because we have to use a combination of
285 * the si_code and si_signo to figure out which of the union's
286 * members are valid. (Within the host kernel it is always possible
287 * to tell, but the kernel carefully avoids giving userspace the
288 * high 16 bits of si_code, so we don't have the information to
289 * do this the easy way...) We therefore make our best guess,
290 * bearing in mind that a guest can spoof most of the si_codes
291 * via rt_sigqueueinfo() if it likes.
292 *
293 * Once we have made our guess, we record it in the top 16 bits of
294 * the si_code, so that tswap_siginfo() later can use it.
295 * tswap_siginfo() will strip these top bits out before writing
296 * si_code to the guest (sign-extending the lower bits).
297 */
298
299 switch (si_code) {
300 case SI_USER:
301 case SI_TKILL:
302 case SI_KERNEL:
303 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
304 * These are the only unspoofable si_code values.
305 */
306 tinfo->_sifields._kill._pid = info->si_pid;
307 tinfo->_sifields._kill._uid = info->si_uid;
308 si_type = QEMU_SI_KILL;
309 break;
310 default:
311 /* Everything else is spoofable. Make best guess based on signal */
312 switch (sig) {
313 case TARGET_SIGCHLD:
314 tinfo->_sifields._sigchld._pid = info->si_pid;
315 tinfo->_sifields._sigchld._uid = info->si_uid;
316 tinfo->_sifields._sigchld._status
317 = host_to_target_waitstatus(info->si_status);
318 tinfo->_sifields._sigchld._utime = info->si_utime;
319 tinfo->_sifields._sigchld._stime = info->si_stime;
320 si_type = QEMU_SI_CHLD;
321 break;
322 case TARGET_SIGIO:
323 tinfo->_sifields._sigpoll._band = info->si_band;
324 tinfo->_sifields._sigpoll._fd = info->si_fd;
325 si_type = QEMU_SI_POLL;
326 break;
327 default:
328 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
329 tinfo->_sifields._rt._pid = info->si_pid;
330 tinfo->_sifields._rt._uid = info->si_uid;
331 /* XXX: potential problem if 64 bit */
332 tinfo->_sifields._rt._sigval.sival_ptr
333 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
334 si_type = QEMU_SI_RT;
335 break;
336 }
337 break;
338 }
339
340 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
341 }
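/* Illustrative example (not from the original code): the guessed si_type is
 * stashed in the top half of si_code with deposit32() and recovered by
 * tswap_siginfo() with extract32()/sextract32().  For a SIGCHLD whose
 * si_code is CLD_EXITED (1):
 *
 *   stored  = deposit32(1, 16, 16, QEMU_SI_CHLD);
 *   si_type = extract32(stored, 16, 16);   -> QEMU_SI_CHLD
 *   si_code = sextract32(stored, 0, 16);   -> 1 (sign-extended)
 */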
342
343 static void tswap_siginfo(target_siginfo_t *tinfo,
344 const target_siginfo_t *info)
345 {
346 int si_type = extract32(info->si_code, 16, 16);
347 int si_code = sextract32(info->si_code, 0, 16);
348
349 __put_user(info->si_signo, &tinfo->si_signo);
350 __put_user(info->si_errno, &tinfo->si_errno);
351 __put_user(si_code, &tinfo->si_code);
352
353 /* We can use our internal marker of which fields in the structure
354 * are valid, rather than duplicating the guesswork of
355 * host_to_target_siginfo_noswap() here.
356 */
357 switch (si_type) {
358 case QEMU_SI_KILL:
359 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
360 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
361 break;
362 case QEMU_SI_TIMER:
363 __put_user(info->_sifields._timer._timer1,
364 &tinfo->_sifields._timer._timer1);
365 __put_user(info->_sifields._timer._timer2,
366 &tinfo->_sifields._timer._timer2);
367 break;
368 case QEMU_SI_POLL:
369 __put_user(info->_sifields._sigpoll._band,
370 &tinfo->_sifields._sigpoll._band);
371 __put_user(info->_sifields._sigpoll._fd,
372 &tinfo->_sifields._sigpoll._fd);
373 break;
374 case QEMU_SI_FAULT:
375 __put_user(info->_sifields._sigfault._addr,
376 &tinfo->_sifields._sigfault._addr);
377 break;
378 case QEMU_SI_CHLD:
379 __put_user(info->_sifields._sigchld._pid,
380 &tinfo->_sifields._sigchld._pid);
381 __put_user(info->_sifields._sigchld._uid,
382 &tinfo->_sifields._sigchld._uid);
383 __put_user(info->_sifields._sigchld._status,
384 &tinfo->_sifields._sigchld._status);
385 __put_user(info->_sifields._sigchld._utime,
386 &tinfo->_sifields._sigchld._utime);
387 __put_user(info->_sifields._sigchld._stime,
388 &tinfo->_sifields._sigchld._stime);
389 break;
390 case QEMU_SI_RT:
391 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
392 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
393 __put_user(info->_sifields._rt._sigval.sival_ptr,
394 &tinfo->_sifields._rt._sigval.sival_ptr);
395 break;
396 default:
397 g_assert_not_reached();
398 }
399 }
400
401 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
402 {
403 host_to_target_siginfo_noswap(tinfo, info);
404 tswap_siginfo(tinfo, tinfo);
405 }
406
407 /* XXX: we assume only POSIX RT signals are used. */
408 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
409 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
410 {
411 info->si_signo = tswap32(tinfo->si_signo);
412 info->si_errno = tswap32(tinfo->si_errno);
413 info->si_code = tswap32(tinfo->si_code);
414 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
415 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
416 info->si_value.sival_ptr =
417 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
418 }
419
420 static int fatal_signal (int sig)
421 {
422 switch (sig) {
423 case TARGET_SIGCHLD:
424 case TARGET_SIGURG:
425 case TARGET_SIGWINCH:
426 /* Ignored by default. */
427 return 0;
428 case TARGET_SIGCONT:
429 case TARGET_SIGSTOP:
430 case TARGET_SIGTSTP:
431 case TARGET_SIGTTIN:
432 case TARGET_SIGTTOU:
433 /* Job control signals. */
434 return 0;
435 default:
436 return 1;
437 }
438 }
439
440 /* returns 1 if the given signal should dump core if not handled */
441 static int core_dump_signal(int sig)
442 {
443 switch (sig) {
444 case TARGET_SIGABRT:
445 case TARGET_SIGFPE:
446 case TARGET_SIGILL:
447 case TARGET_SIGQUIT:
448 case TARGET_SIGSEGV:
449 case TARGET_SIGTRAP:
450 case TARGET_SIGBUS:
451 return (1);
452 default:
453 return (0);
454 }
455 }
456
457 void signal_init(void)
458 {
459 TaskState *ts = (TaskState *)thread_cpu->opaque;
460 struct sigaction act;
461 struct sigaction oact;
462 int i, j;
463 int host_sig;
464
465 /* generate signal conversion tables */
466 for(i = 1; i < _NSIG; i++) {
467 if (host_to_target_signal_table[i] == 0)
468 host_to_target_signal_table[i] = i;
469 }
470 for(i = 1; i < _NSIG; i++) {
471 j = host_to_target_signal_table[i];
472 target_to_host_signal_table[j] = i;
473 }
474
475 /* Set the signal mask from the host mask. */
476 sigprocmask(0, 0, &ts->signal_mask);
477
478 /* set all host signal handlers. ALL signals are blocked during
479 the handlers to serialize them. */
480 memset(sigact_table, 0, sizeof(sigact_table));
481
482 sigfillset(&act.sa_mask);
483 act.sa_flags = SA_SIGINFO;
484 act.sa_sigaction = host_signal_handler;
485 for(i = 1; i <= TARGET_NSIG; i++) {
486 host_sig = target_to_host_signal(i);
487 sigaction(host_sig, NULL, &oact);
488 if (oact.sa_sigaction == (void *)SIG_IGN) {
489 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
490 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
491 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
492 }
493 /* If there's already a handler installed then something has
494 gone horribly wrong, so don't even try to handle that case. */
495 /* Install some handlers for our own use. We need at least
496 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
497 trap all signals because it affects syscall interrupt
498 behavior. But do trap all default-fatal signals. */
499 if (fatal_signal (i))
500 sigaction(host_sig, &act, NULL);
501 }
502 }
503
504
505 /* abort execution with signal */
506 static void QEMU_NORETURN force_sig(int target_sig)
507 {
508 CPUState *cpu = thread_cpu;
509 CPUArchState *env = cpu->env_ptr;
510 TaskState *ts = (TaskState *)cpu->opaque;
511 int host_sig, core_dumped = 0;
512 struct sigaction act;
513
514 host_sig = target_to_host_signal(target_sig);
515 trace_user_force_sig(env, target_sig, host_sig);
516 gdb_signalled(env, target_sig);
517
518 /* dump core if supported by target binary format */
519 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
520 stop_all_tasks();
521 core_dumped =
522 ((*ts->bprm->core_dump)(target_sig, env) == 0);
523 }
524 if (core_dumped) {
525 /* we have already dumped the core of the target process, so we
526 * don't want a coredump of QEMU itself */
527 struct rlimit nodump;
528 getrlimit(RLIMIT_CORE, &nodump);
529 nodump.rlim_cur=0;
530 setrlimit(RLIMIT_CORE, &nodump);
531 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
532 target_sig, strsignal(host_sig), "core dumped" );
533 }
534
535 /* The proper exit code for dying from an uncaught signal is
536 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
537 * a negative value. To get the proper exit code we need to
538 * actually die from an uncaught signal. Here we install the default
539 * signal handler, send ourselves the signal and wait for it to
540 * arrive. */
541 sigfillset(&act.sa_mask);
542 act.sa_handler = SIG_DFL;
543 act.sa_flags = 0;
544 sigaction(host_sig, &act, NULL);
545
546 /* For some reason raise(host_sig) doesn't send the signal when
547 * statically linked on x86-64. */
548 kill(getpid(), host_sig);
549
550 /* Make sure the signal isn't masked (just reuse the mask inside
551 of act) */
552 sigdelset(&act.sa_mask, host_sig);
553 sigsuspend(&act.sa_mask);
554
555 /* unreachable */
556 abort();
557 }
558
559 /* queue a signal so that it will be sent to the virtual CPU as soon
560 as possible */
561 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
562 {
563 CPUState *cpu = ENV_GET_CPU(env);
564 TaskState *ts = cpu->opaque;
565
566 trace_user_queue_signal(env, sig);
567
568 /* Currently all callers define siginfo structures which
569 * use the _sifields._sigfault union member, so we can
570 * set the type here. If that changes we should push this
571 * out so the si_type is passed in by callers.
572 */
573 info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);
574
575 ts->sync_signal.info = *info;
576 ts->sync_signal.pending = sig;
577 /* signal that a new signal is pending */
578 atomic_set(&ts->signal_pending, 1);
579 return 1; /* indicates that the signal was queued */
580 }
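/* Illustrative caller sketch (not from the original code): a typical fault
 * path fills in a _sigfault-style siginfo and queues it.  The signal and
 * code values below are only an example, and fault_address is a
 * hypothetical variable:
 */
#if 0
target_siginfo_t info;
info.si_signo = TARGET_SIGBUS;
info.si_errno = 0;
info.si_code = TARGET_BUS_ADRALN;               /* example code only */
info._sifields._sigfault._addr = fault_address; /* hypothetical variable */
queue_signal(env, info.si_signo, &info);
#endif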
581
582 #ifndef HAVE_SAFE_SYSCALL
583 static inline void rewind_if_in_safe_syscall(void *puc)
584 {
585 /* Default version: never rewind */
586 }
587 #endif
588
589 static void host_signal_handler(int host_signum, siginfo_t *info,
590 void *puc)
591 {
592 CPUArchState *env = thread_cpu->env_ptr;
593 CPUState *cpu = ENV_GET_CPU(env);
594 TaskState *ts = cpu->opaque;
595
596 int sig;
597 target_siginfo_t tinfo;
598 ucontext_t *uc = puc;
599 struct emulated_sigtable *k;
600
601 /* The CPU emulator uses some host signals to detect exceptions,
602 so we forward those signals to it. */
603 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
604 && info->si_code > 0) {
605 if (cpu_signal_handler(host_signum, info, puc))
606 return;
607 }
608
609 /* get target signal number */
610 sig = host_to_target_signal(host_signum);
611 if (sig < 1 || sig > TARGET_NSIG)
612 return;
613 trace_user_host_signal(env, host_signum, sig);
614
615 rewind_if_in_safe_syscall(puc);
616
617 host_to_target_siginfo_noswap(&tinfo, info);
618 k = &ts->sigtab[sig - 1];
619 k->info = tinfo;
620 k->pending = sig;
621 ts->signal_pending = 1;
622
623 /* Block host signals until target signal handler entered. We
624 * can't block SIGSEGV or SIGBUS while we're executing guest
625 * code in case the guest code provokes one in the window between
626 * now and it getting out to the main loop. Signals will be
627 * unblocked again in process_pending_signals().
628 */
629 sigfillset(&uc->uc_sigmask);
630 sigdelset(&uc->uc_sigmask, SIGSEGV);
631 sigdelset(&uc->uc_sigmask, SIGBUS);
632
633 /* interrupt the virtual CPU as soon as possible */
634 cpu_exit(thread_cpu);
635 }
636
637 /* do_sigaltstack() returns target values and errnos. */
638 /* compare linux/kernel/signal.c:do_sigaltstack() */
639 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
640 {
641 int ret;
642 struct target_sigaltstack oss;
643
644 /* XXX: test errors */
645 if(uoss_addr)
646 {
647 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
648 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
649 __put_user(sas_ss_flags(sp), &oss.ss_flags);
650 }
651
652 if(uss_addr)
653 {
654 struct target_sigaltstack *uss;
655 struct target_sigaltstack ss;
656 size_t minstacksize = TARGET_MINSIGSTKSZ;
657
658 #if defined(TARGET_PPC64)
659 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
660 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
661 if (get_ppc64_abi(image) > 1) {
662 minstacksize = 4096;
663 }
664 #endif
665
666 ret = -TARGET_EFAULT;
667 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
668 goto out;
669 }
670 __get_user(ss.ss_sp, &uss->ss_sp);
671 __get_user(ss.ss_size, &uss->ss_size);
672 __get_user(ss.ss_flags, &uss->ss_flags);
673 unlock_user_struct(uss, uss_addr, 0);
674
675 ret = -TARGET_EPERM;
676 if (on_sig_stack(sp))
677 goto out;
678
679 ret = -TARGET_EINVAL;
680 if (ss.ss_flags != TARGET_SS_DISABLE
681 && ss.ss_flags != TARGET_SS_ONSTACK
682 && ss.ss_flags != 0)
683 goto out;
684
685 if (ss.ss_flags == TARGET_SS_DISABLE) {
686 ss.ss_size = 0;
687 ss.ss_sp = 0;
688 } else {
689 ret = -TARGET_ENOMEM;
690 if (ss.ss_size < minstacksize) {
691 goto out;
692 }
693 }
694
695 target_sigaltstack_used.ss_sp = ss.ss_sp;
696 target_sigaltstack_used.ss_size = ss.ss_size;
697 }
698
699 if (uoss_addr) {
700 ret = -TARGET_EFAULT;
701 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
702 goto out;
703 }
704
705 ret = 0;
706 out:
707 return ret;
708 }
709
710 /* do_sigaction() returns target values and host errnos */
711 int do_sigaction(int sig, const struct target_sigaction *act,
712 struct target_sigaction *oact)
713 {
714 struct target_sigaction *k;
715 struct sigaction act1;
716 int host_sig;
717 int ret = 0;
718
719 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
720 return -TARGET_EINVAL;
721 }
722
723 if (block_signals()) {
724 return -TARGET_ERESTARTSYS;
725 }
726
727 k = &sigact_table[sig - 1];
728 if (oact) {
729 __put_user(k->_sa_handler, &oact->_sa_handler);
730 __put_user(k->sa_flags, &oact->sa_flags);
731 #if !defined(TARGET_MIPS)
732 __put_user(k->sa_restorer, &oact->sa_restorer);
733 #endif
734 /* Not swapped. */
735 oact->sa_mask = k->sa_mask;
736 }
737 if (act) {
738 /* FIXME: This is not threadsafe. */
739 __get_user(k->_sa_handler, &act->_sa_handler);
740 __get_user(k->sa_flags, &act->sa_flags);
741 #if !defined(TARGET_MIPS)
742 __get_user(k->sa_restorer, &act->sa_restorer);
743 #endif
744 /* To be swapped in target_to_host_sigset. */
745 k->sa_mask = act->sa_mask;
746
747 /* we update the host linux signal state */
748 host_sig = target_to_host_signal(sig);
749 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
750 sigfillset(&act1.sa_mask);
751 act1.sa_flags = SA_SIGINFO;
752 if (k->sa_flags & TARGET_SA_RESTART)
753 act1.sa_flags |= SA_RESTART;
754 /* NOTE: it is important to update the host kernel signal
755 ignore state to avoid getting unexpected interrupted
756 syscalls */
757 if (k->_sa_handler == TARGET_SIG_IGN) {
758 act1.sa_sigaction = (void *)SIG_IGN;
759 } else if (k->_sa_handler == TARGET_SIG_DFL) {
760 if (fatal_signal (sig))
761 act1.sa_sigaction = host_signal_handler;
762 else
763 act1.sa_sigaction = (void *)SIG_DFL;
764 } else {
765 act1.sa_sigaction = host_signal_handler;
766 }
767 ret = sigaction(host_sig, &act1, NULL);
768 }
769 }
770 return ret;
771 }
772
773 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
774
775 /* from the Linux kernel */
776
777 struct target_fpreg {
778 uint16_t significand[4];
779 uint16_t exponent;
780 };
781
782 struct target_fpxreg {
783 uint16_t significand[4];
784 uint16_t exponent;
785 uint16_t padding[3];
786 };
787
788 struct target_xmmreg {
789 abi_ulong element[4];
790 };
791
792 struct target_fpstate {
793 /* Regular FPU environment */
794 abi_ulong cw;
795 abi_ulong sw;
796 abi_ulong tag;
797 abi_ulong ipoff;
798 abi_ulong cssel;
799 abi_ulong dataoff;
800 abi_ulong datasel;
801 struct target_fpreg _st[8];
802 uint16_t status;
803 uint16_t magic; /* 0xffff = regular FPU data only */
804
805 /* FXSR FPU environment */
806 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
807 abi_ulong mxcsr;
808 abi_ulong reserved;
809 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
810 struct target_xmmreg _xmm[8];
811 abi_ulong padding[56];
812 };
813
814 #define X86_FXSR_MAGIC 0x0000
815
816 struct target_sigcontext {
817 uint16_t gs, __gsh;
818 uint16_t fs, __fsh;
819 uint16_t es, __esh;
820 uint16_t ds, __dsh;
821 abi_ulong edi;
822 abi_ulong esi;
823 abi_ulong ebp;
824 abi_ulong esp;
825 abi_ulong ebx;
826 abi_ulong edx;
827 abi_ulong ecx;
828 abi_ulong eax;
829 abi_ulong trapno;
830 abi_ulong err;
831 abi_ulong eip;
832 uint16_t cs, __csh;
833 abi_ulong eflags;
834 abi_ulong esp_at_signal;
835 uint16_t ss, __ssh;
836 abi_ulong fpstate; /* pointer */
837 abi_ulong oldmask;
838 abi_ulong cr2;
839 };
840
841 struct target_ucontext {
842 abi_ulong tuc_flags;
843 abi_ulong tuc_link;
844 target_stack_t tuc_stack;
845 struct target_sigcontext tuc_mcontext;
846 target_sigset_t tuc_sigmask; /* mask last for extensibility */
847 };
848
849 struct sigframe
850 {
851 abi_ulong pretcode;
852 int sig;
853 struct target_sigcontext sc;
854 struct target_fpstate fpstate;
855 abi_ulong extramask[TARGET_NSIG_WORDS-1];
856 char retcode[8];
857 };
858
859 struct rt_sigframe
860 {
861 abi_ulong pretcode;
862 int sig;
863 abi_ulong pinfo;
864 abi_ulong puc;
865 struct target_siginfo info;
866 struct target_ucontext uc;
867 struct target_fpstate fpstate;
868 char retcode[8];
869 };
870
871 /*
872 * Set up a signal frame.
873 */
874
875 /* XXX: save x87 state */
876 static void setup_sigcontext(struct target_sigcontext *sc,
877 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
878 abi_ulong fpstate_addr)
879 {
880 CPUState *cs = CPU(x86_env_get_cpu(env));
881 uint16_t magic;
882
883 /* already locked in setup_frame() */
884 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
885 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
886 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
887 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
888 __put_user(env->regs[R_EDI], &sc->edi);
889 __put_user(env->regs[R_ESI], &sc->esi);
890 __put_user(env->regs[R_EBP], &sc->ebp);
891 __put_user(env->regs[R_ESP], &sc->esp);
892 __put_user(env->regs[R_EBX], &sc->ebx);
893 __put_user(env->regs[R_EDX], &sc->edx);
894 __put_user(env->regs[R_ECX], &sc->ecx);
895 __put_user(env->regs[R_EAX], &sc->eax);
896 __put_user(cs->exception_index, &sc->trapno);
897 __put_user(env->error_code, &sc->err);
898 __put_user(env->eip, &sc->eip);
899 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
900 __put_user(env->eflags, &sc->eflags);
901 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
902 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
903
904 cpu_x86_fsave(env, fpstate_addr, 1);
905 fpstate->status = fpstate->sw;
906 magic = 0xffff;
907 __put_user(magic, &fpstate->magic);
908 __put_user(fpstate_addr, &sc->fpstate);
909
910 /* non-iBCS2 extensions.. */
911 __put_user(mask, &sc->oldmask);
912 __put_user(env->cr[2], &sc->cr2);
913 }
914
915 /*
916 * Determine which stack to use..
917 */
918
919 static inline abi_ulong
920 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
921 {
922 unsigned long esp;
923
924 /* Default to using normal stack */
925 esp = env->regs[R_ESP];
926 /* This is the X/Open sanctioned signal stack switching. */
927 if (ka->sa_flags & TARGET_SA_ONSTACK) {
928 if (sas_ss_flags(esp) == 0) {
929 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
930 }
931 } else {
932
933 /* This is the legacy signal stack switching. */
934 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
935 !(ka->sa_flags & TARGET_SA_RESTORER) &&
936 ka->sa_restorer) {
937 esp = (unsigned long) ka->sa_restorer;
938 }
939 }
940 return (esp - frame_size) & -8ul;
941 }
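/* Illustrative note (not from the original code): the final
 * "(esp - frame_size) & -8ul" rounds the frame start down to an 8-byte
 * boundary.  With a hypothetical esp of 0xbffff00c and frame_size of 0x2c0:
 * 0xbffff00c - 0x2c0 = 0xbfffed4c, and 0xbfffed4c & ~7ul = 0xbfffed48.
 */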
942
943 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
944 static void setup_frame(int sig, struct target_sigaction *ka,
945 target_sigset_t *set, CPUX86State *env)
946 {
947 abi_ulong frame_addr;
948 struct sigframe *frame;
949 int i;
950
951 frame_addr = get_sigframe(ka, env, sizeof(*frame));
952 trace_user_setup_frame(env, frame_addr);
953
954 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
955 goto give_sigsegv;
956
957 __put_user(sig, &frame->sig);
958
959 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
960 frame_addr + offsetof(struct sigframe, fpstate));
961
962 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
963 __put_user(set->sig[i], &frame->extramask[i - 1]);
964 }
965
966 /* Set up to return from userspace. If provided, use a stub
967 already in userspace. */
968 if (ka->sa_flags & TARGET_SA_RESTORER) {
969 __put_user(ka->sa_restorer, &frame->pretcode);
970 } else {
971 uint16_t val16;
972 abi_ulong retcode_addr;
973 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
974 __put_user(retcode_addr, &frame->pretcode);
975 /* This is popl %eax ; movl $,%eax ; int $0x80 */
976 val16 = 0xb858;
977 __put_user(val16, (uint16_t *)(frame->retcode+0));
978 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
979 val16 = 0x80cd;
980 __put_user(val16, (uint16_t *)(frame->retcode+6));
981 }
982
983
984 /* Set up registers for signal handler */
985 env->regs[R_ESP] = frame_addr;
986 env->eip = ka->_sa_handler;
987
988 cpu_x86_load_seg(env, R_DS, __USER_DS);
989 cpu_x86_load_seg(env, R_ES, __USER_DS);
990 cpu_x86_load_seg(env, R_SS, __USER_DS);
991 cpu_x86_load_seg(env, R_CS, __USER_CS);
992 env->eflags &= ~TF_MASK;
993
994 unlock_user_struct(frame, frame_addr, 1);
995
996 return;
997
998 give_sigsegv:
999 if (sig == TARGET_SIGSEGV) {
1000 ka->_sa_handler = TARGET_SIG_DFL;
1001 }
1002 force_sig(TARGET_SIGSEGV /* , current */);
1003 }
1004
1005 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
1006 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1007 target_siginfo_t *info,
1008 target_sigset_t *set, CPUX86State *env)
1009 {
1010 abi_ulong frame_addr, addr;
1011 struct rt_sigframe *frame;
1012 int i;
1013
1014 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1015 trace_user_setup_rt_frame(env, frame_addr);
1016
1017 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1018 goto give_sigsegv;
1019
1020 __put_user(sig, &frame->sig);
1021 addr = frame_addr + offsetof(struct rt_sigframe, info);
1022 __put_user(addr, &frame->pinfo);
1023 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1024 __put_user(addr, &frame->puc);
1025 tswap_siginfo(&frame->info, info);
1026
1027 /* Create the ucontext. */
1028 __put_user(0, &frame->uc.tuc_flags);
1029 __put_user(0, &frame->uc.tuc_link);
1030 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1031 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1032 &frame->uc.tuc_stack.ss_flags);
1033 __put_user(target_sigaltstack_used.ss_size,
1034 &frame->uc.tuc_stack.ss_size);
1035 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1036 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1037
1038 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1039 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1040 }
1041
1042 /* Set up to return from userspace. If provided, use a stub
1043 already in userspace. */
1044 if (ka->sa_flags & TARGET_SA_RESTORER) {
1045 __put_user(ka->sa_restorer, &frame->pretcode);
1046 } else {
1047 uint16_t val16;
1048 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1049 __put_user(addr, &frame->pretcode);
1050 /* This is movl $,%eax ; int $0x80 */
1051 __put_user(0xb8, (char *)(frame->retcode+0));
1052 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1053 val16 = 0x80cd;
1054 __put_user(val16, (uint16_t *)(frame->retcode+5));
1055 }
1056
1057 /* Set up registers for signal handler */
1058 env->regs[R_ESP] = frame_addr;
1059 env->eip = ka->_sa_handler;
1060
1061 cpu_x86_load_seg(env, R_DS, __USER_DS);
1062 cpu_x86_load_seg(env, R_ES, __USER_DS);
1063 cpu_x86_load_seg(env, R_SS, __USER_DS);
1064 cpu_x86_load_seg(env, R_CS, __USER_CS);
1065 env->eflags &= ~TF_MASK;
1066
1067 unlock_user_struct(frame, frame_addr, 1);
1068
1069 return;
1070
1071 give_sigsegv:
1072 if (sig == TARGET_SIGSEGV) {
1073 ka->_sa_handler = TARGET_SIG_DFL;
1074 }
1075 force_sig(TARGET_SIGSEGV /* , current */);
1076 }
1077
1078 static int
1079 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1080 {
1081 unsigned int err = 0;
1082 abi_ulong fpstate_addr;
1083 unsigned int tmpflags;
1084
1085 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1086 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1087 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1088 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1089
1090 env->regs[R_EDI] = tswapl(sc->edi);
1091 env->regs[R_ESI] = tswapl(sc->esi);
1092 env->regs[R_EBP] = tswapl(sc->ebp);
1093 env->regs[R_ESP] = tswapl(sc->esp);
1094 env->regs[R_EBX] = tswapl(sc->ebx);
1095 env->regs[R_EDX] = tswapl(sc->edx);
1096 env->regs[R_ECX] = tswapl(sc->ecx);
1097 env->regs[R_EAX] = tswapl(sc->eax);
1098 env->eip = tswapl(sc->eip);
1099
1100 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1101 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1102
1103 tmpflags = tswapl(sc->eflags);
1104 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1105 // regs->orig_eax = -1; /* disable syscall checks */
1106
1107 fpstate_addr = tswapl(sc->fpstate);
1108 if (fpstate_addr != 0) {
1109 if (!access_ok(VERIFY_READ, fpstate_addr,
1110 sizeof(struct target_fpstate)))
1111 goto badframe;
1112 cpu_x86_frstor(env, fpstate_addr, 1);
1113 }
1114
1115 return err;
1116 badframe:
1117 return 1;
1118 }
1119
1120 long do_sigreturn(CPUX86State *env)
1121 {
1122 struct sigframe *frame;
1123 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1124 target_sigset_t target_set;
1125 sigset_t set;
1126 int i;
1127
1128 trace_user_do_sigreturn(env, frame_addr);
1129 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1130 goto badframe;
1131 /* set blocked signals */
1132 __get_user(target_set.sig[0], &frame->sc.oldmask);
1133 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1134 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1135 }
1136
1137 target_to_host_sigset_internal(&set, &target_set);
1138 set_sigmask(&set);
1139
1140 /* restore registers */
1141 if (restore_sigcontext(env, &frame->sc))
1142 goto badframe;
1143 unlock_user_struct(frame, frame_addr, 0);
1144 return -TARGET_QEMU_ESIGRETURN;
1145
1146 badframe:
1147 unlock_user_struct(frame, frame_addr, 0);
1148 force_sig(TARGET_SIGSEGV);
1149 return 0;
1150 }
1151
1152 long do_rt_sigreturn(CPUX86State *env)
1153 {
1154 abi_ulong frame_addr;
1155 struct rt_sigframe *frame;
1156 sigset_t set;
1157
1158 frame_addr = env->regs[R_ESP] - 4;
1159 trace_user_do_rt_sigreturn(env, frame_addr);
1160 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1161 goto badframe;
1162 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1163 set_sigmask(&set);
1164
1165 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1166 goto badframe;
1167 }
1168
1169 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1170 get_sp_from_cpustate(env)) == -EFAULT) {
1171 goto badframe;
1172 }
1173
1174 unlock_user_struct(frame, frame_addr, 0);
1175 return -TARGET_QEMU_ESIGRETURN;
1176
1177 badframe:
1178 unlock_user_struct(frame, frame_addr, 0);
1179 force_sig(TARGET_SIGSEGV);
1180 return 0;
1181 }
1182
1183 #elif defined(TARGET_AARCH64)
1184
1185 struct target_sigcontext {
1186 uint64_t fault_address;
1187 /* AArch64 registers */
1188 uint64_t regs[31];
1189 uint64_t sp;
1190 uint64_t pc;
1191 uint64_t pstate;
1192 /* 4K reserved for FP/SIMD state and future expansion */
1193 char __reserved[4096] __attribute__((__aligned__(16)));
1194 };
1195
1196 struct target_ucontext {
1197 abi_ulong tuc_flags;
1198 abi_ulong tuc_link;
1199 target_stack_t tuc_stack;
1200 target_sigset_t tuc_sigmask;
1201 /* glibc uses a 1024-bit sigset_t */
1202 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1203 /* last for future expansion */
1204 struct target_sigcontext tuc_mcontext;
1205 };
1206
1207 /*
1208 * Header to be used at the beginning of structures extending the user
1209 * context. Such structures must be placed after the rt_sigframe on the stack
1210 * and be 16-byte aligned. The last structure must be a dummy one with the
1211 * magic and size set to 0.
1212 */
1213 struct target_aarch64_ctx {
1214 uint32_t magic;
1215 uint32_t size;
1216 };
1217
1218 #define TARGET_FPSIMD_MAGIC 0x46508001
1219
1220 struct target_fpsimd_context {
1221 struct target_aarch64_ctx head;
1222 uint32_t fpsr;
1223 uint32_t fpcr;
1224 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1225 };
1226
1227 /*
1228 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1229 * user space as it will change with the addition of new context. User space
1230 * should check the magic/size information.
1231 */
1232 struct target_aux_context {
1233 struct target_fpsimd_context fpsimd;
1234 /* additional context to be added before "end" */
1235 struct target_aarch64_ctx end;
1236 };
1237
1238 struct target_rt_sigframe {
1239 struct target_siginfo info;
1240 struct target_ucontext uc;
1241 uint64_t fp;
1242 uint64_t lr;
1243 uint32_t tramp[2];
1244 };
1245
1246 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1247 CPUARMState *env, target_sigset_t *set)
1248 {
1249 int i;
1250 struct target_aux_context *aux =
1251 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1252
1253 /* set up the stack frame for unwinding */
1254 __put_user(env->xregs[29], &sf->fp);
1255 __put_user(env->xregs[30], &sf->lr);
1256
1257 for (i = 0; i < 31; i++) {
1258 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1259 }
1260 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1261 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1262 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1263
1264 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1265
1266 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1267 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1268 }
1269
1270 for (i = 0; i < 32; i++) {
1271 #ifdef TARGET_WORDS_BIGENDIAN
1272 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1273 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1274 #else
1275 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1276 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1277 #endif
1278 }
1279 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1280 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1281 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1282 __put_user(sizeof(struct target_fpsimd_context),
1283 &aux->fpsimd.head.size);
1284
1285 /* set the "end" magic */
1286 __put_user(0, &aux->end.magic);
1287 __put_user(0, &aux->end.size);
1288
1289 return 0;
1290 }
1291
1292 static int target_restore_sigframe(CPUARMState *env,
1293 struct target_rt_sigframe *sf)
1294 {
1295 sigset_t set;
1296 int i;
1297 struct target_aux_context *aux =
1298 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1299 uint32_t magic, size, fpsr, fpcr;
1300 uint64_t pstate;
1301
1302 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1303 set_sigmask(&set);
1304
1305 for (i = 0; i < 31; i++) {
1306 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1307 }
1308
1309 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1310 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1311 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1312 pstate_write(env, pstate);
1313
1314 __get_user(magic, &aux->fpsimd.head.magic);
1315 __get_user(size, &aux->fpsimd.head.size);
1316
1317 if (magic != TARGET_FPSIMD_MAGIC
1318 || size != sizeof(struct target_fpsimd_context)) {
1319 return 1;
1320 }
1321
1322 for (i = 0; i < 32; i++) {
1323 #ifdef TARGET_WORDS_BIGENDIAN
1324 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1325 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1326 #else
1327 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1328 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1329 #endif
1330 }
1331 __get_user(fpsr, &aux->fpsimd.fpsr);
1332 vfp_set_fpsr(env, fpsr);
1333 __get_user(fpcr, &aux->fpsimd.fpcr);
1334 vfp_set_fpcr(env, fpcr);
1335
1336 return 0;
1337 }
1338
1339 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1340 {
1341 abi_ulong sp;
1342
1343 sp = env->xregs[31];
1344
1345 /*
1346 * This is the X/Open sanctioned signal stack switching.
1347 */
1348 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1349 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1350 }
1351
1352 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1353
1354 return sp;
1355 }
1356
1357 static void target_setup_frame(int usig, struct target_sigaction *ka,
1358 target_siginfo_t *info, target_sigset_t *set,
1359 CPUARMState *env)
1360 {
1361 struct target_rt_sigframe *frame;
1362 abi_ulong frame_addr, return_addr;
1363
1364 frame_addr = get_sigframe(ka, env);
1365 trace_user_setup_frame(env, frame_addr);
1366 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1367 goto give_sigsegv;
1368 }
1369
1370 __put_user(0, &frame->uc.tuc_flags);
1371 __put_user(0, &frame->uc.tuc_link);
1372
1373 __put_user(target_sigaltstack_used.ss_sp,
1374 &frame->uc.tuc_stack.ss_sp);
1375 __put_user(sas_ss_flags(env->xregs[31]),
1376 &frame->uc.tuc_stack.ss_flags);
1377 __put_user(target_sigaltstack_used.ss_size,
1378 &frame->uc.tuc_stack.ss_size);
1379 target_setup_sigframe(frame, env, set);
1380 if (ka->sa_flags & TARGET_SA_RESTORER) {
1381 return_addr = ka->sa_restorer;
1382 } else {
1383 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1384 __put_user(0xd2801168, &frame->tramp[0]);
1385 __put_user(0xd4000001, &frame->tramp[1]);
1386 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1387 }
1388 env->xregs[0] = usig;
1389 env->xregs[31] = frame_addr;
1390 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1391 env->pc = ka->_sa_handler;
1392 env->xregs[30] = return_addr;
1393 if (info) {
1394 tswap_siginfo(&frame->info, info);
1395 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1396 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1397 }
1398
1399 unlock_user_struct(frame, frame_addr, 1);
1400 return;
1401
1402 give_sigsegv:
1403 unlock_user_struct(frame, frame_addr, 1);
1404 force_sig(TARGET_SIGSEGV);
1405 }
1406
1407 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1408 target_siginfo_t *info, target_sigset_t *set,
1409 CPUARMState *env)
1410 {
1411 target_setup_frame(sig, ka, info, set, env);
1412 }
1413
1414 static void setup_frame(int sig, struct target_sigaction *ka,
1415 target_sigset_t *set, CPUARMState *env)
1416 {
1417 target_setup_frame(sig, ka, 0, set, env);
1418 }
1419
1420 long do_rt_sigreturn(CPUARMState *env)
1421 {
1422 struct target_rt_sigframe *frame = NULL;
1423 abi_ulong frame_addr = env->xregs[31];
1424
1425 trace_user_do_rt_sigreturn(env, frame_addr);
1426 if (frame_addr & 15) {
1427 goto badframe;
1428 }
1429
1430 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1431 goto badframe;
1432 }
1433
1434 if (target_restore_sigframe(env, frame)) {
1435 goto badframe;
1436 }
1437
1438 if (do_sigaltstack(frame_addr +
1439 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1440 0, get_sp_from_cpustate(env)) == -EFAULT) {
1441 goto badframe;
1442 }
1443
1444 unlock_user_struct(frame, frame_addr, 0);
1445 return -TARGET_QEMU_ESIGRETURN;
1446
1447 badframe:
1448 unlock_user_struct(frame, frame_addr, 0);
1449 force_sig(TARGET_SIGSEGV);
1450 return 0;
1451 }
1452
1453 long do_sigreturn(CPUARMState *env)
1454 {
1455 return do_rt_sigreturn(env);
1456 }
1457
1458 #elif defined(TARGET_ARM)
1459
1460 struct target_sigcontext {
1461 abi_ulong trap_no;
1462 abi_ulong error_code;
1463 abi_ulong oldmask;
1464 abi_ulong arm_r0;
1465 abi_ulong arm_r1;
1466 abi_ulong arm_r2;
1467 abi_ulong arm_r3;
1468 abi_ulong arm_r4;
1469 abi_ulong arm_r5;
1470 abi_ulong arm_r6;
1471 abi_ulong arm_r7;
1472 abi_ulong arm_r8;
1473 abi_ulong arm_r9;
1474 abi_ulong arm_r10;
1475 abi_ulong arm_fp;
1476 abi_ulong arm_ip;
1477 abi_ulong arm_sp;
1478 abi_ulong arm_lr;
1479 abi_ulong arm_pc;
1480 abi_ulong arm_cpsr;
1481 abi_ulong fault_address;
1482 };
1483
1484 struct target_ucontext_v1 {
1485 abi_ulong tuc_flags;
1486 abi_ulong tuc_link;
1487 target_stack_t tuc_stack;
1488 struct target_sigcontext tuc_mcontext;
1489 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1490 };
1491
1492 struct target_ucontext_v2 {
1493 abi_ulong tuc_flags;
1494 abi_ulong tuc_link;
1495 target_stack_t tuc_stack;
1496 struct target_sigcontext tuc_mcontext;
1497 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1498 char __unused[128 - sizeof(target_sigset_t)];
1499 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1500 };
1501
1502 struct target_user_vfp {
1503 uint64_t fpregs[32];
1504 abi_ulong fpscr;
1505 };
1506
1507 struct target_user_vfp_exc {
1508 abi_ulong fpexc;
1509 abi_ulong fpinst;
1510 abi_ulong fpinst2;
1511 };
1512
1513 struct target_vfp_sigframe {
1514 abi_ulong magic;
1515 abi_ulong size;
1516 struct target_user_vfp ufp;
1517 struct target_user_vfp_exc ufp_exc;
1518 } __attribute__((__aligned__(8)));
1519
1520 struct target_iwmmxt_sigframe {
1521 abi_ulong magic;
1522 abi_ulong size;
1523 uint64_t regs[16];
1524 /* Note that not all the coprocessor control registers are stored here */
1525 uint32_t wcssf;
1526 uint32_t wcasf;
1527 uint32_t wcgr0;
1528 uint32_t wcgr1;
1529 uint32_t wcgr2;
1530 uint32_t wcgr3;
1531 } __attribute__((__aligned__(8)));
1532
1533 #define TARGET_VFP_MAGIC 0x56465001
1534 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1535
1536 struct sigframe_v1
1537 {
1538 struct target_sigcontext sc;
1539 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1540 abi_ulong retcode;
1541 };
1542
1543 struct sigframe_v2
1544 {
1545 struct target_ucontext_v2 uc;
1546 abi_ulong retcode;
1547 };
1548
1549 struct rt_sigframe_v1
1550 {
1551 abi_ulong pinfo;
1552 abi_ulong puc;
1553 struct target_siginfo info;
1554 struct target_ucontext_v1 uc;
1555 abi_ulong retcode;
1556 };
1557
1558 struct rt_sigframe_v2
1559 {
1560 struct target_siginfo info;
1561 struct target_ucontext_v2 uc;
1562 abi_ulong retcode;
1563 };
1564
1565 #define TARGET_CONFIG_CPU_32 1
1566
1567 /*
1568 * For ARM syscalls, we encode the syscall number into the instruction.
1569 */
1570 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1571 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1572
1573 /*
1574 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1575 * need two 16-bit instructions.
1576 */
1577 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1578 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1579
1580 static const abi_ulong retcodes[4] = {
1581 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1582 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1583 };
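/* Illustrative decode (not from the original code): SWI_THUMB_SIGRETURN
 * packs two 16-bit Thumb instructions into one word: 0x2700 | NR is
 * "movs r7, #TARGET_NR_sigreturn" in the low half and 0xdf00 is "svc 0" in
 * the high half.  The ARM encodings instead embed the OABI syscall number
 * (TARGET_NR_* + ARM_SYSCALL_BASE) directly in the SWI immediate.
 */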
1584
1585
1586 static inline int valid_user_regs(CPUARMState *regs)
1587 {
1588 return 1;
1589 }
1590
1591 static void
1592 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1593 CPUARMState *env, abi_ulong mask)
1594 {
1595 __put_user(env->regs[0], &sc->arm_r0);
1596 __put_user(env->regs[1], &sc->arm_r1);
1597 __put_user(env->regs[2], &sc->arm_r2);
1598 __put_user(env->regs[3], &sc->arm_r3);
1599 __put_user(env->regs[4], &sc->arm_r4);
1600 __put_user(env->regs[5], &sc->arm_r5);
1601 __put_user(env->regs[6], &sc->arm_r6);
1602 __put_user(env->regs[7], &sc->arm_r7);
1603 __put_user(env->regs[8], &sc->arm_r8);
1604 __put_user(env->regs[9], &sc->arm_r9);
1605 __put_user(env->regs[10], &sc->arm_r10);
1606 __put_user(env->regs[11], &sc->arm_fp);
1607 __put_user(env->regs[12], &sc->arm_ip);
1608 __put_user(env->regs[13], &sc->arm_sp);
1609 __put_user(env->regs[14], &sc->arm_lr);
1610 __put_user(env->regs[15], &sc->arm_pc);
1611 #ifdef TARGET_CONFIG_CPU_32
1612 __put_user(cpsr_read(env), &sc->arm_cpsr);
1613 #endif
1614
1615 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1616 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1617 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1618 __put_user(mask, &sc->oldmask);
1619 }
1620
1621 static inline abi_ulong
1622 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1623 {
1624 unsigned long sp = regs->regs[13];
1625
1626 /*
1627 * This is the X/Open sanctioned signal stack switching.
1628 */
1629 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1630 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1631 }
1632 /*
1633 * ATPCS B01 mandates 8-byte alignment
1634 */
1635 return (sp - framesize) & ~7;
1636 }
1637
1638 static void
1639 setup_return(CPUARMState *env, struct target_sigaction *ka,
1640 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1641 {
1642 abi_ulong handler = ka->_sa_handler;
1643 abi_ulong retcode;
1644 int thumb = handler & 1;
1645 uint32_t cpsr = cpsr_read(env);
1646
1647 cpsr &= ~CPSR_IT;
1648 if (thumb) {
1649 cpsr |= CPSR_T;
1650 } else {
1651 cpsr &= ~CPSR_T;
1652 }
1653
1654 if (ka->sa_flags & TARGET_SA_RESTORER) {
1655 retcode = ka->sa_restorer;
1656 } else {
1657 unsigned int idx = thumb;
1658
1659 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1660 idx += 2;
1661 }
1662
1663 __put_user(retcodes[idx], rc);
1664
1665 retcode = rc_addr + thumb;
1666 }
1667
1668 env->regs[0] = usig;
1669 env->regs[13] = frame_addr;
1670 env->regs[14] = retcode;
1671 env->regs[15] = handler & (thumb ? ~1 : ~3);
1672 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1673 }
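/* Illustrative note (not from the original code): bit 0 of the handler
 * address selects the instruction set, mirroring a BX to that address.  A
 * hypothetical handler at 0x8001 is entered at pc = 0x8000 with CPSR.T set
 * (Thumb), while 0x8000 is entered in ARM state with the pc masked to a
 * 4-byte boundary; the generated return address gets the same +1 so the
 * retcode stub runs in the matching state.
 */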
1674
1675 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1676 {
1677 int i;
1678 struct target_vfp_sigframe *vfpframe;
1679 vfpframe = (struct target_vfp_sigframe *)regspace;
1680 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1681 __put_user(sizeof(*vfpframe), &vfpframe->size);
1682 for (i = 0; i < 32; i++) {
1683 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1684 }
1685 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1686 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1687 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1688 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1689 return (abi_ulong*)(vfpframe+1);
1690 }
1691
1692 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1693 CPUARMState *env)
1694 {
1695 int i;
1696 struct target_iwmmxt_sigframe *iwmmxtframe;
1697 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1698 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1699 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1700 for (i = 0; i < 16; i++) {
1701 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1702 }
1703 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1704 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1705 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1706 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1707 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1708 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1709 return (abi_ulong*)(iwmmxtframe+1);
1710 }
1711
1712 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1713 target_sigset_t *set, CPUARMState *env)
1714 {
1715 struct target_sigaltstack stack;
1716 int i;
1717 abi_ulong *regspace;
1718
1719 /* Clear all the bits of the ucontext we don't use. */
1720 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1721
1722 memset(&stack, 0, sizeof(stack));
1723 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1724 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1725 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1726 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1727
1728 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1729 /* Save coprocessor signal frame. */
1730 regspace = uc->tuc_regspace;
1731 if (arm_feature(env, ARM_FEATURE_VFP)) {
1732 regspace = setup_sigframe_v2_vfp(regspace, env);
1733 }
1734 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1735 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1736 }
1737
1738 /* Write terminating magic word */
1739 __put_user(0, regspace);
1740
1741 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1742 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1743 }
1744 }
1745
1746 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1747 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1748 target_sigset_t *set, CPUARMState *regs)
1749 {
1750 struct sigframe_v1 *frame;
1751 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1752 int i;
1753
1754 trace_user_setup_frame(regs, frame_addr);
1755 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1756 return;
1757 }
1758
1759 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1760
1761 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1762 __put_user(set->sig[i], &frame->extramask[i - 1]);
1763 }
1764
1765 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1766 frame_addr + offsetof(struct sigframe_v1, retcode));
1767
1768 unlock_user_struct(frame, frame_addr, 1);
1769 }
1770
1771 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1772 target_sigset_t *set, CPUARMState *regs)
1773 {
1774 struct sigframe_v2 *frame;
1775 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1776
1777 trace_user_setup_frame(regs, frame_addr);
1778 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1779 return;
1780 }
1781
1782 setup_sigframe_v2(&frame->uc, set, regs);
1783
1784 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1785 frame_addr + offsetof(struct sigframe_v2, retcode));
1786
1787 unlock_user_struct(frame, frame_addr, 1);
1788 }
1789
1790 static void setup_frame(int usig, struct target_sigaction *ka,
1791 target_sigset_t *set, CPUARMState *regs)
1792 {
1793 if (get_osversion() >= 0x020612) {
1794 setup_frame_v2(usig, ka, set, regs);
1795 } else {
1796 setup_frame_v1(usig, ka, set, regs);
1797 }
1798 }
1799
1800 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1801 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1802 target_siginfo_t *info,
1803 target_sigset_t *set, CPUARMState *env)
1804 {
1805 struct rt_sigframe_v1 *frame;
1806 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1807 struct target_sigaltstack stack;
1808 int i;
1809 abi_ulong info_addr, uc_addr;
1810
1811 trace_user_setup_rt_frame(env, frame_addr);
1812 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1813 return /* 1 */;
1814 }
1815
1816 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1817 __put_user(info_addr, &frame->pinfo);
1818 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1819 __put_user(uc_addr, &frame->puc);
1820 tswap_siginfo(&frame->info, info);
1821
1822 /* Clear all the bits of the ucontext we don't use. */
1823 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1824
1825 memset(&stack, 0, sizeof(stack));
1826 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1827 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1828 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1829 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1830
1831 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1832 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1833 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1834 }
1835
1836 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1837 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1838
1839 env->regs[1] = info_addr;
1840 env->regs[2] = uc_addr;
1841
1842 unlock_user_struct(frame, frame_addr, 1);
1843 }
1844
1845 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1846 target_siginfo_t *info,
1847 target_sigset_t *set, CPUARMState *env)
1848 {
1849 struct rt_sigframe_v2 *frame;
1850 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1851 abi_ulong info_addr, uc_addr;
1852
1853 trace_user_setup_rt_frame(env, frame_addr);
1854 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1855 return /* 1 */;
1856 }
1857
1858 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1859 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1860 tswap_siginfo(&frame->info, info);
1861
1862 setup_sigframe_v2(&frame->uc, set, env);
1863
1864 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1865 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1866
1867 env->regs[1] = info_addr;
1868 env->regs[2] = uc_addr;
1869
1870 unlock_user_struct(frame, frame_addr, 1);
1871 }
1872
1873 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1874 target_siginfo_t *info,
1875 target_sigset_t *set, CPUARMState *env)
1876 {
1877 if (get_osversion() >= 0x020612) {
1878 setup_rt_frame_v2(usig, ka, info, set, env);
1879 } else {
1880 setup_rt_frame_v1(usig, ka, info, set, env);
1881 }
1882 }
1883
1884 static int
1885 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1886 {
1887 int err = 0;
1888 uint32_t cpsr;
1889
1890 __get_user(env->regs[0], &sc->arm_r0);
1891 __get_user(env->regs[1], &sc->arm_r1);
1892 __get_user(env->regs[2], &sc->arm_r2);
1893 __get_user(env->regs[3], &sc->arm_r3);
1894 __get_user(env->regs[4], &sc->arm_r4);
1895 __get_user(env->regs[5], &sc->arm_r5);
1896 __get_user(env->regs[6], &sc->arm_r6);
1897 __get_user(env->regs[7], &sc->arm_r7);
1898 __get_user(env->regs[8], &sc->arm_r8);
1899 __get_user(env->regs[9], &sc->arm_r9);
1900 __get_user(env->regs[10], &sc->arm_r10);
1901 __get_user(env->regs[11], &sc->arm_fp);
1902 __get_user(env->regs[12], &sc->arm_ip);
1903 __get_user(env->regs[13], &sc->arm_sp);
1904 __get_user(env->regs[14], &sc->arm_lr);
1905 __get_user(env->regs[15], &sc->arm_pc);
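    /* Note: only the user-visible flag bits (CPSR_USER) and the Thumb/IT
     * execution state (CPSR_EXEC) are copied from the saved CPSR; the mode
     * and interrupt-mask bits are left untouched, so a forged sigcontext
     * cannot switch the CPU into a privileged mode.
     */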
1906 #ifdef TARGET_CONFIG_CPU_32
1907 __get_user(cpsr, &sc->arm_cpsr);
1908 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1909 #endif
1910
1911 err |= !valid_user_regs(env);
1912
1913 return err;
1914 }
1915
1916 static long do_sigreturn_v1(CPUARMState *env)
1917 {
1918 abi_ulong frame_addr;
1919 struct sigframe_v1 *frame = NULL;
1920 target_sigset_t set;
1921 sigset_t host_set;
1922 int i;
1923
1924 /*
1925 * Since we stacked the signal on a 64-bit boundary,
1926 * then 'sp' should be word aligned here. If it's
1927 * not, then the user is trying to mess with us.
1928 */
1929 frame_addr = env->regs[13];
1930 trace_user_do_sigreturn(env, frame_addr);
1931 if (frame_addr & 7) {
1932 goto badframe;
1933 }
1934
1935 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1936 goto badframe;
1937 }
1938
1939 __get_user(set.sig[0], &frame->sc.oldmask);
1940 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1941 __get_user(set.sig[i], &frame->extramask[i - 1]);
1942 }
1943
1944 target_to_host_sigset_internal(&host_set, &set);
1945 set_sigmask(&host_set);
1946
1947 if (restore_sigcontext(env, &frame->sc)) {
1948 goto badframe;
1949 }
1950
1951 #if 0
1952 /* Send SIGTRAP if we're single-stepping */
1953 if (ptrace_cancel_bpt(current))
1954 send_sig(SIGTRAP, current, 1);
1955 #endif
1956 unlock_user_struct(frame, frame_addr, 0);
1957 return -TARGET_QEMU_ESIGRETURN;
1958
1959 badframe:
1960 force_sig(TARGET_SIGSEGV /* , current */);
1961 return 0;
1962 }
1963
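/* Note on the v2 coprocessor save area: after the ucontext the kernel lays
 * out zero or more records, each beginning with a magic word and a total
 * size, roughly:
 *
 *     struct { abi_ulong magic; abi_ulong size; ... payload ... };
 *
 * The restore helpers below check magic/size and return a pointer just past
 * the record they consumed, or 0 on a mismatch, which do_sigframe_return_v2()
 * treats as a bad frame.
 */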
1964 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1965 {
1966 int i;
1967 abi_ulong magic, sz;
1968 uint32_t fpscr, fpexc;
1969 struct target_vfp_sigframe *vfpframe;
1970 vfpframe = (struct target_vfp_sigframe *)regspace;
1971
1972 __get_user(magic, &vfpframe->magic);
1973 __get_user(sz, &vfpframe->size);
1974 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1975 return 0;
1976 }
1977 for (i = 0; i < 32; i++) {
1978 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1979 }
1980 __get_user(fpscr, &vfpframe->ufp.fpscr);
1981 vfp_set_fpscr(env, fpscr);
1982 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1986 /* Sanitise FPEXC: set EN (bit 30) so VFP is enabled, and clear
1987 * EX (bit 31) and FP2V (bit 28) so no exception is pending and
1988 * FPINST2 is treated as invalid. */
1986 fpexc |= (1 << 30);
1987 fpexc &= ~((1 << 31) | (1 << 28));
1988 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1989 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1990 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1991 return (abi_ulong*)(vfpframe + 1);
1992 }
1993
1994 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1995 abi_ulong *regspace)
1996 {
1997 int i;
1998 abi_ulong magic, sz;
1999 struct target_iwmmxt_sigframe *iwmmxtframe;
2000 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2001
2002 __get_user(magic, &iwmmxtframe->magic);
2003 __get_user(sz, &iwmmxtframe->size);
2004 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2005 return 0;
2006 }
2007 for (i = 0; i < 16; i++) {
2008 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2009 }
2010 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2011 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2012 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2013 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2014 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2015 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2016 return (abi_ulong*)(iwmmxtframe + 1);
2017 }
2018
2019 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
2020 struct target_ucontext_v2 *uc)
2021 {
2022 sigset_t host_set;
2023 abi_ulong *regspace;
2024
2025 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2026 set_sigmask(&host_set);
2027
2028 if (restore_sigcontext(env, &uc->tuc_mcontext))
2029 return 1;
2030
2031 /* Restore coprocessor signal frame */
2032 regspace = uc->tuc_regspace;
2033 if (arm_feature(env, ARM_FEATURE_VFP)) {
2034 regspace = restore_sigframe_v2_vfp(env, regspace);
2035 if (!regspace) {
2036 return 1;
2037 }
2038 }
2039 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2040 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2041 if (!regspace) {
2042 return 1;
2043 }
2044 }
2045
2046 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2047 return 1;
2048
2049 #if 0
2050 /* Send SIGTRAP if we're single-stepping */
2051 if (ptrace_cancel_bpt(current))
2052 send_sig(SIGTRAP, current, 1);
2053 #endif
2054
2055 return 0;
2056 }
2057
2058 static long do_sigreturn_v2(CPUARMState *env)
2059 {
2060 abi_ulong frame_addr;
2061 struct sigframe_v2 *frame = NULL;
2062
2063 /*
2064 * Since we stacked the signal on a 64-bit boundary,
2065 * then 'sp' should be word aligned here. If it's
2066 * not, then the user is trying to mess with us.
2067 */
2068 frame_addr = env->regs[13];
2069 trace_user_do_sigreturn(env, frame_addr);
2070 if (frame_addr & 7) {
2071 goto badframe;
2072 }
2073
2074 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2075 goto badframe;
2076 }
2077
2078 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2079 goto badframe;
2080 }
2081
2082 unlock_user_struct(frame, frame_addr, 0);
2083 return -TARGET_QEMU_ESIGRETURN;
2084
2085 badframe:
2086 unlock_user_struct(frame, frame_addr, 0);
2087 force_sig(TARGET_SIGSEGV /* , current */);
2088 return 0;
2089 }
2090
2091 long do_sigreturn(CPUARMState *env)
2092 {
2093 if (get_osversion() >= 0x020612) {
2094 return do_sigreturn_v2(env);
2095 } else {
2096 return do_sigreturn_v1(env);
2097 }
2098 }
2099
2100 static long do_rt_sigreturn_v1(CPUARMState *env)
2101 {
2102 abi_ulong frame_addr;
2103 struct rt_sigframe_v1 *frame = NULL;
2104 sigset_t host_set;
2105
2106 /*
2107 * Since we stacked the signal on a 64-bit boundary,
2108 * then 'sp' should be word aligned here. If it's
2109 * not, then the user is trying to mess with us.
2110 */
2111 frame_addr = env->regs[13];
2112 trace_user_do_rt_sigreturn(env, frame_addr);
2113 if (frame_addr & 7) {
2114 goto badframe;
2115 }
2116
2117 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2118 goto badframe;
2119 }
2120
2121 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2122 set_sigmask(&host_set);
2123
2124 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2125 goto badframe;
2126 }
2127
2128 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2129 goto badframe;
2130
2131 #if 0
2132 /* Send SIGTRAP if we're single-stepping */
2133 if (ptrace_cancel_bpt(current))
2134 send_sig(SIGTRAP, current, 1);
2135 #endif
2136 unlock_user_struct(frame, frame_addr, 0);
2137 return -TARGET_QEMU_ESIGRETURN;
2138
2139 badframe:
2140 unlock_user_struct(frame, frame_addr, 0);
2141 force_sig(TARGET_SIGSEGV /* , current */);
2142 return 0;
2143 }
2144
2145 static long do_rt_sigreturn_v2(CPUARMState *env)
2146 {
2147 abi_ulong frame_addr;
2148 struct rt_sigframe_v2 *frame = NULL;
2149
2150 /*
2151 * Since we stacked the signal on a 64-bit boundary,
2152 * then 'sp' should be word aligned here. If it's
2153 * not, then the user is trying to mess with us.
2154 */
2155 frame_addr = env->regs[13];
2156 trace_user_do_rt_sigreturn(env, frame_addr);
2157 if (frame_addr & 7) {
2158 goto badframe;
2159 }
2160
2161 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2162 goto badframe;
2163 }
2164
2165 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2166 goto badframe;
2167 }
2168
2169 unlock_user_struct(frame, frame_addr, 0);
2170 return -TARGET_QEMU_ESIGRETURN;
2171
2172 badframe:
2173 unlock_user_struct(frame, frame_addr, 0);
2174 force_sig(TARGET_SIGSEGV /* , current */);
2175 return 0;
2176 }
2177
2178 long do_rt_sigreturn(CPUARMState *env)
2179 {
2180 if (get_osversion() >= 0x020612) {
2181 return do_rt_sigreturn_v2(env);
2182 } else {
2183 return do_rt_sigreturn_v1(env);
2184 }
2185 }
2186
2187 #elif defined(TARGET_SPARC)
2188
2189 #define __SUNOS_MAXWIN 31
2190
2191 /* This is what SunOS does, so shall I. */
2192 struct target_sigcontext {
2193 abi_ulong sigc_onstack; /* state to restore */
2194
2195 abi_ulong sigc_mask; /* sigmask to restore */
2196 abi_ulong sigc_sp; /* stack pointer */
2197 abi_ulong sigc_pc; /* program counter */
2198 abi_ulong sigc_npc; /* next program counter */
2199 abi_ulong sigc_psr; /* for condition codes etc */
2200 abi_ulong sigc_g1; /* User uses these two registers */
2201 abi_ulong sigc_o0; /* within the trampoline code. */
2202
2203 /* Now comes information regarding the user's window set
2204 * at the time of the signal.
2205 */
2206 abi_ulong sigc_oswins; /* outstanding windows */
2207
2208 /* stack ptrs for each regwin buf */
2209 char *sigc_spbuf[__SUNOS_MAXWIN];
2210
2211 /* Windows to restore after signal */
2212 struct {
2213 abi_ulong locals[8];
2214 abi_ulong ins[8];
2215 } sigc_wbuf[__SUNOS_MAXWIN];
2216 };
2217 /* A Sparc stack frame */
2218 struct sparc_stackf {
2219 abi_ulong locals[8];
2220 abi_ulong ins[8];
2221 /* It's simpler to treat fp and callers_pc as elements of ins[]
2222 * since we never need to access them ourselves.
2223 */
2224 char *structptr;
2225 abi_ulong xargs[6];
2226 abi_ulong xxargs[1];
2227 };
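/* Note: target_signal_frame below embeds a sparc_stackf at its start, and
 * setup_frame() copies the interrupted window's locals and ins into it, so
 * the frame the handler sees at the top of the stack looks like an ordinary
 * register window save area.
 */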
2228
2229 typedef struct {
2230 struct {
2231 abi_ulong psr;
2232 abi_ulong pc;
2233 abi_ulong npc;
2234 abi_ulong y;
2235 abi_ulong u_regs[16]; /* globals and ins */
2236 } si_regs;
2237 int si_mask;
2238 } __siginfo_t;
2239
2240 typedef struct {
2241 abi_ulong si_float_regs[32];
2242 unsigned long si_fsr;
2243 unsigned long si_fpqdepth;
2244 struct {
2245 unsigned long *insn_addr;
2246 unsigned long insn;
2247 } si_fpqueue [16];
2248 } qemu_siginfo_fpu_t;
2249
2250
2251 struct target_signal_frame {
2252 struct sparc_stackf ss;
2253 __siginfo_t info;
2254 abi_ulong fpu_save;
2255 abi_ulong insns[2] __attribute__ ((aligned (8)));
2256 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2257 abi_ulong extra_size; /* Should be 0 */
2258 qemu_siginfo_fpu_t fpu_state;
2259 };
2260 struct target_rt_signal_frame {
2261 struct sparc_stackf ss;
2262 siginfo_t info;
2263 abi_ulong regs[20];
2264 sigset_t mask;
2265 abi_ulong fpu_save;
2266 unsigned int insns[2];
2267 stack_t stack;
2268 unsigned int extra_size; /* Should be 0 */
2269 qemu_siginfo_fpu_t fpu_state;
2270 };
2271
2272 #define UREG_O0 16
2273 #define UREG_O6 22
2274 #define UREG_I0 0
2275 #define UREG_I1 1
2276 #define UREG_I2 2
2277 #define UREG_I3 3
2278 #define UREG_I4 4
2279 #define UREG_I5 5
2280 #define UREG_I6 6
2281 #define UREG_I7 7
2282 #define UREG_L0 8
2283 #define UREG_FP UREG_I6
2284 #define UREG_SP UREG_O6
2285
2286 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2287 CPUSPARCState *env,
2288 unsigned long framesize)
2289 {
2290 abi_ulong sp;
2291
2292 sp = env->regwptr[UREG_FP];
2293
2294 /* This is the X/Open sanctioned signal stack switching. */
2295 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2296 if (!on_sig_stack(sp)
2297 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2298 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2299 }
2300 }
2301 return sp - framesize;
2302 }
2303
2304 static int
2305 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2306 {
2307 int err = 0, i;
2308
2309 __put_user(env->psr, &si->si_regs.psr);
2310 __put_user(env->pc, &si->si_regs.pc);
2311 __put_user(env->npc, &si->si_regs.npc);
2312 __put_user(env->y, &si->si_regs.y);
2313 for (i=0; i < 8; i++) {
2314 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2315 }
2316 for (i=0; i < 8; i++) {
2317 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2318 }
2319 __put_user(mask, &si->si_mask);
2320 return err;
2321 }
2322
2323 #if 0
2324 static int
2325 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2326 CPUSPARCState *env, unsigned long mask)
2327 {
2328 int err = 0;
2329
2330 __put_user(mask, &sc->sigc_mask);
2331 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2332 __put_user(env->pc, &sc->sigc_pc);
2333 __put_user(env->npc, &sc->sigc_npc);
2334 __put_user(env->psr, &sc->sigc_psr);
2335 __put_user(env->gregs[1], &sc->sigc_g1);
2336 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2337
2338 return err;
2339 }
2340 #endif
2341 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
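/* Round the frame size up to a multiple of 8 so that the frame carved out
 * below the stack pointer in get_sigframe() stays doubleword aligned
 * (assuming the interrupted stack pointer itself was aligned).
 */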
2342
2343 static void setup_frame(int sig, struct target_sigaction *ka,
2344 target_sigset_t *set, CPUSPARCState *env)
2345 {
2346 abi_ulong sf_addr;
2347 struct target_signal_frame *sf;
2348 int sigframe_size, err, i;
2349
2350 /* 1. Make sure everything is clean */
2351 //synchronize_user_stack();
2352
2353 sigframe_size = NF_ALIGNEDSZ;
2354 sf_addr = get_sigframe(ka, env, sigframe_size);
2355 trace_user_setup_frame(env, sf_addr);
2356
2357 sf = lock_user(VERIFY_WRITE, sf_addr,
2358 sizeof(struct target_signal_frame), 0);
2359 if (!sf) {
2360 goto sigsegv;
2361 }
2362 #if 0
2363 if (invalid_frame_pointer(sf, sigframe_size))
2364 goto sigill_and_return;
2365 #endif
2366 /* 2. Save the current process state */
2367 err = setup___siginfo(&sf->info, env, set->sig[0]);
2368 __put_user(0, &sf->extra_size);
2369
2370 //save_fpu_state(regs, &sf->fpu_state);
2371 //__put_user(&sf->fpu_state, &sf->fpu_save);
2372
2373 __put_user(set->sig[0], &sf->info.si_mask);
2374 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2375 __put_user(set->sig[i + 1], &sf->extramask[i]);
2376 }
2377
2378 for (i = 0; i < 8; i++) {
2379 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2380 }
2381 for (i = 0; i < 8; i++) {
2382 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2383 }
2384 if (err)
2385 goto sigsegv;
2386
2387 /* 3. signal handler back-trampoline and parameters */
2388 env->regwptr[UREG_FP] = sf_addr;
2389 env->regwptr[UREG_I0] = sig;
2390 env->regwptr[UREG_I1] = sf_addr +
2391 offsetof(struct target_signal_frame, info);
2392 env->regwptr[UREG_I2] = sf_addr +
2393 offsetof(struct target_signal_frame, info);
2394
2395 /* 4. signal handler */
2396 env->pc = ka->_sa_handler;
2397 env->npc = (env->pc + 4);
2398 /* 5. return to kernel instructions */
2399 if (ka->sa_restorer) {
2400 env->regwptr[UREG_I7] = ka->sa_restorer;
2401 } else {
2402 uint32_t val32;
2403
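            /* A SPARC "ret" returns to %i7 + 8, so point %i7 eight bytes
             * (two instructions) before insns[] to make the handler return
             * into the trampoline written below.
             */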
2404 env->regwptr[UREG_I7] = sf_addr +
2405 offsetof(struct target_signal_frame, insns) - 2 * 4;
2406
2407 /* mov __NR_sigreturn, %g1 */
2408 val32 = 0x821020d8;
2409 __put_user(val32, &sf->insns[0]);
2410
2411 /* t 0x10 */
2412 val32 = 0x91d02010;
2413 __put_user(val32, &sf->insns[1]);
2414 if (err)
2415 goto sigsegv;
2416
2417 /* Flush instruction space. */
2418 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2419 // tb_flush(env);
2420 }
2421 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2422 return;
2423 #if 0
2424 sigill_and_return:
2425 force_sig(TARGET_SIGILL);
2426 #endif
2427 sigsegv:
2428 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2429 force_sig(TARGET_SIGSEGV);
2430 }
2431
2432 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2433 target_siginfo_t *info,
2434 target_sigset_t *set, CPUSPARCState *env)
2435 {
2436 fprintf(stderr, "setup_rt_frame: not implemented\n");
2437 }
2438
2439 long do_sigreturn(CPUSPARCState *env)
2440 {
2441 abi_ulong sf_addr;
2442 struct target_signal_frame *sf;
2443 uint32_t up_psr, pc, npc;
2444 target_sigset_t set;
2445 sigset_t host_set;
2446 int err=0, i;
2447
2448 sf_addr = env->regwptr[UREG_FP];
2449 trace_user_do_sigreturn(env, sf_addr);
2450 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2451 goto segv_and_exit;
2452 }
2453
2454 /* 1. Make sure we are not getting garbage from the user */
2455
2456 if (sf_addr & 3)
2457 goto segv_and_exit;
2458
2459 __get_user(pc, &sf->info.si_regs.pc);
2460 __get_user(npc, &sf->info.si_regs.npc);
2461
2462 if ((pc | npc) & 3) {
2463 goto segv_and_exit;
2464 }
2465
2466 /* 2. Restore the state */
2467 __get_user(up_psr, &sf->info.si_regs.psr);
2468
2469 /* User can only change condition codes and FPU enabling in %psr. */
2470 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2471 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2472
2473 env->pc = pc;
2474 env->npc = npc;
2475 __get_user(env->y, &sf->info.si_regs.y);
2476 for (i=0; i < 8; i++) {
2477 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2478 }
2479 for (i=0; i < 8; i++) {
2480 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2481 }
2482
2483 /* FIXME: implement FPU save/restore:
2484 * __get_user(fpu_save, &sf->fpu_save);
2485 * if (fpu_save)
2486 * err |= restore_fpu_state(env, fpu_save);
2487 */
2488
2489 /* This is pretty much atomic, no amount locking would prevent
2490 * the races which exist anyways.
2491 */
2492 __get_user(set.sig[0], &sf->info.si_mask);
2493 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2494 __get_user(set.sig[i], &sf->extramask[i - 1]);
2495 }
2496
2497 target_to_host_sigset_internal(&host_set, &set);
2498 set_sigmask(&host_set);
2499
2500 if (err) {
2501 goto segv_and_exit;
2502 }
2503 unlock_user_struct(sf, sf_addr, 0);
2504 return -TARGET_QEMU_ESIGRETURN;
2505
2506 segv_and_exit:
2507 unlock_user_struct(sf, sf_addr, 0);
2508 force_sig(TARGET_SIGSEGV);
     return 0;
2509 }
2510
2511 long do_rt_sigreturn(CPUSPARCState *env)
2512 {
2513 trace_user_do_rt_sigreturn(env, 0);
2514 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2515 return -TARGET_ENOSYS;
2516 }
2517
2518 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2519 #define MC_TSTATE 0
2520 #define MC_PC 1
2521 #define MC_NPC 2
2522 #define MC_Y 3
2523 #define MC_G1 4
2524 #define MC_G2 5
2525 #define MC_G3 6
2526 #define MC_G4 7
2527 #define MC_G5 8
2528 #define MC_G6 9
2529 #define MC_G7 10
2530 #define MC_O0 11
2531 #define MC_O1 12
2532 #define MC_O2 13
2533 #define MC_O3 14
2534 #define MC_O4 15
2535 #define MC_O5 16
2536 #define MC_O6 17
2537 #define MC_O7 18
2538 #define MC_NGREG 19
2539
2540 typedef abi_ulong target_mc_greg_t;
2541 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2542
2543 struct target_mc_fq {
2544 abi_ulong *mcfq_addr;
2545 uint32_t mcfq_insn;
2546 };
2547
2548 struct target_mc_fpu {
2549 union {
2550 uint32_t sregs[32];
2551 uint64_t dregs[32];
2552 //uint128_t qregs[16];
2553 } mcfpu_fregs;
2554 abi_ulong mcfpu_fsr;
2555 abi_ulong mcfpu_fprs;
2556 abi_ulong mcfpu_gsr;
2557 struct target_mc_fq *mcfpu_fq;
2558 unsigned char mcfpu_qcnt;
2559 unsigned char mcfpu_qentsz;
2560 unsigned char mcfpu_enab;
2561 };
2562 typedef struct target_mc_fpu target_mc_fpu_t;
2563
2564 typedef struct {
2565 target_mc_gregset_t mc_gregs;
2566 target_mc_greg_t mc_fp;
2567 target_mc_greg_t mc_i7;
2568 target_mc_fpu_t mc_fpregs;
2569 } target_mcontext_t;
2570
2571 struct target_ucontext {
2572 struct target_ucontext *tuc_link;
2573 abi_ulong tuc_flags;
2574 target_sigset_t tuc_sigmask;
2575 target_mcontext_t tuc_mcontext;
2576 };
2577
2578 /* A V9 register window */
2579 struct target_reg_window {
2580 abi_ulong locals[8];
2581 abi_ulong ins[8];
2582 };
2583
2584 #define TARGET_STACK_BIAS 2047
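/* On 64-bit SPARC the ABI biases the stack and frame pointers by 2047 bytes,
 * which is why TARGET_STACK_BIAS is added before touching the register
 * window save area in the context code below.
 */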
2585
2586 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2587 void sparc64_set_context(CPUSPARCState *env)
2588 {
2589 abi_ulong ucp_addr;
2590 struct target_ucontext *ucp;
2591 target_mc_gregset_t *grp;
2592 abi_ulong pc, npc, tstate;
2593 abi_ulong fp, i7, w_addr;
2594 unsigned int i;
2595
2596 ucp_addr = env->regwptr[UREG_I0];
2597 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2598 goto do_sigsegv;
2599 }
2600 grp = &ucp->tuc_mcontext.mc_gregs;
2601 __get_user(pc, &((*grp)[MC_PC]));
2602 __get_user(npc, &((*grp)[MC_NPC]));
2603 if ((pc | npc) & 3) {
2604 goto do_sigsegv;
2605 }
2606 if (env->regwptr[UREG_I1]) {
2607 target_sigset_t target_set;
2608 sigset_t set;
2609
2610 if (TARGET_NSIG_WORDS == 1) {
2611 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2612 } else {
2613 abi_ulong *src, *dst;
2614 src = ucp->tuc_sigmask.sig;
2615 dst = target_set.sig;
2616 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2617 __get_user(*dst, src);
2618 }
2619 }
2620 target_to_host_sigset_internal(&set, &target_set);
2621 set_sigmask(&set);
2622 }
2623 env->pc = pc;
2624 env->npc = npc;
2625 __get_user(env->y, &((*grp)[MC_Y]));
2626 __get_user(tstate, &((*grp)[MC_TSTATE]));
2627 env->asi = (tstate >> 24) & 0xff;
2628 cpu_put_ccr(env, tstate >> 32);
2629 cpu_put_cwp64(env, tstate & 0x1f);
2630 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2631 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2632 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2633 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2634 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2635 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2636 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2637 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2638 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2639 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2640 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2641 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2642 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2643 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2644 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2645
2646 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2647 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2648
2649 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2650 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2651 abi_ulong) != 0) {
2652 goto do_sigsegv;
2653 }
2654 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2655 abi_ulong) != 0) {
2656 goto do_sigsegv;
2657 }
2658 /* FIXME this does not match how the kernel handles the FPU in
2659 * its sparc64_set_context implementation. In particular the FPU
2660 * is only restored if fenab is non-zero in:
2661 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2662 */
2663 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2664 {
2665 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2666 for (i = 0; i < 64; i++, src++) {
2667 if (i & 1) {
2668 __get_user(env->fpr[i/2].l.lower, src);
2669 } else {
2670 __get_user(env->fpr[i/2].l.upper, src);
2671 }
2672 }
2673 }
2674 __get_user(env->fsr,
2675 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2676 __get_user(env->gsr,
2677 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2678 unlock_user_struct(ucp, ucp_addr, 0);
2679 return;
2680 do_sigsegv:
2681 unlock_user_struct(ucp, ucp_addr, 0);
2682 force_sig(TARGET_SIGSEGV);
2683 }
2684
2685 void sparc64_get_context(CPUSPARCState *env)
2686 {
2687 abi_ulong ucp_addr;
2688 struct target_ucontext *ucp;
2689 target_mc_gregset_t *grp;
2690 target_mcontext_t *mcp;
2691 abi_ulong fp, i7, w_addr;
2692 int err;
2693 unsigned int i;
2694 target_sigset_t target_set;
2695 sigset_t set;
2696
2697 ucp_addr = env->regwptr[UREG_I0];
2698 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2699 goto do_sigsegv;
2700 }
2701
2702 mcp = &ucp->tuc_mcontext;
2703 grp = &mcp->mc_gregs;
2704
2705 /* Skip over the trap instruction, first. */
2706 env->pc = env->npc;
2707 env->npc += 4;
2708
2709 /* If we're only reading the signal mask then do_sigprocmask()
2710 * is guaranteed not to fail, which is important because we don't
2711 * have any way to signal a failure or restart this operation since
2712 * this is not a normal syscall.
2713 */
2714 err = do_sigprocmask(0, NULL, &set);
2715 assert(err == 0);
2716 host_to_target_sigset_internal(&target_set, &set);
2717 if (TARGET_NSIG_WORDS == 1) {
2718 __put_user(target_set.sig[0],
2719 (abi_ulong *)&ucp->tuc_sigmask);
2720 } else {
2721 abi_ulong *src, *dst;
2722 src = target_set.sig;
2723 dst = ucp->tuc_sigmask.sig;
2724 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2725 __put_user(*src, dst);
2726 }
2727 if (err)
2728 goto do_sigsegv;
2729 }
2730
2731 /* XXX: tstate must be saved properly */
2732 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2733 __put_user(env->pc, &((*grp)[MC_PC]));
2734 __put_user(env->npc, &((*grp)[MC_NPC]));
2735 __put_user(env->y, &((*grp)[MC_Y]));
2736 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2737 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2738 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2739 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2740 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2741 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2742 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2743 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2744 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2745 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2746 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2747 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2748 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2749 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2750 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2751
2752 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2753 fp = i7 = 0;
2754 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2755 abi_ulong) != 0) {
2756 goto do_sigsegv;
2757 }
2758 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2759 abi_ulong) != 0) {
2760 goto do_sigsegv;
2761 }
2762 __put_user(fp, &(mcp->mc_fp));
2763 __put_user(i7, &(mcp->mc_i7));
2764
2765 {
2766 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2767 for (i = 0; i < 64; i++, dst++) {
2768 if (i & 1) {
2769 __put_user(env->fpr[i/2].l.lower, dst);
2770 } else {
2771 __put_user(env->fpr[i/2].l.upper, dst);
2772 }
2773 }
2774 }
2775 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2776 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2777 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2778
2779 if (err)
2780 goto do_sigsegv;
2781 unlock_user_struct(ucp, ucp_addr, 1);
2782 return;
2783 do_sigsegv:
2784 unlock_user_struct(ucp, ucp_addr, 1);
2785 force_sig(TARGET_SIGSEGV);
2786 }
2787 #endif
2788 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2789
2790 # if defined(TARGET_ABI_MIPSO32)
2791 struct target_sigcontext {
2792 uint32_t sc_regmask; /* Unused */
2793 uint32_t sc_status;
2794 uint64_t sc_pc;
2795 uint64_t sc_regs[32];
2796 uint64_t sc_fpregs[32];
2797 uint32_t sc_ownedfp; /* Unused */
2798 uint32_t sc_fpc_csr;
2799 uint32_t sc_fpc_eir; /* Unused */
2800 uint32_t sc_used_math;
2801 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2802 uint32_t pad0;
2803 uint64_t sc_mdhi;
2804 uint64_t sc_mdlo;
2805 target_ulong sc_hi1; /* Was sc_cause */
2806 target_ulong sc_lo1; /* Was sc_badvaddr */
2807 target_ulong sc_hi2; /* Was sc_sigset[4] */
2808 target_ulong sc_lo2;
2809 target_ulong sc_hi3;
2810 target_ulong sc_lo3;
2811 };
2812 # else /* N32 || N64 */
2813 struct target_sigcontext {
2814 uint64_t sc_regs[32];
2815 uint64_t sc_fpregs[32];
2816 uint64_t sc_mdhi;
2817 uint64_t sc_hi1;
2818 uint64_t sc_hi2;
2819 uint64_t sc_hi3;
2820 uint64_t sc_mdlo;
2821 uint64_t sc_lo1;
2822 uint64_t sc_lo2;
2823 uint64_t sc_lo3;
2824 uint64_t sc_pc;
2825 uint32_t sc_fpc_csr;
2826 uint32_t sc_used_math;
2827 uint32_t sc_dsp;
2828 uint32_t sc_reserved;
2829 };
2830 # endif /* O32 */
2831
2832 struct sigframe {
2833 uint32_t sf_ass[4]; /* argument save space for o32 */
2834 uint32_t sf_code[2]; /* signal trampoline */
2835 struct target_sigcontext sf_sc;
2836 target_sigset_t sf_mask;
2837 };
2838
2839 struct target_ucontext {
2840 target_ulong tuc_flags;
2841 target_ulong tuc_link;
2842 target_stack_t tuc_stack;
2843 target_ulong pad0;
2844 struct target_sigcontext tuc_mcontext;
2845 target_sigset_t tuc_sigmask;
2846 };
2847
2848 struct target_rt_sigframe {
2849 uint32_t rs_ass[4]; /* argument save space for o32 */
2850 uint32_t rs_code[2]; /* signal trampoline */
2851 struct target_siginfo rs_info;
2852 struct target_ucontext rs_uc;
2853 };
2854
2855 /* Install trampoline to jump back from signal handler */
2856 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2857 {
2858 int err = 0;
2859
2860 /*
2861 * Set up the return code ...
2862 *
2863 * li v0, __NR__foo_sigreturn
2864 * syscall
2865 */
2866
2867 __put_user(0x24020000 + syscall, tramp + 0);
2868 __put_user(0x0000000c, tramp + 1);
2869 return err;
2870 }
2871
2872 static inline void setup_sigcontext(CPUMIPSState *regs,
2873 struct target_sigcontext *sc)
2874 {
2875 int i;
2876
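    /* Assumption: exception_resume_pc() returns the PC adjusted for any
     * pending branch delay slot, which would be why the branch-state hflags
     * are cleared right below.
     */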
2877 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2878 regs->hflags &= ~MIPS_HFLAG_BMASK;
2879
2880 __put_user(0, &sc->sc_regs[0]);
2881 for (i = 1; i < 32; ++i) {
2882 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2883 }
2884
2885 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2886 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2887
2888 /* Rather than checking for dsp existence, always copy. The storage
2889 would just be garbage otherwise. */
2890 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2891 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2892 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2893 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2894 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2895 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2896 {
2897 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2898 __put_user(dsp, &sc->sc_dsp);
2899 }
2900
2901 __put_user(1, &sc->sc_used_math);
2902
2903 for (i = 0; i < 32; ++i) {
2904 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2905 }
2906 }
2907
2908 static inline void
2909 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2910 {
2911 int i;
2912
2913 __get_user(regs->CP0_EPC, &sc->sc_pc);
2914
2915 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2916 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2917
2918 for (i = 1; i < 32; ++i) {
2919 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2920 }
2921
2922 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2923 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2924 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2925 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2926 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2927 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2928 {
2929 uint32_t dsp;
2930 __get_user(dsp, &sc->sc_dsp);
2931 cpu_wrdsp(dsp, 0x3ff, regs);
2932 }
2933
2934 for (i = 0; i < 32; ++i) {
2935 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2936 }
2937 }
2938
2939 /*
2940 * Determine which stack to use.
2941 */
2942 static inline abi_ulong
2943 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2944 {
2945 unsigned long sp;
2946
2947 /* Default to using normal stack */
2948 sp = regs->active_tc.gpr[29];
2949
2950 /*
2951 * FPU emulator may have its own trampoline active just
2952 * above the user stack, 16-bytes before the next lowest
2953 * 16 byte boundary. Try to avoid trashing it.
2954 */
2955 sp -= 32;
2956
2957 /* This is the X/Open sanctioned signal stack switching. */
2958 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2959 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2960 }
2961
2962 return (sp - frame_size) & ~7;
2963 }
2964
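/* On MIPS16/microMIPS-capable CPUs the low bit of a code address selects the
 * compressed ISA mode (much like the ARM Thumb bit), so it is folded into
 * hflags and stripped from the PC whenever we redirect execution.
 */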
2965 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2966 {
2967 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2968 env->hflags &= ~MIPS_HFLAG_M16;
2969 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2970 env->active_tc.PC &= ~(target_ulong) 1;
2971 }
2972 }
2973
2974 # if defined(TARGET_ABI_MIPSO32)
2975 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2976 static void setup_frame(int sig, struct target_sigaction * ka,
2977 target_sigset_t *set, CPUMIPSState *regs)
2978 {
2979 struct sigframe *frame;
2980 abi_ulong frame_addr;
2981 int i;
2982
2983 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2984 trace_user_setup_frame(regs, frame_addr);
2985 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2986 goto give_sigsegv;
2987 }
2988
2989 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2990
2991 setup_sigcontext(regs, &frame->sf_sc);
2992
2993 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2994 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2995 }
2996
2997 /*
2998 * Arguments to signal handler:
2999 *
3000 * a0 = signal number
3001 * a1 = 0 (should be cause)
3002 * a2 = pointer to struct sigcontext
3003 *
3004 * $25 and PC point to the signal handler, $29 points to the
3005 * struct sigframe.
3006 */
3007 regs->active_tc.gpr[ 4] = sig;
3008 regs->active_tc.gpr[ 5] = 0;
3009 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3010 regs->active_tc.gpr[29] = frame_addr;
3011 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3012 /* The original kernel code sets CP0_EPC to the handler
3013 * since it returns to userland using eret
3014 * we cannot do this here, and we must set PC directly */
3015 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3016 mips_set_hflags_isa_mode_from_pc(regs);
3017 unlock_user_struct(frame, frame_addr, 1);
3018 return;
3019
3020 give_sigsegv:
3021 force_sig(TARGET_SIGSEGV/*, current*/);
3022 }
3023
3024 long do_sigreturn(CPUMIPSState *regs)
3025 {
3026 struct sigframe *frame;
3027 abi_ulong frame_addr;
3028 sigset_t blocked;
3029 target_sigset_t target_set;
3030 int i;
3031
3032 frame_addr = regs->active_tc.gpr[29];
3033 trace_user_do_sigreturn(regs, frame_addr);
3034 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3035 goto badframe;
3036
3037 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3038 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3039 }
3040
3041 target_to_host_sigset_internal(&blocked, &target_set);
3042 set_sigmask(&blocked);
3043
3044 restore_sigcontext(regs, &frame->sf_sc);
3045
3046 #if 0
3047 /*
3048 * Don't let your children do this ...
3049 */
3050 __asm__ __volatile__(
3051 "move\t$29, %0\n\t"
3052 "j\tsyscall_exit"
3053 :/* no outputs */
3054 :"r" (&regs));
3055 /* Unreached */
3056 #endif
3057
3058 regs->active_tc.PC = regs->CP0_EPC;
3059 mips_set_hflags_isa_mode_from_pc(regs);
3060 /* I am not sure this is right, but it seems to work;
3061 * maybe there is a problem with nested signals? */
3062 regs->CP0_EPC = 0;
3063 return -TARGET_QEMU_ESIGRETURN;
3064
3065 badframe:
3066 force_sig(TARGET_SIGSEGV/*, current*/);
3067 return 0;
3068 }
3069 # endif /* O32 */
3070
3071 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3072 target_siginfo_t *info,
3073 target_sigset_t *set, CPUMIPSState *env)
3074 {
3075 struct target_rt_sigframe *frame;
3076 abi_ulong frame_addr;
3077 int i;
3078
3079 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3080 trace_user_setup_rt_frame(env, frame_addr);
3081 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3082 goto give_sigsegv;
3083 }
3084
3085 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3086
3087 tswap_siginfo(&frame->rs_info, info);
3088
3089 __put_user(0, &frame->rs_uc.tuc_flags);
3090 __put_user(0, &frame->rs_uc.tuc_link);
3091 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3092 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3093 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3094 &frame->rs_uc.tuc_stack.ss_flags);
3095
3096 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3097
3098 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3099 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3100 }
3101
3102 /*
3103 * Arguments to signal handler:
3104 *
3105 * a0 = signal number
3106 * a1 = pointer to siginfo_t
3107 * a2 = pointer to struct ucontext
3108 *
3109 * $25 and PC point to the signal handler, $29 points to the
3110 * struct sigframe.
3111 */
3112 env->active_tc.gpr[ 4] = sig;
3113 env->active_tc.gpr[ 5] = frame_addr
3114 + offsetof(struct target_rt_sigframe, rs_info);
3115 env->active_tc.gpr[ 6] = frame_addr
3116 + offsetof(struct target_rt_sigframe, rs_uc);
3117 env->active_tc.gpr[29] = frame_addr;
3118 env->active_tc.gpr[31] = frame_addr
3119 + offsetof(struct target_rt_sigframe, rs_code);
3120 /* The original kernel code sets CP0_EPC to the handler
3121 * since it returns to userland using eret
3122 * we cannot do this here, and we must set PC directly */
3123 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3124 mips_set_hflags_isa_mode_from_pc(env);
3125 unlock_user_struct(frame, frame_addr, 1);
3126 return;
3127
3128 give_sigsegv:
3129 unlock_user_struct(frame, frame_addr, 1);
3130 force_sig(TARGET_SIGSEGV/*, current*/);
3131 }
3132
3133 long do_rt_sigreturn(CPUMIPSState *env)
3134 {
3135 struct target_rt_sigframe *frame;
3136 abi_ulong frame_addr;
3137 sigset_t blocked;
3138
3139 frame_addr = env->active_tc.gpr[29];
3140 trace_user_do_rt_sigreturn(env, frame_addr);
3141 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3142 goto badframe;
3143 }
3144
3145 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3146 set_sigmask(&blocked);
3147
3148 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3149
3150 if (do_sigaltstack(frame_addr +
3151 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3152 0, get_sp_from_cpustate(env)) == -EFAULT)
3153 goto badframe;
3154
3155 env->active_tc.PC = env->CP0_EPC;
3156 mips_set_hflags_isa_mode_from_pc(env);
3157 /* I am not sure this is right, but it seems to work;
3158 * maybe there is a problem with nested signals? */
3159 env->CP0_EPC = 0;
3160 return -TARGET_QEMU_ESIGRETURN;
3161
3162 badframe:
3163 force_sig(TARGET_SIGSEGV/*, current*/);
3164 return 0;
3165 }
3166
3167 #elif defined(TARGET_SH4)
3168
3169 /*
3170 * code and data structures from linux kernel:
3171 * include/asm-sh/sigcontext.h
3172 * arch/sh/kernel/signal.c
3173 */
3174
3175 struct target_sigcontext {
3176 target_ulong oldmask;
3177
3178 /* CPU registers */
3179 target_ulong sc_gregs[16];
3180 target_ulong sc_pc;
3181 target_ulong sc_pr;
3182 target_ulong sc_sr;
3183 target_ulong sc_gbr;
3184 target_ulong sc_mach;
3185 target_ulong sc_macl;
3186
3187 /* FPU registers */
3188 target_ulong sc_fpregs[16];
3189 target_ulong sc_xfpregs[16];
3190 unsigned int sc_fpscr;
3191 unsigned int sc_fpul;
3192 unsigned int sc_ownedfp;
3193 };
3194
3195 struct target_sigframe
3196 {
3197 struct target_sigcontext sc;
3198 target_ulong extramask[TARGET_NSIG_WORDS-1];
3199 uint16_t retcode[3];
3200 };
3201
3202
3203 struct target_ucontext {
3204 target_ulong tuc_flags;
3205 struct target_ucontext *tuc_link;
3206 target_stack_t tuc_stack;
3207 struct target_sigcontext tuc_mcontext;
3208 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3209 };
3210
3211 struct target_rt_sigframe
3212 {
3213 struct target_siginfo info;
3214 struct target_ucontext uc;
3215 uint16_t retcode[3];
3216 };
3217
3218
3219 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3220 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
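/* The trampolines below store the sigreturn syscall number as a data word in
 * retcode[2]; MOVW() loads it PC-relative into R3 and TRAP_NOARG then issues
 * the trap, mirroring (as far as I can tell) what the SH kernel generates.
 */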
3221
3222 static abi_ulong get_sigframe(struct target_sigaction *ka,
3223 unsigned long sp, size_t frame_size)
3224 {
3225 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3226 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3227 }
3228
3229 return (sp - frame_size) & -8ul;
3230 }
3231
3232 static void setup_sigcontext(struct target_sigcontext *sc,
3233 CPUSH4State *regs, unsigned long mask)
3234 {
3235 int i;
3236
3237 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3238 COPY(gregs[0]); COPY(gregs[1]);
3239 COPY(gregs[2]); COPY(gregs[3]);
3240 COPY(gregs[4]); COPY(gregs[5]);
3241 COPY(gregs[6]); COPY(gregs[7]);
3242 COPY(gregs[8]); COPY(gregs[9]);
3243 COPY(gregs[10]); COPY(gregs[11]);
3244 COPY(gregs[12]); COPY(gregs[13]);
3245 COPY(gregs[14]); COPY(gregs[15]);
3246 COPY(gbr); COPY(mach);
3247 COPY(macl); COPY(pr);
3248 COPY(sr); COPY(pc);
3249 #undef COPY
3250
3251 for (i=0; i<16; i++) {
3252 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3253 }
3254 __put_user(regs->fpscr, &sc->sc_fpscr);
3255 __put_user(regs->fpul, &sc->sc_fpul);
3256
3257 /* non-iBCS2 extensions.. */
3258 __put_user(mask, &sc->oldmask);
3259 }
3260
3261 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3262 {
3263 int i;
3264
3265 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3266 COPY(gregs[0]); COPY(gregs[1]);
3267 COPY(gregs[2]); COPY(gregs[3]);
3268 COPY(gregs[4]); COPY(gregs[5]);
3269 COPY(gregs[6]); COPY(gregs[7]);
3270 COPY(gregs[8]); COPY(gregs[9]);
3271 COPY(gregs[10]); COPY(gregs[11]);
3272 COPY(gregs[12]); COPY(gregs[13]);
3273 COPY(gregs[14]); COPY(gregs[15]);
3274 COPY(gbr); COPY(mach);
3275 COPY(macl); COPY(pr);
3276 COPY(sr); COPY(pc);
3277 #undef COPY
3278
3279 for (i=0; i<16; i++) {
3280 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3281 }
3282 __get_user(regs->fpscr, &sc->sc_fpscr);
3283 __get_user(regs->fpul, &sc->sc_fpul);
3284
3285 regs->tra = -1; /* disable syscall checks */
3286 }
3287
3288 static void setup_frame(int sig, struct target_sigaction *ka,
3289 target_sigset_t *set, CPUSH4State *regs)
3290 {
3291 struct target_sigframe *frame;
3292 abi_ulong frame_addr;
3293 int i;
3294
3295 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3296 trace_user_setup_frame(regs, frame_addr);
3297 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3298 goto give_sigsegv;
3299 }
3300
3301 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3302
3303 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3304 __put_user(set->sig[i + 1], &frame->extramask[i]);
3305 }
3306
3307 /* Set up to return from userspace. If provided, use a stub
3308 already in userspace. */
3309 if (ka->sa_flags & TARGET_SA_RESTORER) {
3310 regs->pr = (unsigned long) ka->sa_restorer;
3311 } else {
3312 /* Generate return code (system call to sigreturn) */
3313 abi_ulong retcode_addr = frame_addr +
3314 offsetof(struct target_sigframe, retcode);
3315 __put_user(MOVW(2), &frame->retcode[0]);
3316 __put_user(TRAP_NOARG, &frame->retcode[1]);
3317 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3318 regs->pr = (unsigned long) retcode_addr;
3319 }
3320
3321 /* Set up registers for signal handler */
3322 regs->gregs[15] = frame_addr;
3323 regs->gregs[4] = sig; /* Arg for signal handler */
3324 regs->gregs[5] = 0;
3325 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3326 regs->pc = (unsigned long) ka->_sa_handler;
3327
3328 unlock_user_struct(frame, frame_addr, 1);
3329 return;
3330
3331 give_sigsegv:
3332 unlock_user_struct(frame, frame_addr, 1);
3333 force_sig(TARGET_SIGSEGV);
3334 }
3335
3336 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3337 target_siginfo_t *info,
3338 target_sigset_t *set, CPUSH4State *regs)
3339 {
3340 struct target_rt_sigframe *frame;
3341 abi_ulong frame_addr;
3342 int i;
3343
3344 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3345 trace_user_setup_rt_frame(regs, frame_addr);
3346 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3347 goto give_sigsegv;
3348 }
3349
3350 tswap_siginfo(&frame->info, info);
3351
3352 /* Create the ucontext. */
3353 __put_user(0, &frame->uc.tuc_flags);
3354 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3355 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3356 &frame->uc.tuc_stack.ss_sp);
3357 __put_user(sas_ss_flags(regs->gregs[15]),
3358 &frame->uc.tuc_stack.ss_flags);
3359 __put_user(target_sigaltstack_used.ss_size,
3360 &frame->uc.tuc_stack.ss_size);
3361 setup_sigcontext(&frame->uc.tuc_mcontext,
3362 regs, set->sig[0]);
3363 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3364 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3365 }
3366
3367 /* Set up to return from userspace. If provided, use a stub
3368 already in userspace. */
3369 if (ka->sa_flags & TARGET_SA_RESTORER) {
3370 regs->pr = (unsigned long) ka->sa_restorer;
3371 } else {
3372 /* Generate return code (system call to sigreturn) */
3373 abi_ulong retcode_addr = frame_addr +
3374 offsetof(struct target_rt_sigframe, retcode);
3375 __put_user(MOVW(2), &frame->retcode[0]);
3376 __put_user(TRAP_NOARG, &frame->retcode[1]);
3377 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3378 regs->pr = (unsigned long) retcode_addr;
3379 }
3380
3381 /* Set up registers for signal handler */
3382 regs->gregs[15] = frame_addr;
3383 regs->gregs[4] = sig; /* Arg for signal handler */
3384 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3385 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3386 regs->pc = (unsigned long) ka->_sa_handler;
3387
3388 unlock_user_struct(frame, frame_addr, 1);
3389 return;
3390
3391 give_sigsegv:
3392 unlock_user_struct(frame, frame_addr, 1);
3393 force_sig(TARGET_SIGSEGV);
3394 }
3395
3396 long do_sigreturn(CPUSH4State *regs)
3397 {
3398 struct target_sigframe *frame;
3399 abi_ulong frame_addr;
3400 sigset_t blocked;
3401 target_sigset_t target_set;
3402 int i;
3403 int err = 0;
3404
3405 frame_addr = regs->gregs[15];
3406 trace_user_do_sigreturn(regs, frame_addr);
3407 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3408 goto badframe;
3409 }
3410
3411 __get_user(target_set.sig[0], &frame->sc.oldmask);
3412 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3413 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3414 }
3415
3416 if (err)
3417 goto badframe;
3418
3419 target_to_host_sigset_internal(&blocked, &target_set);
3420 set_sigmask(&blocked);
3421
3422 restore_sigcontext(regs, &frame->sc);
3423
3424 unlock_user_struct(frame, frame_addr, 0);
3425 return -TARGET_QEMU_ESIGRETURN;
3426
3427 badframe:
3428 unlock_user_struct(frame, frame_addr, 0);
3429 force_sig(TARGET_SIGSEGV);
3430 return 0;
3431 }
3432
3433 long do_rt_sigreturn(CPUSH4State *regs)
3434 {
3435 struct target_rt_sigframe *frame;
3436 abi_ulong frame_addr;
3437 sigset_t blocked;
3438
3439 frame_addr = regs->gregs[15];
3440 trace_user_do_rt_sigreturn(regs, frame_addr);
3441 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3442 goto badframe;
3443 }
3444
3445 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3446 set_sigmask(&blocked);
3447
3448 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3449
3450 if (do_sigaltstack(frame_addr +
3451 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3452 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3453 goto badframe;
3454 }
3455
3456 unlock_user_struct(frame, frame_addr, 0);
3457 return -TARGET_QEMU_ESIGRETURN;
3458
3459 badframe:
3460 unlock_user_struct(frame, frame_addr, 0);
3461 force_sig(TARGET_SIGSEGV);
3462 return 0;
3463 }
3464 #elif defined(TARGET_MICROBLAZE)
3465
3466 struct target_sigcontext {
3467 struct target_pt_regs regs; /* needs to be first */
3468 uint32_t oldmask;
3469 };
3470
3471 struct target_stack_t {
3472 abi_ulong ss_sp;
3473 int ss_flags;
3474 unsigned int ss_size;
3475 };
3476
3477 struct target_ucontext {
3478 abi_ulong tuc_flags;
3479 abi_ulong tuc_link;
3480 struct target_stack_t tuc_stack;
3481 struct target_sigcontext tuc_mcontext;
3482 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3483 };
3484
3485 /* Signal frames. */
3486 struct target_signal_frame {
3487 struct target_ucontext uc;
3488 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3489 uint32_t tramp[2];
3490 };
3491
3492 struct rt_signal_frame {
3493 siginfo_t info;
3494 struct ucontext uc;
3495 uint32_t tramp[2];
3496 };
3497
3498 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3499 {
3500 __put_user(env->regs[0], &sc->regs.r0);
3501 __put_user(env->regs[1], &sc->regs.r1);
3502 __put_user(env->regs[2], &sc->regs.r2);
3503 __put_user(env->regs[3], &sc->regs.r3);
3504 __put_user(env->regs[4], &sc->regs.r4);
3505 __put_user(env->regs[5], &sc->regs.r5);
3506 __put_user(env->regs[6], &sc->regs.r6);
3507 __put_user(env->regs[7], &sc->regs.r7);
3508 __put_user(env->regs[8], &sc->regs.r8);
3509 __put_user(env->regs[9], &sc->regs.r9);
3510 __put_user(env->regs[10], &sc->regs.r10);
3511 __put_user(env->regs[11], &sc->regs.r11);
3512 __put_user(env->regs[12], &sc->regs.r12);
3513 __put_user(env->regs[13], &sc->regs.r13);
3514 __put_user(env->regs[14], &sc->regs.r14);
3515 __put_user(env->regs[15], &sc->regs.r15);
3516 __put_user(env->regs[16], &sc->regs.r16);
3517 __put_user(env->regs[17], &sc->regs.r17);
3518 __put_user(env->regs[18], &sc->regs.r18);
3519 __put_user(env->regs[19], &sc->regs.r19);
3520 __put_user(env->regs[20], &sc->regs.r20);
3521 __put_user(env->regs[21], &sc->regs.r21);
3522 __put_user(env->regs[22], &sc->regs.r22);
3523 __put_user(env->regs[23], &sc->regs.r23);
3524 __put_user(env->regs[24], &sc->regs.r24);
3525 __put_user(env->regs[25], &sc->regs.r25);
3526 __put_user(env->regs[26], &sc->regs.r26);
3527 __put_user(env->regs[27], &sc->regs.r27);
3528 __put_user(env->regs[28], &sc->regs.r28);
3529 __put_user(env->regs[29], &sc->regs.r29);
3530 __put_user(env->regs[30], &sc->regs.r30);
3531 __put_user(env->regs[31], &sc->regs.r31);
3532 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3533 }
3534
3535 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3536 {
3537 __get_user(env->regs[0], &sc->regs.r0);
3538 __get_user(env->regs[1], &sc->regs.r1);
3539 __get_user(env->regs[2], &sc->regs.r2);
3540 __get_user(env->regs[3], &sc->regs.r3);
3541 __get_user(env->regs[4], &sc->regs.r4);
3542 __get_user(env->regs[5], &sc->regs.r5);
3543 __get_user(env->regs[6], &sc->regs.r6);
3544 __get_user(env->regs[7], &sc->regs.r7);
3545 __get_user(env->regs[8], &sc->regs.r8);
3546 __get_user(env->regs[9], &sc->regs.r9);
3547 __get_user(env->regs[10], &sc->regs.r10);
3548 __get_user(env->regs[11], &sc->regs.r11);
3549 __get_user(env->regs[12], &sc->regs.r12);
3550 __get_user(env->regs[13], &sc->regs.r13);
3551 __get_user(env->regs[14], &sc->regs.r14);
3552 __get_user(env->regs[15], &sc->regs.r15);
3553 __get_user(env->regs[16], &sc->regs.r16);
3554 __get_user(env->regs[17], &sc->regs.r17);
3555 __get_user(env->regs[18], &sc->regs.r18);
3556 __get_user(env->regs[19], &sc->regs.r19);
3557 __get_user(env->regs[20], &sc->regs.r20);
3558 __get_user(env->regs[21], &sc->regs.r21);
3559 __get_user(env->regs[22], &sc->regs.r22);
3560 __get_user(env->regs[23], &sc->regs.r23);
3561 __get_user(env->regs[24], &sc->regs.r24);
3562 __get_user(env->regs[25], &sc->regs.r25);
3563 __get_user(env->regs[26], &sc->regs.r26);
3564 __get_user(env->regs[27], &sc->regs.r27);
3565 __get_user(env->regs[28], &sc->regs.r28);
3566 __get_user(env->regs[29], &sc->regs.r29);
3567 __get_user(env->regs[30], &sc->regs.r30);
3568 __get_user(env->regs[31], &sc->regs.r31);
3569 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3570 }
3571
3572 static abi_ulong get_sigframe(struct target_sigaction *ka,
3573 CPUMBState *env, int frame_size)
3574 {
3575 abi_ulong sp = env->regs[1];
3576
3577 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3578 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3579 }
3580
3581 return ((sp - frame_size) & -8UL);
3582 }
3583
3584 static void setup_frame(int sig, struct target_sigaction *ka,
3585 target_sigset_t *set, CPUMBState *env)
3586 {
3587 struct target_signal_frame *frame;
3588 abi_ulong frame_addr;
3589 int i;
3590
3591 frame_addr = get_sigframe(ka, env, sizeof *frame);
3592 trace_user_setup_frame(env, frame_addr);
3593 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3594 goto badframe;
3595
3596 /* Save the mask. */
3597 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3598
3599 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3600 __put_user(set->sig[i], &frame->extramask[i - 1]);
3601 }
3602
3603 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3604
3605 /* Set up to return from userspace. If provided, use a stub
3606 already in userspace. */
3607 /* Subtract 8 to compensate for the "rtsd r15, 8" return offset. */
3608 if (ka->sa_flags & TARGET_SA_RESTORER) {
3609 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3610 } else {
3611 uint32_t t;
3612 /* Note, these encodings are _big endian_! */
3613 /* addi r12, r0, __NR_sigreturn */
3614 t = 0x31800000UL | TARGET_NR_sigreturn;
3615 __put_user(t, frame->tramp + 0);
3616 /* brki r14, 0x8 */
3617 t = 0xb9cc0008UL;
3618 __put_user(t, frame->tramp + 1);
3619
3620 /* Return from sighandler will jump to the tramp.
3621 Negative 8 offset because return is rtsd r15, 8 */
3622 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3623 - 8;
3624 }
3625
3626 /* Set up registers for signal handler */
3627 env->regs[1] = frame_addr;
3628 /* Signal handler args: */
3629 env->regs[5] = sig; /* Arg 0: signum */
3630 env->regs[6] = 0;
3631 /* arg 1: sigcontext */
3632 env->regs[7] = frame_addr + offsetof(typeof(*frame), uc);
3633
3634 /* Offset of 4 to handle microblaze rtid r14, 0 */
3635 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3636
3637 unlock_user_struct(frame, frame_addr, 1);
3638 return;
3639 badframe:
3640 force_sig(TARGET_SIGSEGV);
3641 }
3642
3643 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3644 target_siginfo_t *info,
3645 target_sigset_t *set, CPUMBState *env)
3646 {
3647 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3648 }
3649
3650 long do_sigreturn(CPUMBState *env)
3651 {
3652 struct target_signal_frame *frame;
3653 abi_ulong frame_addr;
3654 target_sigset_t target_set;
3655 sigset_t set;
3656 int i;
3657
3658 frame_addr = env->regs[R_SP];
3659 trace_user_do_sigreturn(env, frame_addr);
3660 /* Make sure the guest isn't playing games. */
3661 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3662 goto badframe;
3663
3664 /* Restore blocked signals */
3665 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3666 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3667 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3668 }
3669 target_to_host_sigset_internal(&set, &target_set);
3670 set_sigmask(&set);
3671
3672 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3673 /* We got here through a sigreturn syscall, our path back is via an
3674 rtb insn so setup r14 for that. */
3675 env->regs[14] = env->sregs[SR_PC];
3676
3677 unlock_user_struct(frame, frame_addr, 0);
3678 return -TARGET_QEMU_ESIGRETURN;
3679 badframe:
3680 force_sig(TARGET_SIGSEGV);
     return 0;
3681 }
3682
3683 long do_rt_sigreturn(CPUMBState *env)
3684 {
3685 trace_user_do_rt_sigreturn(env, 0);
3686 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3687 return -TARGET_ENOSYS;
3688 }
3689
3690 #elif defined(TARGET_CRIS)
3691
3692 struct target_sigcontext {
3693 struct target_pt_regs regs; /* needs to be first */
3694 uint32_t oldmask;
3695 uint32_t usp; /* usp before stacking this gunk on it */
3696 };
3697
3698 /* Signal frames. */
3699 struct target_signal_frame {
3700 struct target_sigcontext sc;
3701 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3702 uint16_t retcode[4]; /* Trampoline code. */
3703 };
3704
3705 struct rt_signal_frame {
3706 siginfo_t *pinfo;
3707 void *puc;
3708 siginfo_t info;
3709 struct ucontext uc;
3710 uint16_t retcode[4]; /* Trampoline code. */
3711 };
3712
3713 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3714 {
3715 __put_user(env->regs[0], &sc->regs.r0);
3716 __put_user(env->regs[1], &sc->regs.r1);
3717 __put_user(env->regs[2], &sc->regs.r2);
3718 __put_user(env->regs[3], &sc->regs.r3);
3719 __put_user(env->regs[4], &sc->regs.r4);
3720 __put_user(env->regs[5], &sc->regs.r5);
3721 __put_user(env->regs[6], &sc->regs.r6);
3722 __put_user(env->regs[7], &sc->regs.r7);
3723 __put_user(env->regs[8], &sc->regs.r8);
3724 __put_user(env->regs[9], &sc->regs.r9);
3725 __put_user(env->regs[10], &sc->regs.r10);
3726 __put_user(env->regs[11], &sc->regs.r11);
3727 __put_user(env->regs[12], &sc->regs.r12);
3728 __put_user(env->regs[13], &sc->regs.r13);
3729 __put_user(env->regs[14], &sc->usp);
3730 __put_user(env->regs[15], &sc->regs.acr);
3731 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3732 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3733 __put_user(env->pc, &sc->regs.erp);
3734 }
3735
3736 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3737 {
3738 __get_user(env->regs[0], &sc->regs.r0);
3739 __get_user(env->regs[1], &sc->regs.r1);
3740 __get_user(env->regs[2], &sc->regs.r2);
3741 __get_user(env->regs[3], &sc->regs.r3);
3742 __get_user(env->regs[4], &sc->regs.r4);
3743 __get_user(env->regs[5], &sc->regs.r5);
3744 __get_user(env->regs[6], &sc->regs.r6);
3745 __get_user(env->regs[7], &sc->regs.r7);
3746 __get_user(env->regs[8], &sc->regs.r8);
3747 __get_user(env->regs[9], &sc->regs.r9);
3748 __get_user(env->regs[10], &sc->regs.r10);
3749 __get_user(env->regs[11], &sc->regs.r11);
3750 __get_user(env->regs[12], &sc->regs.r12);
3751 __get_user(env->regs[13], &sc->regs.r13);
3752 __get_user(env->regs[14], &sc->usp);
3753 __get_user(env->regs[15], &sc->regs.acr);
3754 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3755 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3756 __get_user(env->pc, &sc->regs.erp);
3757 }
3758
3759 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3760 {
3761 abi_ulong sp;
3762 /* Align the stack downwards to 4. */
3763 sp = (env->regs[R_SP] & ~3);
3764 return sp - framesize;
3765 }
3766
3767 static void setup_frame(int sig, struct target_sigaction *ka,
3768 target_sigset_t *set, CPUCRISState *env)
3769 {
3770 struct target_signal_frame *frame;
3771 abi_ulong frame_addr;
3772 int i;
3773
3774 frame_addr = get_sigframe(env, sizeof *frame);
3775 trace_user_setup_frame(env, frame_addr);
3776 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3777 goto badframe;
3778
3779 /*
3780 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3781 * use this trampoline anymore but it sets it up for GDB.
3782 * In QEMU, using the trampoline simplifies things a bit so we use it.
3783 *
3784 * This is movu.w __NR_sigreturn, r9; break 13;
3785 */
3786 __put_user(0x9c5f, frame->retcode+0);
3787 __put_user(TARGET_NR_sigreturn,
3788 frame->retcode + 1);
3789 __put_user(0xe93d, frame->retcode + 2);
3790
3791 /* Save the mask. */
3792 __put_user(set->sig[0], &frame->sc.oldmask);
3793
3794 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3795 __put_user(set->sig[i], &frame->extramask[i - 1]);
3796 }
3797
3798 setup_sigcontext(&frame->sc, env);
3799
3800 /* Move the stack and setup the arguments for the handler. */
3801 env->regs[R_SP] = frame_addr;
3802 env->regs[10] = sig;
3803 env->pc = (unsigned long) ka->_sa_handler;
3804 /* Link SRP so the guest returns through the trampoline. */
3805 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3806
3807 unlock_user_struct(frame, frame_addr, 1);
3808 return;
3809 badframe:
3810 force_sig(TARGET_SIGSEGV);
3811 }
3812
3813 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3814 target_siginfo_t *info,
3815 target_sigset_t *set, CPUCRISState *env)
3816 {
3817 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3818 }
3819
3820 long do_sigreturn(CPUCRISState *env)
3821 {
3822 struct target_signal_frame *frame;
3823 abi_ulong frame_addr;
3824 target_sigset_t target_set;
3825 sigset_t set;
3826 int i;
3827
3828 frame_addr = env->regs[R_SP];
3829 trace_user_do_sigreturn(env, frame_addr);
3830 /* Make sure the guest isn't playing games. */
3831 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3832 goto badframe;
3833 }
3834
3835 /* Restore blocked signals */
3836 __get_user(target_set.sig[0], &frame->sc.oldmask);
3837 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3838 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3839 }
3840 target_to_host_sigset_internal(&set, &target_set);
3841 set_sigmask(&set);
3842
3843 restore_sigcontext(&frame->sc, env);
3844 unlock_user_struct(frame, frame_addr, 0);
3845 return -TARGET_QEMU_ESIGRETURN;
3846 badframe:
3847 force_sig(TARGET_SIGSEGV);
3848 }
3849
3850 long do_rt_sigreturn(CPUCRISState *env)
3851 {
3852 trace_user_do_rt_sigreturn(env, 0);
3853 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3854 return -TARGET_ENOSYS;
3855 }
3856
3857 #elif defined(TARGET_OPENRISC)
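/* OpenRISC only provides the rt signal path in this file: setup_rt_frame
   builds the frame and trampoline, while do_sigreturn and do_rt_sigreturn
   below are still stubs returning -TARGET_ENOSYS. */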
3858
3859 struct target_sigcontext {
3860 struct target_pt_regs regs;
3861 abi_ulong oldmask;
3862 abi_ulong usp;
3863 };
3864
3865 struct target_ucontext {
3866 abi_ulong tuc_flags;
3867 abi_ulong tuc_link;
3868 target_stack_t tuc_stack;
3869 struct target_sigcontext tuc_mcontext;
3870 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3871 };
3872
3873 struct target_rt_sigframe {
3874 abi_ulong pinfo;
3875 uint64_t puc;
3876 struct target_siginfo info;
3877 struct target_sigcontext sc;
3878 struct target_ucontext uc;
3879 unsigned char retcode[16]; /* trampoline code */
3880 };
3881
3882 /* This is the asm-generic/ucontext.h version */
3883 #if 0
3884 static int restore_sigcontext(CPUOpenRISCState *regs,
3885 struct target_sigcontext *sc)
3886 {
3887 unsigned int err = 0;
3888 unsigned long old_usp;
3889
3890 /* Always make any pending restarted system call return -EINTR */
3891 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3892
3893 /* restore the regs from &sc->regs (same as sc, since regs is first)
3894 * (sc is already checked for VERIFY_READ since the sigframe was
3895 * checked in sys_sigreturn previously)
3896 */
3897
3898 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3899 goto badframe;
3900 }
3901
3902 /* make sure the U-flag is set so user-mode cannot fool us */
3903
3904 regs->sr &= ~SR_SM;
3905
3906 /* restore the old USP as it was before we stacked the sc etc.
3907 * (we cannot just pop the sigcontext since we aligned the sp and
3908 * stuff after pushing it)
3909 */
3910
3911 __get_user(old_usp, &sc->usp);
3912 phx_signal("old_usp 0x%lx", old_usp);
3913
3914 __PHX__ REALLY /* ??? */
3915 wrusp(old_usp);
3916 regs->gpr[1] = old_usp;
3917
3918 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3919 * after this completes, but we don't use that mechanism. maybe we can
3920 * use it now ?
3921 */
3922
3923 return err;
3924
3925 badframe:
3926 return 1;
3927 }
3928 #endif
3929
3930 /* Set up a signal frame. */
3931
3932 static void setup_sigcontext(struct target_sigcontext *sc,
3933 CPUOpenRISCState *regs,
3934 unsigned long mask)
3935 {
3936 unsigned long usp = regs->gpr[1];
3937
3938 /* copy the regs. they are first in sc so we can use sc directly */
3939
3940 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3941
3942 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3943 the signal handler. The frametype will be restored to its previous
3944 value in restore_sigcontext. */
3945 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3946
3947 /* then some other stuff */
3948 __put_user(mask, &sc->oldmask);
3949 __put_user(usp, &sc->usp);
3950 }
3951
3952 static inline unsigned long align_sigframe(unsigned long sp)
3953 {
3954 unsigned long i;
3955 i = sp & ~3UL;
3956 return i;
3957 }
3958
3959 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3960 CPUOpenRISCState *regs,
3961 size_t frame_size)
3962 {
3963 unsigned long sp = regs->gpr[1];
3964 int onsigstack = on_sig_stack(sp);
3965
3966 /* redzone */
3967 /* This is the X/Open sanctioned signal stack switching. */
3968 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3969 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3970 }
3971
3972 sp = align_sigframe(sp - frame_size);
3973
3974 /*
3975 * If we are on the alternate signal stack and would overflow it, don't.
3976 * Return an always-bogus address instead so we will die with SIGSEGV.
3977 */
3978
3979 if (onsigstack && !likely(on_sig_stack(sp))) {
3980 return -1L;
3981 }
3982
3983 return sp;
3984 }
3985
3986 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3987 target_siginfo_t *info,
3988 target_sigset_t *set, CPUOpenRISCState *env)
3989 {
3990 int err = 0;
3991 abi_ulong frame_addr;
3992 unsigned long return_ip;
3993 struct target_rt_sigframe *frame;
3994 abi_ulong info_addr, uc_addr;
3995
3996 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3997 trace_user_setup_rt_frame(env, frame_addr);
3998 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3999 goto give_sigsegv;
4000 }
4001
4002 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4003 __put_user(info_addr, &frame->pinfo);
4004 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4005 __put_user(uc_addr, &frame->puc);
4006
4007 if (ka->sa_flags & TARGET_SA_SIGINFO) {
4008 tswap_siginfo(&frame->info, info);
4009 }
4010
4011 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4012 __put_user(0, &frame->uc.tuc_flags);
4013 __put_user(0, &frame->uc.tuc_link);
4014 __put_user(target_sigaltstack_used.ss_sp,
4015 &frame->uc.tuc_stack.ss_sp);
4016 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4017 __put_user(target_sigaltstack_used.ss_size,
4018 &frame->uc.tuc_stack.ss_size);
4019 setup_sigcontext(&frame->sc, env, set->sig[0]);
4020
4021 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4022
4023 /* trampoline - the desired return ip is the retcode itself */
4024 return_ip = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4025 /* This is l.ori r11,r0,__NR_rt_sigreturn, l.sys 1 */
4026 __put_user(0xa960, (short *)(frame->retcode + 0));
4027 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4028 __put_user(0x20000001, (uint32_t *)(frame->retcode + 4));
4029 __put_user(0x15000000, (uint32_t *)(frame->retcode + 8));
4030
4031 if (err) {
4032 goto give_sigsegv;
4033 }
4034
4035 /* TODO what is the current->exec_domain stuff and invmap ? */
4036
4037 /* Set up registers for signal handler */
4038 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4039 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4040 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4041 env->gpr[4] = info_addr; /* arg 2: (siginfo_t*) */
4042 env->gpr[5] = uc_addr; /* arg 3: ucontext */
4043
4044 /* actually move the usp to reflect the stacked frame */
4045 env->gpr[1] = frame_addr;
4046
 unlock_user_struct(frame, frame_addr, 1);
4047 return;
4048
4049 give_sigsegv:
4050 unlock_user_struct(frame, frame_addr, 1);
4051 if (sig == TARGET_SIGSEGV) {
4052 ka->_sa_handler = TARGET_SIG_DFL;
4053 }
4054 force_sig(TARGET_SIGSEGV);
4055 }
4056
4057 long do_sigreturn(CPUOpenRISCState *env)
4058 {
4059 trace_user_do_sigreturn(env, 0);
4060 fprintf(stderr, "do_sigreturn: not implemented\n");
4061 return -TARGET_ENOSYS;
4062 }
4063
4064 long do_rt_sigreturn(CPUOpenRISCState *env)
4065 {
4066 trace_user_do_rt_sigreturn(env, 0);
4067 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4068 return -TARGET_ENOSYS;
4069 }
4070 /* TARGET_OPENRISC */
4071
4072 #elif defined(TARGET_S390X)
4073
4074 #define __NUM_GPRS 16
4075 #define __NUM_FPRS 16
4076 #define __NUM_ACRS 16
4077
4078 #define S390_SYSCALL_SIZE 2
4079 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4080
4081 #define _SIGCONTEXT_NSIG 64
4082 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4083 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4084 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4085 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4086 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
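/* 0x0a00 is the base encoding of the two-byte s390 "svc" instruction, so
   ORing in a syscall number below yields "svc __NR_(rt_)sigreturn", which is
   written into the frame's retcode as the default return trampoline. */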
4087
4088 typedef struct {
4089 target_psw_t psw;
4090 target_ulong gprs[__NUM_GPRS];
4091 unsigned int acrs[__NUM_ACRS];
4092 } target_s390_regs_common;
4093
4094 typedef struct {
4095 unsigned int fpc;
4096 double fprs[__NUM_FPRS];
4097 } target_s390_fp_regs;
4098
4099 typedef struct {
4100 target_s390_regs_common regs;
4101 target_s390_fp_regs fpregs;
4102 } target_sigregs;
4103
4104 struct target_sigcontext {
4105 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4106 target_sigregs *sregs;
4107 };
4108
4109 typedef struct {
4110 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4111 struct target_sigcontext sc;
4112 target_sigregs sregs;
4113 int signo;
4114 uint8_t retcode[S390_SYSCALL_SIZE];
4115 } sigframe;
4116
4117 struct target_ucontext {
4118 target_ulong tuc_flags;
4119 struct target_ucontext *tuc_link;
4120 target_stack_t tuc_stack;
4121 target_sigregs tuc_mcontext;
4122 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4123 };
4124
4125 typedef struct {
4126 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4127 uint8_t retcode[S390_SYSCALL_SIZE];
4128 struct target_siginfo info;
4129 struct target_ucontext uc;
4130 } rt_sigframe;
4131
4132 static inline abi_ulong
4133 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4134 {
4135 abi_ulong sp;
4136
4137 /* Default to using normal stack */
4138 sp = env->regs[15];
4139
4140 /* This is the X/Open sanctioned signal stack switching. */
4141 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4142 if (!sas_ss_flags(sp)) {
4143 sp = target_sigaltstack_used.ss_sp +
4144 target_sigaltstack_used.ss_size;
4145 }
4146 }
4147
4148 /* This is the legacy signal stack switching. */
4149 else if (/* FIXME !user_mode(regs) */ 0 &&
4150 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4151 ka->sa_restorer) {
4152 sp = (abi_ulong) ka->sa_restorer;
4153 }
4154
4155 return (sp - frame_size) & -8ul;
4156 }
4157
4158 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4159 {
4160 int i;
4161 //save_access_regs(current->thread.acrs); FIXME
4162
4163 /* Copy a 'clean' PSW mask to the user to avoid leaking
4164 information about whether PER is currently on. */
4165 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4166 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4167 for (i = 0; i < 16; i++) {
4168 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4169 }
4170 for (i = 0; i < 16; i++) {
4171 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4172 }
4173 /*
4174 * We have to store the fp registers to current->thread.fp_regs
4175 * to merge them with the emulated registers.
4176 */
4177 //save_fp_regs(&current->thread.fp_regs); FIXME
4178 for (i = 0; i < 16; i++) {
4179 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4180 }
4181 }
4182
4183 static void setup_frame(int sig, struct target_sigaction *ka,
4184 target_sigset_t *set, CPUS390XState *env)
4185 {
4186 sigframe *frame;
4187 abi_ulong frame_addr;
4188
4189 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4190 trace_user_setup_frame(env, frame_addr);
4191 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4192 goto give_sigsegv;
4193 }
4194
4195 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4196
4197 save_sigregs(env, &frame->sregs);
4198
4199 __put_user((abi_ulong)(frame_addr + offsetof(sigframe, sregs)),
4200 (abi_ulong *)&frame->sc.sregs);
4201
4202 /* Set up to return from userspace. If provided, use a stub
4203 already in userspace. */
4204 if (ka->sa_flags & TARGET_SA_RESTORER) {
4205 env->regs[14] = (unsigned long)
4206 ka->sa_restorer | PSW_ADDR_AMODE;
4207 } else {
4208 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4209 | PSW_ADDR_AMODE;
4210 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4211 (uint16_t *)(frame->retcode));
4212 }
4213
4214 /* Set up backchain. */
4215 __put_user(env->regs[15], (abi_ulong *) frame);
4216
4217 /* Set up registers for signal handler */
4218 env->regs[15] = frame_addr;
4219 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4220
4221 env->regs[2] = sig; //map_signal(sig);
4222 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4223
4224 /* We forgot to include these in the sigcontext.
4225 To avoid breaking binary compatibility, they are passed as args. */
4226 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4227 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4228
4229 /* Place signal number on stack to allow backtrace from handler. */
4230 __put_user(env->regs[2], (int *) &frame->signo);
4231 unlock_user_struct(frame, frame_addr, 1);
4232 return;
4233
4234 give_sigsegv:
4235 force_sig(TARGET_SIGSEGV);
4236 }
4237
4238 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4239 target_siginfo_t *info,
4240 target_sigset_t *set, CPUS390XState *env)
4241 {
4242 int i;
4243 rt_sigframe *frame;
4244 abi_ulong frame_addr;
4245
4246 frame_addr = get_sigframe(ka, env, sizeof *frame);
4247 trace_user_setup_rt_frame(env, frame_addr);
4248 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4249 goto give_sigsegv;
4250 }
4251
4252 tswap_siginfo(&frame->info, info);
4253
4254 /* Create the ucontext. */
4255 __put_user(0, &frame->uc.tuc_flags);
4256 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4257 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4258 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4259 &frame->uc.tuc_stack.ss_flags);
4260 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4261 save_sigregs(env, &frame->uc.tuc_mcontext);
4262 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4263 __put_user((abi_ulong)set->sig[i],
4264 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4265 }
4266
4267 /* Set up to return from userspace. If provided, use a stub
4268 already in userspace. */
4269 if (ka->sa_flags & TARGET_SA_RESTORER) {
4270 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4271 } else {
4272 env->regs[14] = (frame_addr + offsetof(rt_sigframe, retcode)) | PSW_ADDR_AMODE;
4273 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4274 (uint16_t *)(frame->retcode));
4275 }
4276
4277 /* Set up backchain. */
4278 __put_user(env->regs[15], (abi_ulong *) frame);
4279
4280 /* Set up registers for signal handler */
4281 env->regs[15] = frame_addr;
4282 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4283
4284 env->regs[2] = sig; //map_signal(sig);
4285 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4286 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
 unlock_user_struct(frame, frame_addr, 1);
4287 return;
4288
4289 give_sigsegv:
4290 force_sig(TARGET_SIGSEGV);
4291 }
4292
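/* Reload guest CPU state (PSW, GPRs, access and FP registers) from a
   target_sigregs block previously filled in by save_sigregs(). */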
4293 static int
4294 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4295 {
4296 int err = 0;
4297 int i;
4298
4299 for (i = 0; i < 16; i++) {
4300 __get_user(env->regs[i], &sc->regs.gprs[i]);
4301 }
4302
4303 __get_user(env->psw.mask, &sc->regs.psw.mask);
4304 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4305 (unsigned long long)env->psw.addr);
4306 __get_user(env->psw.addr, &sc->regs.psw.addr);
4307 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4308
4309 for (i = 0; i < 16; i++) {
4310 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4311 }
4312 for (i = 0; i < 16; i++) {
4313 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4314 }
4315
4316 return err;
4317 }
4318
4319 long do_sigreturn(CPUS390XState *env)
4320 {
4321 sigframe *frame;
4322 abi_ulong frame_addr = env->regs[15];
4323 target_sigset_t target_set;
4324 sigset_t set;
4325
4326 trace_user_do_sigreturn(env, frame_addr);
4327 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4328 goto badframe;
4329 }
4330 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4331
4332 target_to_host_sigset_internal(&set, &target_set);
4333 set_sigmask(&set); /* ~_BLOCKABLE? */
4334
4335 if (restore_sigregs(env, &frame->sregs)) {
4336 goto badframe;
4337 }
4338
4339 unlock_user_struct(frame, frame_addr, 0);
4340 return -TARGET_QEMU_ESIGRETURN;
4341
4342 badframe:
4343 force_sig(TARGET_SIGSEGV);
4344 return 0;
4345 }
4346
4347 long do_rt_sigreturn(CPUS390XState *env)
4348 {
4349 rt_sigframe *frame;
4350 abi_ulong frame_addr = env->regs[15];
4351 sigset_t set;
4352
4353 trace_user_do_rt_sigreturn(env, frame_addr);
4354 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4355 goto badframe;
4356 }
4357 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4358
4359 set_sigmask(&set); /* ~_BLOCKABLE? */
4360
4361 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4362 goto badframe;
4363 }
4364
4365 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4366 get_sp_from_cpustate(env)) == -EFAULT) {
4367 goto badframe;
4368 }
4369 unlock_user_struct(frame, frame_addr, 0);
4370 return -TARGET_QEMU_ESIGRETURN;
4371
4372 badframe:
4373 unlock_user_struct(frame, frame_addr, 0);
4374 force_sig(TARGET_SIGSEGV);
4375 return 0;
4376 }
4377
4378 #elif defined(TARGET_PPC)
4379
4380 /* Size of dummy stack frame allocated when calling signal handler.
4381 See arch/powerpc/include/asm/ptrace.h. */
4382 #if defined(TARGET_PPC64)
4383 #define SIGNAL_FRAMESIZE 128
4384 #else
4385 #define SIGNAL_FRAMESIZE 64
4386 #endif
4387
4388 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4389 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4390 struct target_mcontext {
4391 target_ulong mc_gregs[48];
4392 /* Includes fpscr. */
4393 uint64_t mc_fregs[33];
4394 target_ulong mc_pad[2];
4395 /* We need to handle Altivec and SPE at the same time, which no
4396 kernel needs to do. Fortunately, the kernel defines this bit to
4397 be Altivec-register-large all the time, rather than trying to
4398 twiddle it based on the specific platform. */
4399 union {
4400 /* SPE vector registers. One extra for SPEFSCR. */
4401 uint32_t spe[33];
4402 /* Altivec vector registers. The packing of VSCR and VRSAVE
4403 varies depending on whether we're PPC64 or not: PPC64 splits
4404 them apart; PPC32 stuffs them together. */
4405 #if defined(TARGET_PPC64)
4406 #define QEMU_NVRREG 34
4407 #else
4408 #define QEMU_NVRREG 33
4409 #endif
4410 ppc_avr_t altivec[QEMU_NVRREG];
4411 #undef QEMU_NVRREG
4412 } mc_vregs __attribute__((__aligned__(16)));
4413 };
4414
4415 /* See arch/powerpc/include/asm/sigcontext.h. */
4416 struct target_sigcontext {
4417 target_ulong _unused[4];
4418 int32_t signal;
4419 #if defined(TARGET_PPC64)
4420 int32_t pad0;
4421 #endif
4422 target_ulong handler;
4423 target_ulong oldmask;
4424 target_ulong regs; /* struct pt_regs __user * */
4425 #if defined(TARGET_PPC64)
4426 struct target_mcontext mcontext;
4427 #endif
4428 };
4429
4430 /* Indices for target_mcontext.mc_gregs, below.
4431 See arch/powerpc/include/asm/ptrace.h for details. */
4432 enum {
4433 TARGET_PT_R0 = 0,
4434 TARGET_PT_R1 = 1,
4435 TARGET_PT_R2 = 2,
4436 TARGET_PT_R3 = 3,
4437 TARGET_PT_R4 = 4,
4438 TARGET_PT_R5 = 5,
4439 TARGET_PT_R6 = 6,
4440 TARGET_PT_R7 = 7,
4441 TARGET_PT_R8 = 8,
4442 TARGET_PT_R9 = 9,
4443 TARGET_PT_R10 = 10,
4444 TARGET_PT_R11 = 11,
4445 TARGET_PT_R12 = 12,
4446 TARGET_PT_R13 = 13,
4447 TARGET_PT_R14 = 14,
4448 TARGET_PT_R15 = 15,
4449 TARGET_PT_R16 = 16,
4450 TARGET_PT_R17 = 17,
4451 TARGET_PT_R18 = 18,
4452 TARGET_PT_R19 = 19,
4453 TARGET_PT_R20 = 20,
4454 TARGET_PT_R21 = 21,
4455 TARGET_PT_R22 = 22,
4456 TARGET_PT_R23 = 23,
4457 TARGET_PT_R24 = 24,
4458 TARGET_PT_R25 = 25,
4459 TARGET_PT_R26 = 26,
4460 TARGET_PT_R27 = 27,
4461 TARGET_PT_R28 = 28,
4462 TARGET_PT_R29 = 29,
4463 TARGET_PT_R30 = 30,
4464 TARGET_PT_R31 = 31,
4465 TARGET_PT_NIP = 32,
4466 TARGET_PT_MSR = 33,
4467 TARGET_PT_ORIG_R3 = 34,
4468 TARGET_PT_CTR = 35,
4469 TARGET_PT_LNK = 36,
4470 TARGET_PT_XER = 37,
4471 TARGET_PT_CCR = 38,
4472 /* Yes, there are two registers with #39. One is 64-bit only. */
4473 TARGET_PT_MQ = 39,
4474 TARGET_PT_SOFTE = 39,
4475 TARGET_PT_TRAP = 40,
4476 TARGET_PT_DAR = 41,
4477 TARGET_PT_DSISR = 42,
4478 TARGET_PT_RESULT = 43,
4479 TARGET_PT_REGS_COUNT = 44
4480 };
4481
4482
4483 struct target_ucontext {
4484 target_ulong tuc_flags;
4485 target_ulong tuc_link; /* struct ucontext __user * */
4486 struct target_sigaltstack tuc_stack;
4487 #if !defined(TARGET_PPC64)
4488 int32_t tuc_pad[7];
4489 target_ulong tuc_regs; /* struct mcontext __user *
4490 points to uc_mcontext field */
4491 #endif
4492 target_sigset_t tuc_sigmask;
4493 #if defined(TARGET_PPC64)
4494 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4495 struct target_sigcontext tuc_sigcontext;
4496 #else
4497 int32_t tuc_maskext[30];
4498 int32_t tuc_pad2[3];
4499 struct target_mcontext tuc_mcontext;
4500 #endif
4501 };
4502
4503 /* See arch/powerpc/kernel/signal_32.c. */
4504 struct target_sigframe {
4505 struct target_sigcontext sctx;
4506 struct target_mcontext mctx;
4507 int32_t abigap[56];
4508 };
4509
4510 #if defined(TARGET_PPC64)
4511
4512 #define TARGET_TRAMP_SIZE 6
4513
4514 struct target_rt_sigframe {
4515 /* sys_rt_sigreturn requires the ucontext be the first field */
4516 struct target_ucontext uc;
4517 target_ulong _unused[2];
4518 uint32_t trampoline[TARGET_TRAMP_SIZE];
4519 target_ulong pinfo; /* struct siginfo __user * */
4520 target_ulong puc; /* void __user * */
4521 struct target_siginfo info;
4522 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4523 char abigap[288];
4524 } __attribute__((aligned(16)));
4525
4526 #else
4527
4528 struct target_rt_sigframe {
4529 struct target_siginfo info;
4530 struct target_ucontext uc;
4531 int32_t abigap[56];
4532 };
4533
4534 #endif
4535
4536 #if defined(TARGET_PPC64)
4537
4538 struct target_func_ptr {
4539 target_ulong entry;
4540 target_ulong toc;
4541 };
4542
4543 #endif
4544
4545 /* We use the mc_pad field for the signal return trampoline. */
4546 #define tramp mc_pad
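/* encode_trampoline() below fills it with "li r0,__NR_(rt_)sigreturn; sc";
   LR is then pointed at it because QEMU does not emulate the kernel's VDSO. */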
4547
4548 /* See arch/powerpc/kernel/signal.c. */
4549 static target_ulong get_sigframe(struct target_sigaction *ka,
4550 CPUPPCState *env,
4551 int frame_size)
4552 {
4553 target_ulong oldsp, newsp;
4554
4555 oldsp = env->gpr[1];
4556
4557 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4558 (sas_ss_flags(oldsp) == 0)) {
4559 oldsp = (target_sigaltstack_used.ss_sp
4560 + target_sigaltstack_used.ss_size);
4561 }
4562
4563 newsp = (oldsp - frame_size) & ~0xFUL;
4564
4565 return newsp;
4566 }
4567
4568 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4569 {
4570 target_ulong msr = env->msr;
4571 int i;
4572 target_ulong ccr = 0;
4573
4574 /* In general, the kernel attempts to be intelligent about what it
4575 needs to save for Altivec/FP/SPE registers. We don't care that
4576 much, so we just go ahead and save everything. */
4577
4578 /* Save general registers. */
4579 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4580 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4581 }
4582 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4583 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4584 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4585 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4586
4587 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4588 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4589 }
4590 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4591
4592 /* Save Altivec registers if necessary. */
4593 if (env->insns_flags & PPC_ALTIVEC) {
4594 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4595 ppc_avr_t *avr = &env->avr[i];
4596 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4597
4598 __put_user(avr->u64[0], &vreg->u64[0]);
4599 __put_user(avr->u64[1], &vreg->u64[1]);
4600 }
4601 /* Set MSR_VR in the saved MSR value to indicate that
4602 frame->mc_vregs contains valid data. */
4603 msr |= MSR_VR;
4604 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4605 &frame->mc_vregs.altivec[32].u32[3]);
4606 }
4607
4608 /* Save floating point registers. */
4609 if (env->insns_flags & PPC_FLOAT) {
4610 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4611 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4612 }
4613 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4614 }
4615
4616 /* Save SPE registers. The kernel only saves the high half. */
4617 if (env->insns_flags & PPC_SPE) {
4618 #if defined(TARGET_PPC64)
4619 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4620 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4621 }
4622 #else
4623 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4624 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4625 }
4626 #endif
4627 /* Set MSR_SPE in the saved MSR value to indicate that
4628 frame->mc_vregs contains valid data. */
4629 msr |= MSR_SPE;
4630 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4631 }
4632
4633 /* Store MSR. */
4634 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4635 }
4636
4637 static void encode_trampoline(int sigret, uint32_t *tramp)
4638 {
4639 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4640 if (sigret) {
4641 __put_user(0x38000000 | sigret, &tramp[0]);
4642 __put_user(0x44000002, &tramp[1]);
4643 }
4644 }
4645
4646 static void restore_user_regs(CPUPPCState *env,
4647 struct target_mcontext *frame, int sig)
4648 {
4649 target_ulong save_r2 = 0;
4650 target_ulong msr;
4651 target_ulong ccr;
4652
4653 int i;
4654
4655 if (!sig) {
4656 save_r2 = env->gpr[2];
4657 }
4658
4659 /* Restore general registers. */
4660 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4661 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4662 }
4663 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4664 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4665 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4666 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4667 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4668
4669 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4670 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4671 }
4672
4673 if (!sig) {
4674 env->gpr[2] = save_r2;
4675 }
4676 /* Restore MSR. */
4677 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4678
4679 /* If doing signal return, restore the previous little-endian mode. */
4680 if (sig)
4681 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4682
4683 /* Restore Altivec registers if necessary. */
4684 if (env->insns_flags & PPC_ALTIVEC) {
4685 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4686 ppc_avr_t *avr = &env->avr[i];
4687 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4688
4689 __get_user(avr->u64[0], &vreg->u64[0]);
4690 __get_user(avr->u64[1], &vreg->u64[1]);
4691 }
4692 /* The Altivec registers themselves were reloaded above; here we
4693 restore VRSAVE, which is stored alongside them in mc_vregs. */
4694 __get_user(env->spr[SPR_VRSAVE],
4695 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4696 }
4697
4698 /* Restore floating point registers. */
4699 if (env->insns_flags & PPC_FLOAT) {
4700 uint64_t fpscr;
4701 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4702 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4703 }
4704 __get_user(fpscr, &frame->mc_fregs[32]);
4705 env->fpscr = (uint32_t) fpscr;
4706 }
4707
4708 /* Restore SPE registers. The kernel only saves the high half. */
4709 if (env->insns_flags & PPC_SPE) {
4710 #if defined(TARGET_PPC64)
4711 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4712 uint32_t hi;
4713
4714 __get_user(hi, &frame->mc_vregs.spe[i]);
4715 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4716 }
4717 #else
4718 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4719 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4720 }
4721 #endif
4722 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4723 }
4724 }
4725
4726 static void setup_frame(int sig, struct target_sigaction *ka,
4727 target_sigset_t *set, CPUPPCState *env)
4728 {
4729 struct target_sigframe *frame;
4730 struct target_sigcontext *sc;
4731 target_ulong frame_addr, newsp;
4732 int err = 0;
4733 #if defined(TARGET_PPC64)
4734 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4735 #endif
4736
4737 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4738 trace_user_setup_frame(env, frame_addr);
4739 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4740 goto sigsegv;
4741 sc = &frame->sctx;
4742
4743 __put_user(ka->_sa_handler, &sc->handler);
4744 __put_user(set->sig[0], &sc->oldmask);
4745 #if TARGET_ABI_BITS == 64
4746 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4747 #else
4748 __put_user(set->sig[1], &sc->_unused[3]);
4749 #endif
4750 __put_user(h2g(&frame->mctx), &sc->regs);
4751 __put_user(sig, &sc->signal);
4752
4753 /* Save user regs. */
4754 save_user_regs(env, &frame->mctx);
4755
4756 /* Construct the trampoline code on the stack. */
4757 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4758
4759 /* The kernel checks for the presence of a VDSO here. We don't
4760 emulate a vdso, so use a sigreturn system call. */
4761 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4762
4763 /* Turn off all fp exceptions. */
4764 env->fpscr = 0;
4765
4766 /* Create a stack frame for the caller of the handler. */
4767 newsp = frame_addr - SIGNAL_FRAMESIZE;
4768 err |= put_user(env->gpr[1], newsp, target_ulong);
4769
4770 if (err)
4771 goto sigsegv;
4772
4773 /* Set up registers for signal handler. */
4774 env->gpr[1] = newsp;
4775 env->gpr[3] = sig;
4776 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4777
4778 #if defined(TARGET_PPC64)
4779 if (get_ppc64_abi(image) < 2) {
4780 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4781 struct target_func_ptr *handler =
4782 (struct target_func_ptr *)g2h(ka->_sa_handler);
4783 env->nip = tswapl(handler->entry);
4784 env->gpr[2] = tswapl(handler->toc);
4785 } else {
4786 /* ELFv2 PPC64 function pointers are entry points, but R12
4787 * must also be set */
4788 env->nip = tswapl((target_ulong) ka->_sa_handler);
4789 env->gpr[12] = env->nip;
4790 }
4791 #else
4792 env->nip = (target_ulong) ka->_sa_handler;
4793 #endif
4794
4795 /* Signal handlers are entered in big-endian mode. */
4796 env->msr &= ~(1ull << MSR_LE);
4797
4798 unlock_user_struct(frame, frame_addr, 1);
4799 return;
4800
4801 sigsegv:
4802 unlock_user_struct(frame, frame_addr, 1);
4803 force_sig(TARGET_SIGSEGV);
4804 }
4805
4806 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4807 target_siginfo_t *info,
4808 target_sigset_t *set, CPUPPCState *env)
4809 {
4810 struct target_rt_sigframe *rt_sf;
4811 uint32_t *trampptr = 0;
4812 struct target_mcontext *mctx = 0;
4813 target_ulong rt_sf_addr, newsp = 0;
4814 int i, err = 0;
4815 #if defined(TARGET_PPC64)
4816 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4817 #endif
4818
4819 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4820 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4821 goto sigsegv;
4822
4823 tswap_siginfo(&rt_sf->info, info);
4824
4825 __put_user(0, &rt_sf->uc.tuc_flags);
4826 __put_user(0, &rt_sf->uc.tuc_link);
4827 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4828 &rt_sf->uc.tuc_stack.ss_sp);
4829 __put_user(sas_ss_flags(env->gpr[1]),
4830 &rt_sf->uc.tuc_stack.ss_flags);
4831 __put_user(target_sigaltstack_used.ss_size,
4832 &rt_sf->uc.tuc_stack.ss_size);
4833 #if !defined(TARGET_PPC64)
4834 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4835 &rt_sf->uc.tuc_regs);
4836 #endif
4837 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4838 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4839 }
4840
4841 #if defined(TARGET_PPC64)
4842 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4843 trampptr = &rt_sf->trampoline[0];
4844 #else
4845 mctx = &rt_sf->uc.tuc_mcontext;
4846 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4847 #endif
4848
4849 save_user_regs(env, mctx);
4850 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4851
4852 /* The kernel checks for the presence of a VDSO here. We don't
4853 emulate a vdso, so use a sigreturn system call. */
4854 env->lr = (target_ulong) h2g(trampptr);
4855
4856 /* Turn off all fp exceptions. */
4857 env->fpscr = 0;
4858
4859 /* Create a stack frame for the caller of the handler. */
4860 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4861 err |= put_user(env->gpr[1], newsp, target_ulong);
4862
4863 if (err)
4864 goto sigsegv;
4865
4866 /* Set up registers for signal handler. */
4867 env->gpr[1] = newsp;
4868 env->gpr[3] = (target_ulong) sig;
4869 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4870 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4871 env->gpr[6] = (target_ulong) h2g(rt_sf);
4872
4873 #if defined(TARGET_PPC64)
4874 if (get_ppc64_abi(image) < 2) {
4875 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4876 struct target_func_ptr *handler =
4877 (struct target_func_ptr *)g2h(ka->_sa_handler);
4878 env->nip = tswapl(handler->entry);
4879 env->gpr[2] = tswapl(handler->toc);
4880 } else {
4881 /* ELFv2 PPC64 function pointers are entry points, but R12
4882 * must also be set */
4883 env->nip = tswapl((target_ulong) ka->_sa_handler);
4884 env->gpr[12] = env->nip;
4885 }
4886 #else
4887 env->nip = (target_ulong) ka->_sa_handler;
4888 #endif
4889
4890 /* Signal handlers are entered in big-endian mode. */
4891 env->msr &= ~(1ull << MSR_LE);
4892
4893 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4894 return;
4895
4896 sigsegv:
4897 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4898 force_sig(TARGET_SIGSEGV);
4899
4900 }
4901
4902 long do_sigreturn(CPUPPCState *env)
4903 {
4904 struct target_sigcontext *sc = NULL;
4905 struct target_mcontext *sr = NULL;
4906 target_ulong sr_addr = 0, sc_addr;
4907 sigset_t blocked;
4908 target_sigset_t set;
4909
4910 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4911 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4912 goto sigsegv;
4913
4914 #if defined(TARGET_PPC64)
4915 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4916 #else
4917 __get_user(set.sig[0], &sc->oldmask);
4918 __get_user(set.sig[1], &sc->_unused[3]);
4919 #endif
4920 target_to_host_sigset_internal(&blocked, &set);
4921 set_sigmask(&blocked);
4922
4923 __get_user(sr_addr, &sc->regs);
4924 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4925 goto sigsegv;
4926 restore_user_regs(env, sr, 1);
4927
4928 unlock_user_struct(sr, sr_addr, 1);
4929 unlock_user_struct(sc, sc_addr, 1);
4930 return -TARGET_QEMU_ESIGRETURN;
4931
4932 sigsegv:
4933 unlock_user_struct(sr, sr_addr, 1);
4934 unlock_user_struct(sc, sc_addr, 1);
4935 force_sig(TARGET_SIGSEGV);
4936 return 0;
4937 }
4938
4939 /* See arch/powerpc/kernel/signal_32.c. */
4940 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4941 {
4942 struct target_mcontext *mcp;
4943 target_ulong mcp_addr;
4944 sigset_t blocked;
4945 target_sigset_t set;
4946
4947 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4948 sizeof (set)))
4949 return 1;
4950
4951 #if defined(TARGET_PPC64)
4952 mcp_addr = h2g(ucp) +
4953 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4954 #else
4955 __get_user(mcp_addr, &ucp->tuc_regs);
4956 #endif
4957
4958 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4959 return 1;
4960
4961 target_to_host_sigset_internal(&blocked, &set);
4962 set_sigmask(&blocked);
4963 restore_user_regs(env, mcp, sig);
4964
4965 unlock_user_struct(mcp, mcp_addr, 1);
4966 return 0;
4967 }
4968
4969 long do_rt_sigreturn(CPUPPCState *env)
4970 {
4971 struct target_rt_sigframe *rt_sf = NULL;
4972 target_ulong rt_sf_addr;
4973
4974 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4975 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4976 goto sigsegv;
4977
4978 if (do_setcontext(&rt_sf->uc, env, 1))
4979 goto sigsegv;
4980
4981 do_sigaltstack(rt_sf_addr
4982 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4983 0, env->gpr[1]);
4984
4985 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4986 return -TARGET_QEMU_ESIGRETURN;
4987
4988 sigsegv:
4989 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4990 force_sig(TARGET_SIGSEGV);
4991 return 0;
4992 }
4993
4994 #elif defined(TARGET_M68K)
4995
4996 struct target_sigcontext {
4997 abi_ulong sc_mask;
4998 abi_ulong sc_usp;
4999 abi_ulong sc_d0;
5000 abi_ulong sc_d1;
5001 abi_ulong sc_a0;
5002 abi_ulong sc_a1;
5003 unsigned short sc_sr;
5004 abi_ulong sc_pc;
5005 };
5006
5007 struct target_sigframe
5008 {
5009 abi_ulong pretcode;
5010 int sig;
5011 int code;
5012 abi_ulong psc;
5013 char retcode[8];
5014 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5015 struct target_sigcontext sc;
5016 };
5017
5018 typedef int target_greg_t;
5019 #define TARGET_NGREG 18
5020 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5021
5022 typedef struct target_fpregset {
5023 int f_fpcntl[3];
5024 int f_fpregs[8*3];
5025 } target_fpregset_t;
5026
5027 struct target_mcontext {
5028 int version;
5029 target_gregset_t gregs;
5030 target_fpregset_t fpregs;
5031 };
5032
5033 #define TARGET_MCONTEXT_VERSION 2
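/* target_rt_restore_ucontext() below refuses to restore a frame whose
   mcontext version field does not match this value. */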
5034
5035 struct target_ucontext {
5036 abi_ulong tuc_flags;
5037 abi_ulong tuc_link;
5038 target_stack_t tuc_stack;
5039 struct target_mcontext tuc_mcontext;
5040 abi_long tuc_filler[80];
5041 target_sigset_t tuc_sigmask;
5042 };
5043
5044 struct target_rt_sigframe
5045 {
5046 abi_ulong pretcode;
5047 int sig;
5048 abi_ulong pinfo;
5049 abi_ulong puc;
5050 char retcode[8];
5051 struct target_siginfo info;
5052 struct target_ucontext uc;
5053 };
5054
5055 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5056 abi_ulong mask)
5057 {
5058 __put_user(mask, &sc->sc_mask);
5059 __put_user(env->aregs[7], &sc->sc_usp);
5060 __put_user(env->dregs[0], &sc->sc_d0);
5061 __put_user(env->dregs[1], &sc->sc_d1);
5062 __put_user(env->aregs[0], &sc->sc_a0);
5063 __put_user(env->aregs[1], &sc->sc_a1);
5064 __put_user(env->sr, &sc->sc_sr);
5065 __put_user(env->pc, &sc->sc_pc);
5066 }
5067
5068 static void
5069 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5070 {
5071 int temp;
5072
5073 __get_user(env->aregs[7], &sc->sc_usp);
5074 __get_user(env->dregs[0], &sc->sc_d0);
5075 __get_user(env->dregs[1], &sc->sc_d1);
5076 __get_user(env->aregs[0], &sc->sc_a0);
5077 __get_user(env->aregs[1], &sc->sc_a1);
5078 __get_user(env->pc, &sc->sc_pc);
5079 __get_user(temp, &sc->sc_sr);
5080 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5081 }
5082
5083 /*
5084 * Determine which stack to use..
5085 */
5086 static inline abi_ulong
5087 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5088 size_t frame_size)
5089 {
5090 unsigned long sp;
5091
5092 sp = regs->aregs[7];
5093
5094 /* This is the X/Open sanctioned signal stack switching. */
5095 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5096 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5097 }
5098
5099 return ((sp - frame_size) & -8UL);
5100 }
5101
5102 static void setup_frame(int sig, struct target_sigaction *ka,
5103 target_sigset_t *set, CPUM68KState *env)
5104 {
5105 struct target_sigframe *frame;
5106 abi_ulong frame_addr;
5107 abi_ulong retcode_addr;
5108 abi_ulong sc_addr;
5109 int i;
5110
5111 frame_addr = get_sigframe(ka, env, sizeof *frame);
5112 trace_user_setup_frame(env, frame_addr);
5113 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5114 goto give_sigsegv;
5115 }
5116
5117 __put_user(sig, &frame->sig);
5118
5119 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5120 __put_user(sc_addr, &frame->psc);
5121
5122 setup_sigcontext(&frame->sc, env, set->sig[0]);
5123
5124 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5125 __put_user(set->sig[i], &frame->extramask[i - 1]);
5126 }
5127
5128 /* Set up to return from userspace. */
5129
5130 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5131 __put_user(retcode_addr, &frame->pretcode);
5132
5133 /* moveq #,d0; trap #0 */
5134
5135 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5136 (uint32_t *)(frame->retcode));
5137
5138 /* Set up to return from userspace */
5139
5140 env->aregs[7] = frame_addr;
5141 env->pc = ka->_sa_handler;
5142
5143 unlock_user_struct(frame, frame_addr, 1);
5144 return;
5145
5146 give_sigsegv:
5147 force_sig(TARGET_SIGSEGV);
5148 }
5149
5150 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5151 CPUM68KState *env)
5152 {
5153 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5154
5155 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5156 __put_user(env->dregs[0], &gregs[0]);
5157 __put_user(env->dregs[1], &gregs[1]);
5158 __put_user(env->dregs[2], &gregs[2]);
5159 __put_user(env->dregs[3], &gregs[3]);
5160 __put_user(env->dregs[4], &gregs[4]);
5161 __put_user(env->dregs[5], &gregs[5]);
5162 __put_user(env->dregs[6], &gregs[6]);
5163 __put_user(env->dregs[7], &gregs[7]);
5164 __put_user(env->aregs[0], &gregs[8]);
5165 __put_user(env->aregs[1], &gregs[9]);
5166 __put_user(env->aregs[2], &gregs[10]);
5167 __put_user(env->aregs[3], &gregs[11]);
5168 __put_user(env->aregs[4], &gregs[12]);
5169 __put_user(env->aregs[5], &gregs[13]);
5170 __put_user(env->aregs[6], &gregs[14]);
5171 __put_user(env->aregs[7], &gregs[15]);
5172 __put_user(env->pc, &gregs[16]);
5173 __put_user(env->sr, &gregs[17]);
5174
5175 return 0;
5176 }
5177
5178 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5179 struct target_ucontext *uc)
5180 {
5181 int temp;
5182 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5183
5184 __get_user(temp, &uc->tuc_mcontext.version);
5185 if (temp != TARGET_MCONTEXT_VERSION)
5186 goto badframe;
5187
5188 /* restore passed registers */
5189 __get_user(env->dregs[0], &gregs[0]);
5190 __get_user(env->dregs[1], &gregs[1]);
5191 __get_user(env->dregs[2], &gregs[2]);
5192 __get_user(env->dregs[3], &gregs[3]);
5193 __get_user(env->dregs[4], &gregs[4]);
5194 __get_user(env->dregs[5], &gregs[5]);
5195 __get_user(env->dregs[6], &gregs[6]);
5196 __get_user(env->dregs[7], &gregs[7]);
5197 __get_user(env->aregs[0], &gregs[8]);
5198 __get_user(env->aregs[1], &gregs[9]);
5199 __get_user(env->aregs[2], &gregs[10]);
5200 __get_user(env->aregs[3], &gregs[11]);
5201 __get_user(env->aregs[4], &gregs[12]);
5202 __get_user(env->aregs[5], &gregs[13]);
5203 __get_user(env->aregs[6], &gregs[14]);
5204 __get_user(env->aregs[7], &gregs[15]);
5205 __get_user(env->pc, &gregs[16]);
5206 __get_user(temp, &gregs[17]);
5207 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5208
5209 return 0;
5210
5211 badframe:
5212 return 1;
5213 }
5214
5215 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5216 target_siginfo_t *info,
5217 target_sigset_t *set, CPUM68KState *env)
5218 {
5219 struct target_rt_sigframe *frame;
5220 abi_ulong frame_addr;
5221 abi_ulong retcode_addr;
5222 abi_ulong info_addr;
5223 abi_ulong uc_addr;
5224 int err = 0;
5225 int i;
5226
5227 frame_addr = get_sigframe(ka, env, sizeof *frame);
5228 trace_user_setup_rt_frame(env, frame_addr);
5229 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5230 goto give_sigsegv;
5231 }
5232
5233 __put_user(sig, &frame->sig);
5234
5235 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5236 __put_user(info_addr, &frame->pinfo);
5237
5238 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5239 __put_user(uc_addr, &frame->puc);
5240
5241 tswap_siginfo(&frame->info, info);
5242
5243 /* Create the ucontext */
5244
5245 __put_user(0, &frame->uc.tuc_flags);
5246 __put_user(0, &frame->uc.tuc_link);
5247 __put_user(target_sigaltstack_used.ss_sp,
5248 &frame->uc.tuc_stack.ss_sp);
5249 __put_user(sas_ss_flags(env->aregs[7]),
5250 &frame->uc.tuc_stack.ss_flags);
5251 __put_user(target_sigaltstack_used.ss_size,
5252 &frame->uc.tuc_stack.ss_size);
5253 err |= target_rt_setup_ucontext(&frame->uc, env);
5254
5255 if (err)
5256 goto give_sigsegv;
5257
5258 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5259 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5260 }
5261
5262 /* Set up to return from userspace. */
5263
5264 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5265 __put_user(retcode_addr, &frame->pretcode);
5266
5267 /* moveq #,d0; notb d0; trap #0 */
5268
5269 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5270 (uint32_t *)(frame->retcode + 0));
5271 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5272
5273 if (err)
5274 goto give_sigsegv;
5275
5276 /* Set up to return from userspace */
5277
5278 env->aregs[7] = frame_addr;
5279 env->pc = ka->_sa_handler;
5280
5281 unlock_user_struct(frame, frame_addr, 1);
5282 return;
5283
5284 give_sigsegv:
5285 unlock_user_struct(frame, frame_addr, 1);
5286 force_sig(TARGET_SIGSEGV);
5287 }
5288
5289 long do_sigreturn(CPUM68KState *env)
5290 {
5291 struct target_sigframe *frame;
5292 abi_ulong frame_addr = env->aregs[7] - 4;
5293 target_sigset_t target_set;
5294 sigset_t set;
5295 int i;
5296
5297 trace_user_do_sigreturn(env, frame_addr);
5298 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5299 goto badframe;
5300
5301 /* set blocked signals */
5302
5303 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5304
5305 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5306 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5307 }
5308
5309 target_to_host_sigset_internal(&set, &target_set);
5310 set_sigmask(&set);
5311
5312 /* restore registers */
5313
5314 restore_sigcontext(env, &frame->sc);
5315
5316 unlock_user_struct(frame, frame_addr, 0);
5317 return -TARGET_QEMU_ESIGRETURN;
5318
5319 badframe:
5320 force_sig(TARGET_SIGSEGV);
5321 return 0;
5322 }
5323
5324 long do_rt_sigreturn(CPUM68KState *env)
5325 {
5326 struct target_rt_sigframe *frame;
5327 abi_ulong frame_addr = env->aregs[7] - 4;
5329 sigset_t set;
5330
5331 trace_user_do_rt_sigreturn(env, frame_addr);
5332 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5333 goto badframe;
5334
5335 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5336 set_sigmask(&set);
5337
5338 /* restore registers */
5339
5340 if (target_rt_restore_ucontext(env, &frame->uc))
5341 goto badframe;
5342
5343 if (do_sigaltstack(frame_addr +
5344 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5345 0, get_sp_from_cpustate(env)) == -EFAULT)
5346 goto badframe;
5347
5348 unlock_user_struct(frame, frame_addr, 0);
5349 return -TARGET_QEMU_ESIGRETURN;
5350
5351 badframe:
5352 unlock_user_struct(frame, frame_addr, 0);
5353 force_sig(TARGET_SIGSEGV);
5354 return 0;
5355 }
5356
5357 #elif defined(TARGET_ALPHA)
5358
5359 struct target_sigcontext {
5360 abi_long sc_onstack;
5361 abi_long sc_mask;
5362 abi_long sc_pc;
5363 abi_long sc_ps;
5364 abi_long sc_regs[32];
5365 abi_long sc_ownedfp;
5366 abi_long sc_fpregs[32];
5367 abi_ulong sc_fpcr;
5368 abi_ulong sc_fp_control;
5369 abi_ulong sc_reserved1;
5370 abi_ulong sc_reserved2;
5371 abi_ulong sc_ssize;
5372 abi_ulong sc_sbase;
5373 abi_ulong sc_traparg_a0;
5374 abi_ulong sc_traparg_a1;
5375 abi_ulong sc_traparg_a2;
5376 abi_ulong sc_fp_trap_pc;
5377 abi_ulong sc_fp_trigger_sum;
5378 abi_ulong sc_fp_trigger_inst;
5379 };
5380
5381 struct target_ucontext {
5382 abi_ulong tuc_flags;
5383 abi_ulong tuc_link;
5384 abi_ulong tuc_osf_sigmask;
5385 target_stack_t tuc_stack;
5386 struct target_sigcontext tuc_mcontext;
5387 target_sigset_t tuc_sigmask;
5388 };
5389
5390 struct target_sigframe {
5391 struct target_sigcontext sc;
5392 unsigned int retcode[3];
5393 };
5394
5395 struct target_rt_sigframe {
5396 target_siginfo_t info;
5397 struct target_ucontext uc;
5398 unsigned int retcode[3];
5399 };
5400
5401 #define INSN_MOV_R30_R16 0x47fe0410
5402 #define INSN_LDI_R0 0x201f0000
5403 #define INSN_CALLSYS 0x00000083
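/* Default sigreturn trampoline used when no sa_restorer is supplied:
   "mov $sp,$16; ldi $0,__NR_(rt_)sigreturn; callsys" is written into
   frame->retcode and $26 (ra) is pointed back at it. */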
5404
5405 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5406 abi_ulong frame_addr, target_sigset_t *set)
5407 {
5408 int i;
5409
5410 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5411 __put_user(set->sig[0], &sc->sc_mask);
5412 __put_user(env->pc, &sc->sc_pc);
5413 __put_user(8, &sc->sc_ps);
5414
5415 for (i = 0; i < 31; ++i) {
5416 __put_user(env->ir[i], &sc->sc_regs[i]);
5417 }
5418 __put_user(0, &sc->sc_regs[31]);
5419
5420 for (i = 0; i < 31; ++i) {
5421 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5422 }
5423 __put_user(0, &sc->sc_fpregs[31]);
5424 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5425
5426 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5427 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5428 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5429 }
5430
5431 static void restore_sigcontext(CPUAlphaState *env,
5432 struct target_sigcontext *sc)
5433 {
5434 uint64_t fpcr;
5435 int i;
5436
5437 __get_user(env->pc, &sc->sc_pc);
5438
5439 for (i = 0; i < 31; ++i) {
5440 __get_user(env->ir[i], &sc->sc_regs[i]);
5441 }
5442 for (i = 0; i < 31; ++i) {
5443 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5444 }
5445
5446 __get_user(fpcr, &sc->sc_fpcr);
5447 cpu_alpha_store_fpcr(env, fpcr);
5448 }
5449
5450 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5451 CPUAlphaState *env,
5452 unsigned long framesize)
5453 {
5454 abi_ulong sp = env->ir[IR_SP];
5455
5456 /* This is the X/Open sanctioned signal stack switching. */
5457 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5458 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5459 }
5460 return (sp - framesize) & -32;
5461 }
5462
5463 static void setup_frame(int sig, struct target_sigaction *ka,
5464 target_sigset_t *set, CPUAlphaState *env)
5465 {
5466 abi_ulong frame_addr, r26;
5467 struct target_sigframe *frame;
5468 int err = 0;
5469
5470 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5471 trace_user_setup_frame(env, frame_addr);
5472 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5473 goto give_sigsegv;
5474 }
5475
5476 setup_sigcontext(&frame->sc, env, frame_addr, set);
5477
5478 if (ka->sa_restorer) {
5479 r26 = ka->sa_restorer;
5480 } else {
5481 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5482 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5483 &frame->retcode[1]);
5484 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5485 /* imb() */
5486 r26 = frame_addr;
5487 }
5488
5489 unlock_user_struct(frame, frame_addr, 1);
5490
5491 if (err) {
5492 give_sigsegv:
5493 if (sig == TARGET_SIGSEGV) {
5494 ka->_sa_handler = TARGET_SIG_DFL;
5495 }
5496 force_sig(TARGET_SIGSEGV);
5497 }
5498
5499 env->ir[IR_RA] = r26;
5500 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5501 env->ir[IR_A0] = sig;
5502 env->ir[IR_A1] = 0;
5503 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5504 env->ir[IR_SP] = frame_addr;
5505 }
5506
5507 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5508 target_siginfo_t *info,
5509 target_sigset_t *set, CPUAlphaState *env)
5510 {
5511 abi_ulong frame_addr, r26;
5512 struct target_rt_sigframe *frame;
5513 int i, err = 0;
5514
5515 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5516 trace_user_setup_rt_frame(env, frame_addr);
5517 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5518 goto give_sigsegv;
5519 }
5520
5521 tswap_siginfo(&frame->info, info);
5522
5523 __put_user(0, &frame->uc.tuc_flags);
5524 __put_user(0, &frame->uc.tuc_link);
5525 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5526 __put_user(target_sigaltstack_used.ss_sp,
5527 &frame->uc.tuc_stack.ss_sp);
5528 __put_user(sas_ss_flags(env->ir[IR_SP]),
5529 &frame->uc.tuc_stack.ss_flags);
5530 __put_user(target_sigaltstack_used.ss_size,
5531 &frame->uc.tuc_stack.ss_size);
5532 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5533 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5534 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5535 }
5536
5537 if (ka->sa_restorer) {
5538 r26 = ka->sa_restorer;
5539 } else {
5540 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5541 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5542 &frame->retcode[1]);
5543 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5544 /* imb(); */
5545 r26 = frame_addr;
5546 }
5547
5548 if (err) {
5549 give_sigsegv:
5550 if (sig == TARGET_SIGSEGV) {
5551 ka->_sa_handler = TARGET_SIG_DFL;
5552 }
5553 force_sig(TARGET_SIGSEGV);
5554 }
5555
5556 env->ir[IR_RA] = r26;
5557 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5558 env->ir[IR_A0] = sig;
5559 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5560 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5561 env->ir[IR_SP] = frame_addr;
5562 }
5563
5564 long do_sigreturn(CPUAlphaState *env)
5565 {
5566 struct target_sigcontext *sc;
5567 abi_ulong sc_addr = env->ir[IR_A0];
5568 target_sigset_t target_set;
5569 sigset_t set;
5570
5571 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5572 goto badframe;
5573 }
5574
5575 target_sigemptyset(&target_set);
5576 __get_user(target_set.sig[0], &sc->sc_mask);
5577
5578 target_to_host_sigset_internal(&set, &target_set);
5579 set_sigmask(&set);
5580
5581 restore_sigcontext(env, sc);
5582 unlock_user_struct(sc, sc_addr, 0);
5583 return -TARGET_QEMU_ESIGRETURN;
5584
5585 badframe:
5586 force_sig(TARGET_SIGSEGV);
5587 }
5588
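/* rt_sigreturn for Alpha: the rt frame address arrives in $a0; restore
 * the signal mask, the machine context and the sigaltstack settings.
 */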
5589 long do_rt_sigreturn(CPUAlphaState *env)
5590 {
5591 abi_ulong frame_addr = env->ir[IR_A0];
5592 struct target_rt_sigframe *frame;
5593 sigset_t set;
5594
5595 trace_user_do_rt_sigreturn(env, frame_addr);
5596 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5597 goto badframe;
5598 }
5599 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5600 set_sigmask(&set);
5601
5602 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5603 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5604 uc.tuc_stack),
5605 0, env->ir[IR_SP]) == -EFAULT) {
5606 goto badframe;
5607 }
5608
5609 unlock_user_struct(frame, frame_addr, 0);
5610 return -TARGET_QEMU_ESIGRETURN;
5611
5612
5613 badframe:
5614 unlock_user_struct(frame, frame_addr, 0);
5615 force_sig(TARGET_SIGSEGV);
5616 }
5617
5618 #elif defined(TARGET_TILEGX)
5619
5620 struct target_sigcontext {
5621 union {
5622 /* General-purpose registers. */
5623 abi_ulong gregs[56];
5624 struct {
5625 abi_ulong __gregs[53];
5626 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5627 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5628 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5629 };
5630 };
5631 abi_ulong pc; /* Program counter. */
5632 abi_ulong ics; /* In Interrupt Critical Section? */
5633 abi_ulong faultnum; /* Fault number. */
5634 abi_ulong pad[5];
5635 };
5636
5637 struct target_ucontext {
5638 abi_ulong tuc_flags;
5639 abi_ulong tuc_link;
5640 target_stack_t tuc_stack;
5641 struct target_sigcontext tuc_mcontext;
5642 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5643 };
5644
5645 struct target_rt_sigframe {
5646 unsigned char save_area[16]; /* caller save area */
5647 struct target_siginfo info;
5648 struct target_ucontext uc;
5649 abi_ulong retcode[2];
5650 };
5651
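/* Trampoline instructions used when the handler has no sa_restorer:
 * "moveli r10, 139" loads the rt_sigreturn syscall number into the
 * TILE-Gx syscall-number register and "swint1" traps into the kernel.
 */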
5652 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5653 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
5654
5655
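/* Save the guest general-purpose registers and PC into the sigcontext
 * inside the frame; the signal number is recorded in the faultnum field.
 */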
5656 static void setup_sigcontext(struct target_sigcontext *sc,
5657 CPUArchState *env, int signo)
5658 {
5659 int i;
5660
5661 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5662 __put_user(env->regs[i], &sc->gregs[i]);
5663 }
5664
5665 __put_user(env->pc, &sc->pc);
5666 __put_user(0, &sc->ics);
5667 __put_user(signo, &sc->faultnum);
5668 }
5669
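/* Reload the guest general-purpose registers and PC from a sigcontext. */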
5670 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5671 {
5672 int i;
5673
5674 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5675 __get_user(env->regs[i], &sc->gregs[i]);
5676 }
5677
5678 __get_user(env->pc, &sc->pc);
5679 }
5680
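/* Pick the frame location: fail if the frame would overflow the
 * alternate signal stack, switch to that stack when SA_ONSTACK
 * requests it, then reserve the frame and align it to 16 bytes.
 */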
5681 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5682 size_t frame_size)
5683 {
5684 unsigned long sp = env->regs[TILEGX_R_SP];
5685
5686 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5687 return -1UL;
5688 }
5689
5690 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5691 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5692 }
5693
5694 sp -= frame_size;
5695 sp &= -16UL;
5696 return sp;
5697 }
5698
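/* Build the TILE-Gx RT signal frame: at least the signal number (the
 * full siginfo only with SA_SIGINFO), a ucontext with the saved
 * registers and signal mask, and a retcode trampoline used when no
 * sa_restorer is supplied.
 */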
5699 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5700 target_siginfo_t *info,
5701 target_sigset_t *set, CPUArchState *env)
5702 {
5703 abi_ulong frame_addr;
5704 struct target_rt_sigframe *frame;
5705 unsigned long restorer;
5706
5707 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5708 trace_user_setup_rt_frame(env, frame_addr);
5709 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5710 goto give_sigsegv;
5711 }
5712
5713 /* Always write at least the signal number for the stack backtracer. */
5714 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5715 /* At sigreturn time, restore the callee-save registers too. */
5716 tswap_siginfo(&frame->info, info);
5717 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can we skip this? */
5718 } else {
5719 __put_user(info->si_signo, &frame->info.si_signo);
5720 }
5721
5722 /* Create the ucontext. */
5723 __put_user(0, &frame->uc.tuc_flags);
5724 __put_user(0, &frame->uc.tuc_link);
5725 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5726 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5727 &frame->uc.tuc_stack.ss_flags);
5728 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5729 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5730
5731 if (ka->sa_flags & TARGET_SA_RESTORER) {
5732 restorer = (unsigned long) ka->sa_restorer;
5733 } else {
5734 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5735 __put_user(INSN_SWINT1, &frame->retcode[1]);
5736 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5737 }
5738 env->pc = (unsigned long) ka->_sa_handler;
/* Pass guest addresses, not host pointers, into the guest registers. */
5739 env->regs[TILEGX_R_SP] = frame_addr;
5740 env->regs[TILEGX_R_LR] = restorer;
5741 env->regs[0] = (unsigned long) sig;
5742 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5743 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5744 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can we skip this? */
5745
5746 unlock_user_struct(frame, frame_addr, 1);
5747 return;
5748
5749 give_sigsegv:
5750 if (sig == TARGET_SIGSEGV) {
5751 ka->_sa_handler = TARGET_SIG_DFL;
5752 }
5753 force_sig(TARGET_SIGSEGV /* , current */);
5754 }
5755
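/* rt_sigreturn for TILE-Gx: the frame sits at the current stack
 * pointer; restore the signal mask, registers and sigaltstack state.
 */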
5756 long do_rt_sigreturn(CPUTLGState *env)
5757 {
5758 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5759 struct target_rt_sigframe *frame;
5760 sigset_t set;
5761
5762 trace_user_do_rt_sigreturn(env, frame_addr);
5763 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5764 goto badframe;
5765 }
5766 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5767 set_sigmask(&set);
5768
5769 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5770 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5771 uc.tuc_stack),
5772 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5773 goto badframe;
5774 }
5775
5776 unlock_user_struct(frame, frame_addr, 0);
5777 return -TARGET_QEMU_ESIGRETURN;
5778
5779
5780 badframe:
5781 unlock_user_struct(frame, frame_addr, 0);
5782 force_sig(TARGET_SIGSEGV);
5783 }
5784
5785 #else
5786
5787 static void setup_frame(int sig, struct target_sigaction *ka,
5788 target_sigset_t *set, CPUArchState *env)
5789 {
5790 fprintf(stderr, "setup_frame: not implemented\n");
5791 }
5792
5793 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5794 target_siginfo_t *info,
5795 target_sigset_t *set, CPUArchState *env)
5796 {
5797 fprintf(stderr, "setup_rt_frame: not implemented\n");
5798 }
5799
5800 long do_sigreturn(CPUArchState *env)
5801 {
5802 fprintf(stderr, "do_sigreturn: not implemented\n");
5803 return -TARGET_ENOSYS;
5804 }
5805
5806 long do_rt_sigreturn(CPUArchState *env)
5807 {
5808 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5809 return -TARGET_ENOSYS;
5810 }
5811
5812 #endif
5813
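/* Deliver one pending guest signal: let gdb intercept it first, then
 * either perform the default action, ignore it, or push a signal frame
 * and point the guest PC at the registered handler with the handler's
 * blocked-signal set installed.
 */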
5814 static void handle_pending_signal(CPUArchState *cpu_env, int sig)
5815 {
5816 CPUState *cpu = ENV_GET_CPU(cpu_env);
5817 abi_ulong handler;
5818 sigset_t set;
5819 target_sigset_t target_old_set;
5820 struct target_sigaction *sa;
5821 TaskState *ts = cpu->opaque;
5822 struct emulated_sigtable *k = &ts->sigtab[sig - 1];
5823
5824 trace_user_handle_signal(cpu_env, sig);
5825 /* dequeue signal */
5826 k->pending = 0;
5827
5828 sig = gdb_handlesig(cpu, sig);
5829 if (!sig) {
5830 sa = NULL;
5831 handler = TARGET_SIG_IGN;
5832 } else {
5833 sa = &sigact_table[sig - 1];
5834 handler = sa->_sa_handler;
5835 }
5836
5837 if (handler == TARGET_SIG_DFL) {
5838 /* Default handler: ignore some signals; the others are job control or fatal. */
5839 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5840 kill(getpid(), SIGSTOP);
5841 } else if (sig != TARGET_SIGCHLD &&
5842 sig != TARGET_SIGURG &&
5843 sig != TARGET_SIGWINCH &&
5844 sig != TARGET_SIGCONT) {
5845 force_sig(sig);
5846 }
5847 } else if (handler == TARGET_SIG_IGN) {
5848 /* ignore sig */
5849 } else if (handler == TARGET_SIG_ERR) {
5850 force_sig(sig);
5851 } else {
5852 /* compute the blocked signals during the handler execution */
5853 sigset_t *blocked_set;
5854
5855 target_to_host_sigset(&set, &sa->sa_mask);
5856 /* SA_NODEFER indicates that the current signal should not be
5857 blocked during the handler */
5858 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5859 sigaddset(&set, target_to_host_signal(sig));
5860
5861 /* save the previous blocked signal state to restore it at the
5862 end of the signal execution (see do_sigreturn) */
5863 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5864
5865 /* block signals in the handler */
5866 blocked_set = ts->in_sigsuspend ?
5867 &ts->sigsuspend_mask : &ts->signal_mask;
5868 sigorset(&ts->signal_mask, blocked_set, &set);
5869 ts->in_sigsuspend = 0;
5870
5871 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5872 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5873 {
5874 CPUX86State *env = cpu_env;
5875 if (env->eflags & VM_MASK)
5876 save_v86_state(env);
5877 }
5878 #endif
5879 /* prepare the stack frame of the virtual CPU */
5880 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5881 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5882 /* These targets do not have traditional signals. */
5883 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5884 #else
5885 if (sa->sa_flags & TARGET_SA_SIGINFO)
5886 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5887 else
5888 setup_frame(sig, sa, &target_old_set, cpu_env);
5889 #endif
5890 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5891 sa->_sa_handler = TARGET_SIG_DFL;
5892 }
5893 }
5894 }
5895
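/* Drain all pending guest signals. Host signals are blocked while the
 * emulated sigtab is examined; forced synchronous signals are delivered
 * first, then any unblocked asynchronous ones, and the host mask is
 * reinstated (always allowing SIGSEGV/SIGBUS) before returning.
 */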
5896 void process_pending_signals(CPUArchState *cpu_env)
5897 {
5898 CPUState *cpu = ENV_GET_CPU(cpu_env);
5899 int sig;
5900 TaskState *ts = cpu->opaque;
5901 sigset_t set;
5902 sigset_t *blocked_set;
5903
5904 while (atomic_read(&ts->signal_pending)) {
5905 /* FIXME: This is not thread-safe. */
5906 sigfillset(&set);
5907 sigprocmask(SIG_SETMASK, &set, 0);
5908
5909 sig = ts->sync_signal.pending;
5910 if (sig) {
5911 /* Synchronous signals are forced; see force_sig_info() and its
5912 * callers in Linux.
5913 * Note that not all of our queue_signal() calls in QEMU correspond
5914 * to force_sig_info() calls in Linux (some are send_sig_info()).
5915 * However, it seems like a kernel bug to allow the process to block
5916 * a synchronous signal, since it could then just end up looping
5917 * round and round indefinitely.
5918 */
5919 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
5920 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
5921 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
5922 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
5923 }
5924
5925 handle_pending_signal(cpu_env, sig);
5926 }
5927
5928 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5929 blocked_set = ts->in_sigsuspend ?
5930 &ts->sigsuspend_mask : &ts->signal_mask;
5931
5932 if (ts->sigtab[sig - 1].pending &&
5933 (!sigismember(blocked_set,
5934 target_to_host_signal_table[sig]))) {
5935 handle_pending_signal(cpu_env, sig);
5936 /* Restart scan from the beginning */
5937 sig = 1;
5938 }
5939 }
5940
5941 /* if no signal is pending, unblock signals and recheck (the act
5942 * of unblocking might cause us to take another host signal which
5943 * will set signal_pending again).
5944 */
5945 atomic_set(&ts->signal_pending, 0);
5946 ts->in_sigsuspend = 0;
5947 set = ts->signal_mask;
5948 sigdelset(&set, SIGSEGV);
5949 sigdelset(&set, SIGBUS);
5950 sigprocmask(SIG_SETMASK, &set, 0);
5951 }
5952 ts->in_sigsuspend = 0;
5953 }