1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include <sys/ucontext.h>
21 #include <sys/resource.h>
22
23 #include "qemu.h"
24 #include "qemu-common.h"
25 #include "target_signal.h"
26 #include "trace.h"
27
28 static struct target_sigaltstack target_sigaltstack_used = {
29 .ss_sp = 0,
30 .ss_size = 0,
31 .ss_flags = TARGET_SS_DISABLE,
32 };
33
34 static struct target_sigaction sigact_table[TARGET_NSIG];
35
36 static void host_signal_handler(int host_signum, siginfo_t *info,
37 void *puc);
38
39 static uint8_t host_to_target_signal_table[_NSIG] = {
40 [SIGHUP] = TARGET_SIGHUP,
41 [SIGINT] = TARGET_SIGINT,
42 [SIGQUIT] = TARGET_SIGQUIT,
43 [SIGILL] = TARGET_SIGILL,
44 [SIGTRAP] = TARGET_SIGTRAP,
45 [SIGABRT] = TARGET_SIGABRT,
46 /* [SIGIOT] = TARGET_SIGIOT,*/
47 [SIGBUS] = TARGET_SIGBUS,
48 [SIGFPE] = TARGET_SIGFPE,
49 [SIGKILL] = TARGET_SIGKILL,
50 [SIGUSR1] = TARGET_SIGUSR1,
51 [SIGSEGV] = TARGET_SIGSEGV,
52 [SIGUSR2] = TARGET_SIGUSR2,
53 [SIGPIPE] = TARGET_SIGPIPE,
54 [SIGALRM] = TARGET_SIGALRM,
55 [SIGTERM] = TARGET_SIGTERM,
56 #ifdef SIGSTKFLT
57 [SIGSTKFLT] = TARGET_SIGSTKFLT,
58 #endif
59 [SIGCHLD] = TARGET_SIGCHLD,
60 [SIGCONT] = TARGET_SIGCONT,
61 [SIGSTOP] = TARGET_SIGSTOP,
62 [SIGTSTP] = TARGET_SIGTSTP,
63 [SIGTTIN] = TARGET_SIGTTIN,
64 [SIGTTOU] = TARGET_SIGTTOU,
65 [SIGURG] = TARGET_SIGURG,
66 [SIGXCPU] = TARGET_SIGXCPU,
67 [SIGXFSZ] = TARGET_SIGXFSZ,
68 [SIGVTALRM] = TARGET_SIGVTALRM,
69 [SIGPROF] = TARGET_SIGPROF,
70 [SIGWINCH] = TARGET_SIGWINCH,
71 [SIGIO] = TARGET_SIGIO,
72 [SIGPWR] = TARGET_SIGPWR,
73 [SIGSYS] = TARGET_SIGSYS,
74 /* next signals stay the same */
75 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
76 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
77 To fix this properly we need to do manual signal delivery multiplexed
78 over a single host signal. */
79 [__SIGRTMIN] = __SIGRTMAX,
80 [__SIGRTMAX] = __SIGRTMIN,
81 };
82 static uint8_t target_to_host_signal_table[_NSIG];
83
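/* Helpers mirroring the kernel's on_sig_stack()/sas_ss_flags(): they test the
   guest stack pointer against the guest's current sigaltstack settings
   (target_sigaltstack_used above), not against any host stack. */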
84 static inline int on_sig_stack(unsigned long sp)
85 {
86 return (sp - target_sigaltstack_used.ss_sp
87 < target_sigaltstack_used.ss_size);
88 }
89
90 static inline int sas_ss_flags(unsigned long sp)
91 {
92 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
93 : on_sig_stack(sp) ? SS_ONSTACK : 0);
94 }
95
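/* Translate signal numbers between host and target numbering using the
   tables built in signal_init(); out-of-range values are passed through
   unchanged. */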
96 int host_to_target_signal(int sig)
97 {
98 if (sig < 0 || sig >= _NSIG)
99 return sig;
100 return host_to_target_signal_table[sig];
101 }
102
103 int target_to_host_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return target_to_host_signal_table[sig];
108 }
109
110 static inline void target_sigemptyset(target_sigset_t *set)
111 {
112 memset(set, 0, sizeof(*set));
113 }
114
115 static inline void target_sigaddset(target_sigset_t *set, int signum)
116 {
117 signum--;
118 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
119 set->sig[signum / TARGET_NSIG_BPW] |= mask;
120 }
121
122 static inline int target_sigismember(const target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
127 }
128
129 static void host_to_target_sigset_internal(target_sigset_t *d,
130 const sigset_t *s)
131 {
132 int i;
133 target_sigemptyset(d);
134 for (i = 1; i <= TARGET_NSIG; i++) {
135 if (sigismember(s, i)) {
136 target_sigaddset(d, host_to_target_signal(i));
137 }
138 }
139 }
140
141 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
142 {
143 target_sigset_t d1;
144 int i;
145
146 host_to_target_sigset_internal(&d1, s);
147 for(i = 0;i < TARGET_NSIG_WORDS; i++)
148 d->sig[i] = tswapal(d1.sig[i]);
149 }
150
151 static void target_to_host_sigset_internal(sigset_t *d,
152 const target_sigset_t *s)
153 {
154 int i;
155 sigemptyset(d);
156 for (i = 1; i <= TARGET_NSIG; i++) {
157 if (target_sigismember(s, i)) {
158 sigaddset(d, target_to_host_signal(i));
159 }
160 }
161 }
162
163 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
164 {
165 target_sigset_t s1;
166 int i;
167
168 for(i = 0;i < TARGET_NSIG_WORDS; i++)
169 s1.sig[i] = tswapal(s->sig[i]);
170 target_to_host_sigset_internal(d, &s1);
171 }
172
173 void host_to_target_old_sigset(abi_ulong *old_sigset,
174 const sigset_t *sigset)
175 {
176 target_sigset_t d;
177 host_to_target_sigset(&d, sigset);
178 *old_sigset = d.sig[0];
179 }
180
181 void target_to_host_old_sigset(sigset_t *sigset,
182 const abi_ulong *old_sigset)
183 {
184 target_sigset_t d;
185 int i;
186
187 d.sig[0] = *old_sigset;
188 for(i = 1;i < TARGET_NSIG_WORDS; i++)
189 d.sig[i] = 0;
190 target_to_host_sigset(sigset, &d);
191 }
192
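/* Block all host signals for this thread and mark that a guest signal is
   pending. Returns the previous value of signal_pending, i.e. non-zero if a
   signal was already pending when we were called; callers typically treat
   that as "restart the syscall" (see do_sigprocmask() below). */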
193 int block_signals(void)
194 {
195 TaskState *ts = (TaskState *)thread_cpu->opaque;
196 sigset_t set;
197 int pending;
198
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
202 */
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
205
206 pending = atomic_xchg(&ts->signal_pending, 1);
207
208 return pending;
209 }
210
211 /* Wrapper for sigprocmask function
212 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
213 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
214 * a signal was already pending and the syscall must be restarted, or
215 * 0 on success.
216 * If set is NULL, this is guaranteed not to fail.
217 */
218 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
219 {
220 TaskState *ts = (TaskState *)thread_cpu->opaque;
221
222 if (oldset) {
223 *oldset = ts->signal_mask;
224 }
225
226 if (set) {
227 int i;
228
229 if (block_signals()) {
230 return -TARGET_ERESTARTSYS;
231 }
232
233 switch (how) {
234 case SIG_BLOCK:
235 sigorset(&ts->signal_mask, &ts->signal_mask, set);
236 break;
237 case SIG_UNBLOCK:
238 for (i = 1; i <= NSIG; ++i) {
239 if (sigismember(set, i)) {
240 sigdelset(&ts->signal_mask, i);
241 }
242 }
243 break;
244 case SIG_SETMASK:
245 ts->signal_mask = *set;
246 break;
247 default:
248 g_assert_not_reached();
249 }
250
251 /* Silently ignore attempts to change blocking status of KILL or STOP */
252 sigdelset(&ts->signal_mask, SIGKILL);
253 sigdelset(&ts->signal_mask, SIGSTOP);
254 }
255 return 0;
256 }
257
258 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
259 !defined(TARGET_X86_64)
260 /* Just set the guest's signal mask to the specified value; the
261 * caller is assumed to have called block_signals() already.
262 */
263 static void set_sigmask(const sigset_t *set)
264 {
265 TaskState *ts = (TaskState *)thread_cpu->opaque;
266
267 ts->signal_mask = *set;
268 }
269 #endif
270
271 /* siginfo conversion */
272
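/* The siginfo conversion is split in two: host_to_target_siginfo_noswap()
   maps the host fields into the target layout but keeps host byte order, so
   it can be called from the host signal handler; tswap_siginfo() then
   byteswaps the result when the signal is actually delivered to the guest. */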
273 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
274 const siginfo_t *info)
275 {
276 int sig = host_to_target_signal(info->si_signo);
277 tinfo->si_signo = sig;
278 tinfo->si_errno = 0;
279 tinfo->si_code = info->si_code;
280
281 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
282 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
283 /* We should never get here, but who knows. The information for
284 the target is irrelevant. */
285 tinfo->_sifields._sigfault._addr = 0;
286 } else if (sig == TARGET_SIGIO) {
287 tinfo->_sifields._sigpoll._band = info->si_band;
288 tinfo->_sifields._sigpoll._fd = info->si_fd;
289 } else if (sig == TARGET_SIGCHLD) {
290 tinfo->_sifields._sigchld._pid = info->si_pid;
291 tinfo->_sifields._sigchld._uid = info->si_uid;
292 tinfo->_sifields._sigchld._status
293 = host_to_target_waitstatus(info->si_status);
294 tinfo->_sifields._sigchld._utime = info->si_utime;
295 tinfo->_sifields._sigchld._stime = info->si_stime;
296 } else if (sig >= TARGET_SIGRTMIN) {
297 tinfo->_sifields._rt._pid = info->si_pid;
298 tinfo->_sifields._rt._uid = info->si_uid;
299 /* XXX: potential problem if 64 bit */
300 tinfo->_sifields._rt._sigval.sival_ptr
301 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
302 }
303 }
304
305 static void tswap_siginfo(target_siginfo_t *tinfo,
306 const target_siginfo_t *info)
307 {
308 int sig = info->si_signo;
309 tinfo->si_signo = tswap32(sig);
310 tinfo->si_errno = tswap32(info->si_errno);
311 tinfo->si_code = tswap32(info->si_code);
312
313 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
314 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
315 tinfo->_sifields._sigfault._addr
316 = tswapal(info->_sifields._sigfault._addr);
317 } else if (sig == TARGET_SIGIO) {
318 tinfo->_sifields._sigpoll._band
319 = tswap32(info->_sifields._sigpoll._band);
320 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
321 } else if (sig == TARGET_SIGCHLD) {
322 tinfo->_sifields._sigchld._pid
323 = tswap32(info->_sifields._sigchld._pid);
324 tinfo->_sifields._sigchld._uid
325 = tswap32(info->_sifields._sigchld._uid);
326 tinfo->_sifields._sigchld._status
327 = tswap32(info->_sifields._sigchld._status);
328 tinfo->_sifields._sigchld._utime
329 = tswapal(info->_sifields._sigchld._utime);
330 tinfo->_sifields._sigchld._stime
331 = tswapal(info->_sifields._sigchld._stime);
332 } else if (sig >= TARGET_SIGRTMIN) {
333 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
334 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
335 tinfo->_sifields._rt._sigval.sival_ptr
336 = tswapal(info->_sifields._rt._sigval.sival_ptr);
337 }
338 }
339
340
341 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
342 {
343 host_to_target_siginfo_noswap(tinfo, info);
344 tswap_siginfo(tinfo, tinfo);
345 }
346
347 /* XXX: we assume that only POSIX RT signals are used. */
348 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
349 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
350 {
351 info->si_signo = tswap32(tinfo->si_signo);
352 info->si_errno = tswap32(tinfo->si_errno);
353 info->si_code = tswap32(tinfo->si_code);
354 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
355 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
356 info->si_value.sival_ptr =
357 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
358 }
359
360 static int fatal_signal (int sig)
361 {
362 switch (sig) {
363 case TARGET_SIGCHLD:
364 case TARGET_SIGURG:
365 case TARGET_SIGWINCH:
366 /* Ignored by default. */
367 return 0;
368 case TARGET_SIGCONT:
369 case TARGET_SIGSTOP:
370 case TARGET_SIGTSTP:
371 case TARGET_SIGTTIN:
372 case TARGET_SIGTTOU:
373 /* Job control signals. */
374 return 0;
375 default:
376 return 1;
377 }
378 }
379
380 /* returns 1 if given signal should dump core if not handled */
381 static int core_dump_signal(int sig)
382 {
383 switch (sig) {
384 case TARGET_SIGABRT:
385 case TARGET_SIGFPE:
386 case TARGET_SIGILL:
387 case TARGET_SIGQUIT:
388 case TARGET_SIGSEGV:
389 case TARGET_SIGTRAP:
390 case TARGET_SIGBUS:
391 return (1);
392 default:
393 return (0);
394 }
395 }
396
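/* signal_init(): build the host<->target signal conversion tables, snapshot
   the host signal mask into the TaskState, record the initial (ignored or
   default) disposition of each target signal, and install host_signal_handler
   for every signal that is fatal by default. */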
397 void signal_init(void)
398 {
399 TaskState *ts = (TaskState *)thread_cpu->opaque;
400 struct sigaction act;
401 struct sigaction oact;
402 int i, j;
403 int host_sig;
404
405 /* generate signal conversion tables */
406 for(i = 1; i < _NSIG; i++) {
407 if (host_to_target_signal_table[i] == 0)
408 host_to_target_signal_table[i] = i;
409 }
410 for(i = 1; i < _NSIG; i++) {
411 j = host_to_target_signal_table[i];
412 target_to_host_signal_table[j] = i;
413 }
414
415 /* Set the signal mask from the host mask. */
416 sigprocmask(0, 0, &ts->signal_mask);
417
418 /* set all host signal handlers. ALL signals are blocked during
419 the handlers to serialize them. */
420 memset(sigact_table, 0, sizeof(sigact_table));
421
422 sigfillset(&act.sa_mask);
423 act.sa_flags = SA_SIGINFO;
424 act.sa_sigaction = host_signal_handler;
425 for(i = 1; i <= TARGET_NSIG; i++) {
426 host_sig = target_to_host_signal(i);
427 sigaction(host_sig, NULL, &oact);
428 if (oact.sa_sigaction == (void *)SIG_IGN) {
429 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
430 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
431 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
432 }
433 /* If there's already a handler installed then something has
434 gone horribly wrong, so don't even try to handle that case. */
435 /* Install some handlers for our own use. We need at least
436 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
437 trap all signals because it affects syscall interrupt
438 behavior. But do trap all default-fatal signals. */
439 if (fatal_signal (i))
440 sigaction(host_sig, &act, NULL);
441 }
442 }
443
444
445 /* abort execution with signal */
446 static void QEMU_NORETURN force_sig(int target_sig)
447 {
448 CPUState *cpu = thread_cpu;
449 CPUArchState *env = cpu->env_ptr;
450 TaskState *ts = (TaskState *)cpu->opaque;
451 int host_sig, core_dumped = 0;
452 struct sigaction act;
453
454 host_sig = target_to_host_signal(target_sig);
455 trace_user_force_sig(env, target_sig, host_sig);
456 gdb_signalled(env, target_sig);
457
458 /* dump core if supported by target binary format */
459 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
460 stop_all_tasks();
461 core_dumped =
462 ((*ts->bprm->core_dump)(target_sig, env) == 0);
463 }
464 if (core_dumped) {
465 /* we already dumped the core of the target process; we don't want
466 * a coredump of QEMU itself */
467 struct rlimit nodump;
468 getrlimit(RLIMIT_CORE, &nodump);
469 nodump.rlim_cur=0;
470 setrlimit(RLIMIT_CORE, &nodump);
471 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
472 target_sig, strsignal(host_sig), "core dumped" );
473 }
474
475 /* The proper exit code for dying from an uncaught signal is
476 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
477 * a negative value. To get the proper exit code we need to
478 * actually die from an uncaught signal. Here we install the default
479 * signal handler, send ourselves the signal and wait for it to
480 * arrive. */
481 sigfillset(&act.sa_mask);
482 act.sa_handler = SIG_DFL;
483 act.sa_flags = 0;
484 sigaction(host_sig, &act, NULL);
485
486 /* For some reason raise(host_sig) doesn't send the signal when
487 * statically linked on x86-64. */
488 kill(getpid(), host_sig);
489
490 /* Make sure the signal isn't masked (just reuse the mask inside
491 of act) */
492 sigdelset(&act.sa_mask, host_sig);
493 sigsuspend(&act.sa_mask);
494
495 /* unreachable */
496 abort();
497 }
498
499 /* queue a signal so that it will be sent to the virtual CPU as soon
500 as possible */
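/* Unlike the kernel, at most one pending instance is recorded per target
   signal number, so real-time signals are not queued here: a second signal
   arriving while one is still pending is simply dropped (queue_signal()
   returns 0 in that case). */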
501 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
502 {
503 CPUState *cpu = ENV_GET_CPU(env);
504 TaskState *ts = cpu->opaque;
505 struct emulated_sigtable *k;
506
507 trace_user_queue_signal(env, sig);
508 k = &ts->sigtab[sig - 1];
509
510 /* we queue exactly one signal */
511 if (k->pending) {
512 return 0;
513 }
514
515 k->info = *info;
516 k->pending = 1;
517 /* signal that a new signal is pending */
518 atomic_set(&ts->signal_pending, 1);
519 return 1; /* indicates that the signal was queued */
520 }
521
522 #ifndef HAVE_SAFE_SYSCALL
523 static inline void rewind_if_in_safe_syscall(void *puc)
524 {
525 /* Default version: never rewind */
526 }
527 #endif
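/* On hosts that define HAVE_SAFE_SYSCALL the real implementation (in the
   per-host hostdep.h) is assumed to wind the PC back to the start of the
   safe_syscall sequence if the signal arrived before the host syscall was
   actually issued, so the guest syscall can be restarted after the signal
   is handled. */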
528
529 static void host_signal_handler(int host_signum, siginfo_t *info,
530 void *puc)
531 {
532 CPUArchState *env = thread_cpu->env_ptr;
533 int sig;
534 target_siginfo_t tinfo;
535 ucontext_t *uc = puc;
536
537 /* The CPU emulator uses some host signals to detect exceptions;
538 we forward those signals to it here. */
539 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
540 && info->si_code > 0) {
541 if (cpu_signal_handler(host_signum, info, puc))
542 return;
543 }
544
545 /* get target signal number */
546 sig = host_to_target_signal(host_signum);
547 if (sig < 1 || sig > TARGET_NSIG)
548 return;
549 trace_user_host_signal(env, host_signum, sig);
550
551 rewind_if_in_safe_syscall(puc);
552
553 host_to_target_siginfo_noswap(&tinfo, info);
554 if (queue_signal(env, sig, &tinfo) == 1) {
555 /* Block host signals until target signal handler entered. We
556 * can't block SIGSEGV or SIGBUS while we're executing guest
557 * code in case the guest code provokes one in the window between
558 * now and it getting out to the main loop. Signals will be
559 * unblocked again in process_pending_signals().
560 */
561 sigfillset(&uc->uc_sigmask);
562 sigdelset(&uc->uc_sigmask, SIGSEGV);
563 sigdelset(&uc->uc_sigmask, SIGBUS);
564
565 /* interrupt the virtual CPU as soon as possible */
566 cpu_exit(thread_cpu);
567 }
568 }
569
570 /* do_sigaltstack() returns target values and errnos. */
571 /* compare linux/kernel/signal.c:do_sigaltstack() */
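/* Note: the guest's alternate stack state lives in target_sigaltstack_used
   above; 'sp' is the current guest stack pointer, used only for the
   on_sig_stack()/SS_ONSTACK checks. */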
572 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
573 {
574 int ret;
575 struct target_sigaltstack oss;
576
577 /* XXX: test errors */
578 if(uoss_addr)
579 {
580 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
581 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
582 __put_user(sas_ss_flags(sp), &oss.ss_flags);
583 }
584
585 if(uss_addr)
586 {
587 struct target_sigaltstack *uss;
588 struct target_sigaltstack ss;
589 size_t minstacksize = TARGET_MINSIGSTKSZ;
590
591 #if defined(TARGET_PPC64)
592 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
593 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
594 if (get_ppc64_abi(image) > 1) {
595 minstacksize = 4096;
596 }
597 #endif
598
599 ret = -TARGET_EFAULT;
600 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
601 goto out;
602 }
603 __get_user(ss.ss_sp, &uss->ss_sp);
604 __get_user(ss.ss_size, &uss->ss_size);
605 __get_user(ss.ss_flags, &uss->ss_flags);
606 unlock_user_struct(uss, uss_addr, 0);
607
608 ret = -TARGET_EPERM;
609 if (on_sig_stack(sp))
610 goto out;
611
612 ret = -TARGET_EINVAL;
613 if (ss.ss_flags != TARGET_SS_DISABLE
614 && ss.ss_flags != TARGET_SS_ONSTACK
615 && ss.ss_flags != 0)
616 goto out;
617
618 if (ss.ss_flags == TARGET_SS_DISABLE) {
619 ss.ss_size = 0;
620 ss.ss_sp = 0;
621 } else {
622 ret = -TARGET_ENOMEM;
623 if (ss.ss_size < minstacksize) {
624 goto out;
625 }
626 }
627
628 target_sigaltstack_used.ss_sp = ss.ss_sp;
629 target_sigaltstack_used.ss_size = ss.ss_size;
630 }
631
632 if (uoss_addr) {
633 ret = -TARGET_EFAULT;
634 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
635 goto out;
636 }
637
638 ret = 0;
639 out:
640 return ret;
641 }
642
643 /* do_sigaction() returns host values and errnos */
644 int do_sigaction(int sig, const struct target_sigaction *act,
645 struct target_sigaction *oact)
646 {
647 struct target_sigaction *k;
648 struct sigaction act1;
649 int host_sig;
650 int ret = 0;
651
652 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
653 return -EINVAL;
654 k = &sigact_table[sig - 1];
655 if (oact) {
656 __put_user(k->_sa_handler, &oact->_sa_handler);
657 __put_user(k->sa_flags, &oact->sa_flags);
658 #if !defined(TARGET_MIPS)
659 __put_user(k->sa_restorer, &oact->sa_restorer);
660 #endif
661 /* Not swapped. */
662 oact->sa_mask = k->sa_mask;
663 }
664 if (act) {
665 /* FIXME: This is not threadsafe. */
666 __get_user(k->_sa_handler, &act->_sa_handler);
667 __get_user(k->sa_flags, &act->sa_flags);
668 #if !defined(TARGET_MIPS)
669 __get_user(k->sa_restorer, &act->sa_restorer);
670 #endif
671 /* To be swapped in target_to_host_sigset. */
672 k->sa_mask = act->sa_mask;
673
674 /* we update the host linux signal state */
675 host_sig = target_to_host_signal(sig);
676 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
677 sigfillset(&act1.sa_mask);
678 act1.sa_flags = SA_SIGINFO;
679 if (k->sa_flags & TARGET_SA_RESTART)
680 act1.sa_flags |= SA_RESTART;
681 /* NOTE: it is important to update the host kernel signal
682 ignore state to avoid getting unexpectedly interrupted
683 syscalls */
684 if (k->_sa_handler == TARGET_SIG_IGN) {
685 act1.sa_sigaction = (void *)SIG_IGN;
686 } else if (k->_sa_handler == TARGET_SIG_DFL) {
687 if (fatal_signal (sig))
688 act1.sa_sigaction = host_signal_handler;
689 else
690 act1.sa_sigaction = (void *)SIG_DFL;
691 } else {
692 act1.sa_sigaction = host_signal_handler;
693 }
694 ret = sigaction(host_sig, &act1, NULL);
695 }
696 }
697 return ret;
698 }
699
700 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
701
702 /* from the Linux kernel */
703
704 struct target_fpreg {
705 uint16_t significand[4];
706 uint16_t exponent;
707 };
708
709 struct target_fpxreg {
710 uint16_t significand[4];
711 uint16_t exponent;
712 uint16_t padding[3];
713 };
714
715 struct target_xmmreg {
716 abi_ulong element[4];
717 };
718
719 struct target_fpstate {
720 /* Regular FPU environment */
721 abi_ulong cw;
722 abi_ulong sw;
723 abi_ulong tag;
724 abi_ulong ipoff;
725 abi_ulong cssel;
726 abi_ulong dataoff;
727 abi_ulong datasel;
728 struct target_fpreg _st[8];
729 uint16_t status;
730 uint16_t magic; /* 0xffff = regular FPU data only */
731
732 /* FXSR FPU environment */
733 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
734 abi_ulong mxcsr;
735 abi_ulong reserved;
736 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
737 struct target_xmmreg _xmm[8];
738 abi_ulong padding[56];
739 };
740
741 #define X86_FXSR_MAGIC 0x0000
742
743 struct target_sigcontext {
744 uint16_t gs, __gsh;
745 uint16_t fs, __fsh;
746 uint16_t es, __esh;
747 uint16_t ds, __dsh;
748 abi_ulong edi;
749 abi_ulong esi;
750 abi_ulong ebp;
751 abi_ulong esp;
752 abi_ulong ebx;
753 abi_ulong edx;
754 abi_ulong ecx;
755 abi_ulong eax;
756 abi_ulong trapno;
757 abi_ulong err;
758 abi_ulong eip;
759 uint16_t cs, __csh;
760 abi_ulong eflags;
761 abi_ulong esp_at_signal;
762 uint16_t ss, __ssh;
763 abi_ulong fpstate; /* pointer */
764 abi_ulong oldmask;
765 abi_ulong cr2;
766 };
767
768 struct target_ucontext {
769 abi_ulong tuc_flags;
770 abi_ulong tuc_link;
771 target_stack_t tuc_stack;
772 struct target_sigcontext tuc_mcontext;
773 target_sigset_t tuc_sigmask; /* mask last for extensibility */
774 };
775
776 struct sigframe
777 {
778 abi_ulong pretcode;
779 int sig;
780 struct target_sigcontext sc;
781 struct target_fpstate fpstate;
782 abi_ulong extramask[TARGET_NSIG_WORDS-1];
783 char retcode[8];
784 };
785
786 struct rt_sigframe
787 {
788 abi_ulong pretcode;
789 int sig;
790 abi_ulong pinfo;
791 abi_ulong puc;
792 struct target_siginfo info;
793 struct target_ucontext uc;
794 struct target_fpstate fpstate;
795 char retcode[8];
796 };
797
798 /*
799 * Set up a signal frame.
800 */
801
802 /* XXX: save x87 state */
803 static void setup_sigcontext(struct target_sigcontext *sc,
804 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
805 abi_ulong fpstate_addr)
806 {
807 CPUState *cs = CPU(x86_env_get_cpu(env));
808 uint16_t magic;
809
810 /* already locked in setup_frame() */
811 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
812 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
813 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
814 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
815 __put_user(env->regs[R_EDI], &sc->edi);
816 __put_user(env->regs[R_ESI], &sc->esi);
817 __put_user(env->regs[R_EBP], &sc->ebp);
818 __put_user(env->regs[R_ESP], &sc->esp);
819 __put_user(env->regs[R_EBX], &sc->ebx);
820 __put_user(env->regs[R_EDX], &sc->edx);
821 __put_user(env->regs[R_ECX], &sc->ecx);
822 __put_user(env->regs[R_EAX], &sc->eax);
823 __put_user(cs->exception_index, &sc->trapno);
824 __put_user(env->error_code, &sc->err);
825 __put_user(env->eip, &sc->eip);
826 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
827 __put_user(env->eflags, &sc->eflags);
828 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
829 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
830
831 cpu_x86_fsave(env, fpstate_addr, 1);
832 fpstate->status = fpstate->sw;
833 magic = 0xffff;
834 __put_user(magic, &fpstate->magic);
835 __put_user(fpstate_addr, &sc->fpstate);
836
837 /* non-iBCS2 extensions.. */
838 __put_user(mask, &sc->oldmask);
839 __put_user(env->cr[2], &sc->cr2);
840 }
841
842 /*
843 * Determine which stack to use..
844 */
845
846 static inline abi_ulong
847 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
848 {
849 unsigned long esp;
850
851 /* Default to using normal stack */
852 esp = env->regs[R_ESP];
853 /* This is the X/Open sanctioned signal stack switching. */
854 if (ka->sa_flags & TARGET_SA_ONSTACK) {
855 if (sas_ss_flags(esp) == 0) {
856 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
857 }
858 } else {
859
860 /* This is the legacy signal stack switching. */
861 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
862 !(ka->sa_flags & TARGET_SA_RESTORER) &&
863 ka->sa_restorer) {
864 esp = (unsigned long) ka->sa_restorer;
865 }
866 }
867 return (esp - frame_size) & -8ul;
868 }
869
870 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
871 static void setup_frame(int sig, struct target_sigaction *ka,
872 target_sigset_t *set, CPUX86State *env)
873 {
874 abi_ulong frame_addr;
875 struct sigframe *frame;
876 int i;
877
878 frame_addr = get_sigframe(ka, env, sizeof(*frame));
879 trace_user_setup_frame(env, frame_addr);
880
881 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
882 goto give_sigsegv;
883
884 __put_user(sig, &frame->sig);
885
886 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
887 frame_addr + offsetof(struct sigframe, fpstate));
888
889 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
890 __put_user(set->sig[i], &frame->extramask[i - 1]);
891 }
892
893 /* Set up to return from userspace. If provided, use a stub
894 already in userspace. */
895 if (ka->sa_flags & TARGET_SA_RESTORER) {
896 __put_user(ka->sa_restorer, &frame->pretcode);
897 } else {
898 uint16_t val16;
899 abi_ulong retcode_addr;
900 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
901 __put_user(retcode_addr, &frame->pretcode);
902 /* This is popl %eax ; movl $TARGET_NR_sigreturn,%eax ; int $0x80 */
903 val16 = 0xb858;
904 __put_user(val16, (uint16_t *)(frame->retcode+0));
905 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
906 val16 = 0x80cd;
907 __put_user(val16, (uint16_t *)(frame->retcode+6));
908 }
909
910
911 /* Set up registers for signal handler */
912 env->regs[R_ESP] = frame_addr;
913 env->eip = ka->_sa_handler;
914
915 cpu_x86_load_seg(env, R_DS, __USER_DS);
916 cpu_x86_load_seg(env, R_ES, __USER_DS);
917 cpu_x86_load_seg(env, R_SS, __USER_DS);
918 cpu_x86_load_seg(env, R_CS, __USER_CS);
919 env->eflags &= ~TF_MASK;
920
921 unlock_user_struct(frame, frame_addr, 1);
922
923 return;
924
925 give_sigsegv:
926 if (sig == TARGET_SIGSEGV) {
927 ka->_sa_handler = TARGET_SIG_DFL;
928 }
929 force_sig(TARGET_SIGSEGV /* , current */);
930 }
931
932 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
933 static void setup_rt_frame(int sig, struct target_sigaction *ka,
934 target_siginfo_t *info,
935 target_sigset_t *set, CPUX86State *env)
936 {
937 abi_ulong frame_addr, addr;
938 struct rt_sigframe *frame;
939 int i;
940
941 frame_addr = get_sigframe(ka, env, sizeof(*frame));
942 trace_user_setup_rt_frame(env, frame_addr);
943
944 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
945 goto give_sigsegv;
946
947 __put_user(sig, &frame->sig);
948 addr = frame_addr + offsetof(struct rt_sigframe, info);
949 __put_user(addr, &frame->pinfo);
950 addr = frame_addr + offsetof(struct rt_sigframe, uc);
951 __put_user(addr, &frame->puc);
952 tswap_siginfo(&frame->info, info);
953
954 /* Create the ucontext. */
955 __put_user(0, &frame->uc.tuc_flags);
956 __put_user(0, &frame->uc.tuc_link);
957 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
958 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
959 &frame->uc.tuc_stack.ss_flags);
960 __put_user(target_sigaltstack_used.ss_size,
961 &frame->uc.tuc_stack.ss_size);
962 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
963 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
964
965 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
966 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
967 }
968
969 /* Set up to return from userspace. If provided, use a stub
970 already in userspace. */
971 if (ka->sa_flags & TARGET_SA_RESTORER) {
972 __put_user(ka->sa_restorer, &frame->pretcode);
973 } else {
974 uint16_t val16;
975 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
976 __put_user(addr, &frame->pretcode);
977 /* This is movl $TARGET_NR_rt_sigreturn,%eax ; int $0x80 */
978 __put_user(0xb8, (char *)(frame->retcode+0));
979 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
980 val16 = 0x80cd;
981 __put_user(val16, (uint16_t *)(frame->retcode+5));
982 }
983
984 /* Set up registers for signal handler */
985 env->regs[R_ESP] = frame_addr;
986 env->eip = ka->_sa_handler;
987
988 cpu_x86_load_seg(env, R_DS, __USER_DS);
989 cpu_x86_load_seg(env, R_ES, __USER_DS);
990 cpu_x86_load_seg(env, R_SS, __USER_DS);
991 cpu_x86_load_seg(env, R_CS, __USER_CS);
992 env->eflags &= ~TF_MASK;
993
994 unlock_user_struct(frame, frame_addr, 1);
995
996 return;
997
998 give_sigsegv:
999 if (sig == TARGET_SIGSEGV) {
1000 ka->_sa_handler = TARGET_SIG_DFL;
1001 }
1002 force_sig(TARGET_SIGSEGV /* , current */);
1003 }
1004
1005 static int
1006 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1007 {
1008 unsigned int err = 0;
1009 abi_ulong fpstate_addr;
1010 unsigned int tmpflags;
1011
1012 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1013 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1014 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1015 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1016
1017 env->regs[R_EDI] = tswapl(sc->edi);
1018 env->regs[R_ESI] = tswapl(sc->esi);
1019 env->regs[R_EBP] = tswapl(sc->ebp);
1020 env->regs[R_ESP] = tswapl(sc->esp);
1021 env->regs[R_EBX] = tswapl(sc->ebx);
1022 env->regs[R_EDX] = tswapl(sc->edx);
1023 env->regs[R_ECX] = tswapl(sc->ecx);
1024 env->regs[R_EAX] = tswapl(sc->eax);
1025 env->eip = tswapl(sc->eip);
1026
1027 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1028 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1029
1030 tmpflags = tswapl(sc->eflags);
1031 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1032 // regs->orig_eax = -1; /* disable syscall checks */
1033
1034 fpstate_addr = tswapl(sc->fpstate);
1035 if (fpstate_addr != 0) {
1036 if (!access_ok(VERIFY_READ, fpstate_addr,
1037 sizeof(struct target_fpstate)))
1038 goto badframe;
1039 cpu_x86_frstor(env, fpstate_addr, 1);
1040 }
1041
1042 return err;
1043 badframe:
1044 return 1;
1045 }
1046
1047 long do_sigreturn(CPUX86State *env)
1048 {
1049 struct sigframe *frame;
1050 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1051 target_sigset_t target_set;
1052 sigset_t set;
1053 int i;
1054
1055 trace_user_do_sigreturn(env, frame_addr);
1056 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1057 goto badframe;
1058 /* set blocked signals */
1059 __get_user(target_set.sig[0], &frame->sc.oldmask);
1060 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1061 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1062 }
1063
1064 target_to_host_sigset_internal(&set, &target_set);
1065 set_sigmask(&set);
1066
1067 /* restore registers */
1068 if (restore_sigcontext(env, &frame->sc))
1069 goto badframe;
1070 unlock_user_struct(frame, frame_addr, 0);
1071 return -TARGET_QEMU_ESIGRETURN;
1072
1073 badframe:
1074 unlock_user_struct(frame, frame_addr, 0);
1075 force_sig(TARGET_SIGSEGV);
1076 return 0;
1077 }
1078
1079 long do_rt_sigreturn(CPUX86State *env)
1080 {
1081 abi_ulong frame_addr;
1082 struct rt_sigframe *frame;
1083 sigset_t set;
1084
1085 frame_addr = env->regs[R_ESP] - 4;
1086 trace_user_do_rt_sigreturn(env, frame_addr);
1087 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1088 goto badframe;
1089 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1090 set_sigmask(&set);
1091
1092 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1093 goto badframe;
1094 }
1095
1096 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1097 get_sp_from_cpustate(env)) == -EFAULT) {
1098 goto badframe;
1099 }
1100
1101 unlock_user_struct(frame, frame_addr, 0);
1102 return -TARGET_QEMU_ESIGRETURN;
1103
1104 badframe:
1105 unlock_user_struct(frame, frame_addr, 0);
1106 force_sig(TARGET_SIGSEGV);
1107 return 0;
1108 }
1109
1110 #elif defined(TARGET_AARCH64)
1111
1112 struct target_sigcontext {
1113 uint64_t fault_address;
1114 /* AArch64 registers */
1115 uint64_t regs[31];
1116 uint64_t sp;
1117 uint64_t pc;
1118 uint64_t pstate;
1119 /* 4K reserved for FP/SIMD state and future expansion */
1120 char __reserved[4096] __attribute__((__aligned__(16)));
1121 };
1122
1123 struct target_ucontext {
1124 abi_ulong tuc_flags;
1125 abi_ulong tuc_link;
1126 target_stack_t tuc_stack;
1127 target_sigset_t tuc_sigmask;
1128 /* glibc uses a 1024-bit sigset_t */
1129 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1130 /* last for future expansion */
1131 struct target_sigcontext tuc_mcontext;
1132 };
1133
1134 /*
1135 * Header to be used at the beginning of structures extending the user
1136 * context. Such structures must be placed after the rt_sigframe on the stack
1137 * and be 16-byte aligned. The last structure must be a dummy one with the
1138 * magic and size set to 0.
1139 */
1140 struct target_aarch64_ctx {
1141 uint32_t magic;
1142 uint32_t size;
1143 };
1144
1145 #define TARGET_FPSIMD_MAGIC 0x46508001
1146
1147 struct target_fpsimd_context {
1148 struct target_aarch64_ctx head;
1149 uint32_t fpsr;
1150 uint32_t fpcr;
1151 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1152 };
1153
1154 /*
1155 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1156 * user space as it will change with the addition of new context. User space
1157 * should check the magic/size information.
1158 */
1159 struct target_aux_context {
1160 struct target_fpsimd_context fpsimd;
1161 /* additional context to be added before "end" */
1162 struct target_aarch64_ctx end;
1163 };
1164
1165 struct target_rt_sigframe {
1166 struct target_siginfo info;
1167 struct target_ucontext uc;
1168 uint64_t fp;
1169 uint64_t lr;
1170 uint32_t tramp[2];
1171 };
1172
1173 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1174 CPUARMState *env, target_sigset_t *set)
1175 {
1176 int i;
1177 struct target_aux_context *aux =
1178 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1179
1180 /* set up the stack frame for unwinding */
1181 __put_user(env->xregs[29], &sf->fp);
1182 __put_user(env->xregs[30], &sf->lr);
1183
1184 for (i = 0; i < 31; i++) {
1185 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1186 }
1187 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1188 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1189 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1190
1191 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1192
1193 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1194 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1195 }
1196
1197 for (i = 0; i < 32; i++) {
1198 #ifdef TARGET_WORDS_BIGENDIAN
1199 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1200 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1201 #else
1202 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1203 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1204 #endif
1205 }
1206 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1207 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1208 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1209 __put_user(sizeof(struct target_fpsimd_context),
1210 &aux->fpsimd.head.size);
1211
1212 /* set the "end" magic */
1213 __put_user(0, &aux->end.magic);
1214 __put_user(0, &aux->end.size);
1215
1216 return 0;
1217 }
1218
1219 static int target_restore_sigframe(CPUARMState *env,
1220 struct target_rt_sigframe *sf)
1221 {
1222 sigset_t set;
1223 int i;
1224 struct target_aux_context *aux =
1225 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1226 uint32_t magic, size, fpsr, fpcr;
1227 uint64_t pstate;
1228
1229 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1230 set_sigmask(&set);
1231
1232 for (i = 0; i < 31; i++) {
1233 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1234 }
1235
1236 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1237 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1238 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1239 pstate_write(env, pstate);
1240
1241 __get_user(magic, &aux->fpsimd.head.magic);
1242 __get_user(size, &aux->fpsimd.head.size);
1243
1244 if (magic != TARGET_FPSIMD_MAGIC
1245 || size != sizeof(struct target_fpsimd_context)) {
1246 return 1;
1247 }
1248
1249 for (i = 0; i < 32; i++) {
1250 #ifdef TARGET_WORDS_BIGENDIAN
1251 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1252 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1253 #else
1254 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1255 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1256 #endif
1257 }
1258 __get_user(fpsr, &aux->fpsimd.fpsr);
1259 vfp_set_fpsr(env, fpsr);
1260 __get_user(fpcr, &aux->fpsimd.fpcr);
1261 vfp_set_fpcr(env, fpcr);
1262
1263 return 0;
1264 }
1265
1266 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1267 {
1268 abi_ulong sp;
1269
1270 sp = env->xregs[31];
1271
1272 /*
1273 * This is the X/Open sanctioned signal stack switching.
1274 */
1275 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1276 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1277 }
1278
1279 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1280
1281 return sp;
1282 }
1283
1284 static void target_setup_frame(int usig, struct target_sigaction *ka,
1285 target_siginfo_t *info, target_sigset_t *set,
1286 CPUARMState *env)
1287 {
1288 struct target_rt_sigframe *frame;
1289 abi_ulong frame_addr, return_addr;
1290
1291 frame_addr = get_sigframe(ka, env);
1292 trace_user_setup_frame(env, frame_addr);
1293 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1294 goto give_sigsegv;
1295 }
1296
1297 __put_user(0, &frame->uc.tuc_flags);
1298 __put_user(0, &frame->uc.tuc_link);
1299
1300 __put_user(target_sigaltstack_used.ss_sp,
1301 &frame->uc.tuc_stack.ss_sp);
1302 __put_user(sas_ss_flags(env->xregs[31]),
1303 &frame->uc.tuc_stack.ss_flags);
1304 __put_user(target_sigaltstack_used.ss_size,
1305 &frame->uc.tuc_stack.ss_size);
1306 target_setup_sigframe(frame, env, set);
1307 if (ka->sa_flags & TARGET_SA_RESTORER) {
1308 return_addr = ka->sa_restorer;
1309 } else {
1310 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1311 __put_user(0xd2801168, &frame->tramp[0]);
1312 __put_user(0xd4000001, &frame->tramp[1]);
1313 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1314 }
1315 env->xregs[0] = usig;
1316 env->xregs[31] = frame_addr;
1317 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1318 env->pc = ka->_sa_handler;
1319 env->xregs[30] = return_addr;
1320 if (info) {
1321 tswap_siginfo(&frame->info, info);
1322 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1323 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1324 }
1325
1326 unlock_user_struct(frame, frame_addr, 1);
1327 return;
1328
1329 give_sigsegv:
1330 unlock_user_struct(frame, frame_addr, 1);
1331 force_sig(TARGET_SIGSEGV);
1332 }
1333
1334 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1335 target_siginfo_t *info, target_sigset_t *set,
1336 CPUARMState *env)
1337 {
1338 target_setup_frame(sig, ka, info, set, env);
1339 }
1340
1341 static void setup_frame(int sig, struct target_sigaction *ka,
1342 target_sigset_t *set, CPUARMState *env)
1343 {
1344 target_setup_frame(sig, ka, 0, set, env);
1345 }
1346
1347 long do_rt_sigreturn(CPUARMState *env)
1348 {
1349 struct target_rt_sigframe *frame = NULL;
1350 abi_ulong frame_addr = env->xregs[31];
1351
1352 trace_user_do_rt_sigreturn(env, frame_addr);
1353 if (frame_addr & 15) {
1354 goto badframe;
1355 }
1356
1357 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1358 goto badframe;
1359 }
1360
1361 if (target_restore_sigframe(env, frame)) {
1362 goto badframe;
1363 }
1364
1365 if (do_sigaltstack(frame_addr +
1366 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1367 0, get_sp_from_cpustate(env)) == -EFAULT) {
1368 goto badframe;
1369 }
1370
1371 unlock_user_struct(frame, frame_addr, 0);
1372 return -TARGET_QEMU_ESIGRETURN;
1373
1374 badframe:
1375 unlock_user_struct(frame, frame_addr, 0);
1376 force_sig(TARGET_SIGSEGV);
1377 return 0;
1378 }
1379
1380 long do_sigreturn(CPUARMState *env)
1381 {
1382 return do_rt_sigreturn(env);
1383 }
1384
1385 #elif defined(TARGET_ARM)
1386
1387 struct target_sigcontext {
1388 abi_ulong trap_no;
1389 abi_ulong error_code;
1390 abi_ulong oldmask;
1391 abi_ulong arm_r0;
1392 abi_ulong arm_r1;
1393 abi_ulong arm_r2;
1394 abi_ulong arm_r3;
1395 abi_ulong arm_r4;
1396 abi_ulong arm_r5;
1397 abi_ulong arm_r6;
1398 abi_ulong arm_r7;
1399 abi_ulong arm_r8;
1400 abi_ulong arm_r9;
1401 abi_ulong arm_r10;
1402 abi_ulong arm_fp;
1403 abi_ulong arm_ip;
1404 abi_ulong arm_sp;
1405 abi_ulong arm_lr;
1406 abi_ulong arm_pc;
1407 abi_ulong arm_cpsr;
1408 abi_ulong fault_address;
1409 };
1410
1411 struct target_ucontext_v1 {
1412 abi_ulong tuc_flags;
1413 abi_ulong tuc_link;
1414 target_stack_t tuc_stack;
1415 struct target_sigcontext tuc_mcontext;
1416 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1417 };
1418
1419 struct target_ucontext_v2 {
1420 abi_ulong tuc_flags;
1421 abi_ulong tuc_link;
1422 target_stack_t tuc_stack;
1423 struct target_sigcontext tuc_mcontext;
1424 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1425 char __unused[128 - sizeof(target_sigset_t)];
1426 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1427 };
1428
1429 struct target_user_vfp {
1430 uint64_t fpregs[32];
1431 abi_ulong fpscr;
1432 };
1433
1434 struct target_user_vfp_exc {
1435 abi_ulong fpexc;
1436 abi_ulong fpinst;
1437 abi_ulong fpinst2;
1438 };
1439
1440 struct target_vfp_sigframe {
1441 abi_ulong magic;
1442 abi_ulong size;
1443 struct target_user_vfp ufp;
1444 struct target_user_vfp_exc ufp_exc;
1445 } __attribute__((__aligned__(8)));
1446
1447 struct target_iwmmxt_sigframe {
1448 abi_ulong magic;
1449 abi_ulong size;
1450 uint64_t regs[16];
1451 /* Note that not all the coprocessor control registers are stored here */
1452 uint32_t wcssf;
1453 uint32_t wcasf;
1454 uint32_t wcgr0;
1455 uint32_t wcgr1;
1456 uint32_t wcgr2;
1457 uint32_t wcgr3;
1458 } __attribute__((__aligned__(8)));
1459
1460 #define TARGET_VFP_MAGIC 0x56465001
1461 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1462
1463 struct sigframe_v1
1464 {
1465 struct target_sigcontext sc;
1466 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1467 abi_ulong retcode;
1468 };
1469
1470 struct sigframe_v2
1471 {
1472 struct target_ucontext_v2 uc;
1473 abi_ulong retcode;
1474 };
1475
1476 struct rt_sigframe_v1
1477 {
1478 abi_ulong pinfo;
1479 abi_ulong puc;
1480 struct target_siginfo info;
1481 struct target_ucontext_v1 uc;
1482 abi_ulong retcode;
1483 };
1484
1485 struct rt_sigframe_v2
1486 {
1487 struct target_siginfo info;
1488 struct target_ucontext_v2 uc;
1489 abi_ulong retcode;
1490 };
1491
1492 #define TARGET_CONFIG_CPU_32 1
1493
1494 /*
1495 * For ARM syscalls, we encode the syscall number into the instruction.
1496 */
1497 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1498 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1499
1500 /*
1501 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1502 * need two 16-bit instructions.
1503 */
1504 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1505 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1506
1507 static const abi_ulong retcodes[4] = {
1508 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1509 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1510 };
1511
1512
1513 static inline int valid_user_regs(CPUARMState *regs)
1514 {
1515 return 1;
1516 }
1517
1518 static void
1519 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1520 CPUARMState *env, abi_ulong mask)
1521 {
1522 __put_user(env->regs[0], &sc->arm_r0);
1523 __put_user(env->regs[1], &sc->arm_r1);
1524 __put_user(env->regs[2], &sc->arm_r2);
1525 __put_user(env->regs[3], &sc->arm_r3);
1526 __put_user(env->regs[4], &sc->arm_r4);
1527 __put_user(env->regs[5], &sc->arm_r5);
1528 __put_user(env->regs[6], &sc->arm_r6);
1529 __put_user(env->regs[7], &sc->arm_r7);
1530 __put_user(env->regs[8], &sc->arm_r8);
1531 __put_user(env->regs[9], &sc->arm_r9);
1532 __put_user(env->regs[10], &sc->arm_r10);
1533 __put_user(env->regs[11], &sc->arm_fp);
1534 __put_user(env->regs[12], &sc->arm_ip);
1535 __put_user(env->regs[13], &sc->arm_sp);
1536 __put_user(env->regs[14], &sc->arm_lr);
1537 __put_user(env->regs[15], &sc->arm_pc);
1538 #ifdef TARGET_CONFIG_CPU_32
1539 __put_user(cpsr_read(env), &sc->arm_cpsr);
1540 #endif
1541
1542 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1543 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1544 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1545 __put_user(mask, &sc->oldmask);
1546 }
1547
1548 static inline abi_ulong
1549 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1550 {
1551 unsigned long sp = regs->regs[13];
1552
1553 /*
1554 * This is the X/Open sanctioned signal stack switching.
1555 */
1556 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1557 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1558 }
1559 /*
1560 * ATPCS B01 mandates 8-byte alignment
1561 */
1562 return (sp - framesize) & ~7;
1563 }
1564
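/* Fill in the return trampoline and the registers for handler entry: bit 0
   of the handler address selects Thumb mode, and retcodes[] is indexed by
   (thumb ? 1 : 0) + (SA_SIGINFO ? 2 : 0) to pick the matching sigreturn or
   rt_sigreturn stub. */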
1565 static void
1566 setup_return(CPUARMState *env, struct target_sigaction *ka,
1567 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1568 {
1569 abi_ulong handler = ka->_sa_handler;
1570 abi_ulong retcode;
1571 int thumb = handler & 1;
1572 uint32_t cpsr = cpsr_read(env);
1573
1574 cpsr &= ~CPSR_IT;
1575 if (thumb) {
1576 cpsr |= CPSR_T;
1577 } else {
1578 cpsr &= ~CPSR_T;
1579 }
1580
1581 if (ka->sa_flags & TARGET_SA_RESTORER) {
1582 retcode = ka->sa_restorer;
1583 } else {
1584 unsigned int idx = thumb;
1585
1586 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1587 idx += 2;
1588 }
1589
1590 __put_user(retcodes[idx], rc);
1591
1592 retcode = rc_addr + thumb;
1593 }
1594
1595 env->regs[0] = usig;
1596 env->regs[13] = frame_addr;
1597 env->regs[14] = retcode;
1598 env->regs[15] = handler & (thumb ? ~1 : ~3);
1599 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1600 }
1601
1602 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1603 {
1604 int i;
1605 struct target_vfp_sigframe *vfpframe;
1606 vfpframe = (struct target_vfp_sigframe *)regspace;
1607 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1608 __put_user(sizeof(*vfpframe), &vfpframe->size);
1609 for (i = 0; i < 32; i++) {
1610 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1611 }
1612 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1613 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1614 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1615 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1616 return (abi_ulong*)(vfpframe+1);
1617 }
1618
1619 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1620 CPUARMState *env)
1621 {
1622 int i;
1623 struct target_iwmmxt_sigframe *iwmmxtframe;
1624 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1625 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1626 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1627 for (i = 0; i < 16; i++) {
1628 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1629 }
1630 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1631 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1632 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1633 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1634 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1635 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1636 return (abi_ulong*)(iwmmxtframe+1);
1637 }
1638
1639 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1640 target_sigset_t *set, CPUARMState *env)
1641 {
1642 struct target_sigaltstack stack;
1643 int i;
1644 abi_ulong *regspace;
1645
1646 /* Clear all the bits of the ucontext we don't use. */
1647 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1648
1649 memset(&stack, 0, sizeof(stack));
1650 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1651 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1652 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1653 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1654
1655 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1656 /* Save coprocessor signal frame. */
1657 regspace = uc->tuc_regspace;
1658 if (arm_feature(env, ARM_FEATURE_VFP)) {
1659 regspace = setup_sigframe_v2_vfp(regspace, env);
1660 }
1661 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1662 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1663 }
1664
1665 /* Write terminating magic word */
1666 __put_user(0, regspace);
1667
1668 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1669 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1670 }
1671 }
1672
1673 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1674 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1675 target_sigset_t *set, CPUARMState *regs)
1676 {
1677 struct sigframe_v1 *frame;
1678 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1679 int i;
1680
1681 trace_user_setup_frame(regs, frame_addr);
1682 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1683 return;
1684 }
1685
1686 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1687
1688 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1689 __put_user(set->sig[i], &frame->extramask[i - 1]);
1690 }
1691
1692 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1693 frame_addr + offsetof(struct sigframe_v1, retcode));
1694
1695 unlock_user_struct(frame, frame_addr, 1);
1696 }
1697
1698 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1699 target_sigset_t *set, CPUARMState *regs)
1700 {
1701 struct sigframe_v2 *frame;
1702 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1703
1704 trace_user_setup_frame(regs, frame_addr);
1705 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1706 return;
1707 }
1708
1709 setup_sigframe_v2(&frame->uc, set, regs);
1710
1711 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1712 frame_addr + offsetof(struct sigframe_v2, retcode));
1713
1714 unlock_user_struct(frame, frame_addr, 1);
1715 }
1716
1717 static void setup_frame(int usig, struct target_sigaction *ka,
1718 target_sigset_t *set, CPUARMState *regs)
1719 {
1720 if (get_osversion() >= 0x020612) {
1721 setup_frame_v2(usig, ka, set, regs);
1722 } else {
1723 setup_frame_v1(usig, ka, set, regs);
1724 }
1725 }
1726
1727 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1728 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1729 target_siginfo_t *info,
1730 target_sigset_t *set, CPUARMState *env)
1731 {
1732 struct rt_sigframe_v1 *frame;
1733 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1734 struct target_sigaltstack stack;
1735 int i;
1736 abi_ulong info_addr, uc_addr;
1737
1738 trace_user_setup_rt_frame(env, frame_addr);
1739 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1740 return /* 1 */;
1741 }
1742
1743 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1744 __put_user(info_addr, &frame->pinfo);
1745 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1746 __put_user(uc_addr, &frame->puc);
1747 tswap_siginfo(&frame->info, info);
1748
1749 /* Clear all the bits of the ucontext we don't use. */
1750 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1751
1752 memset(&stack, 0, sizeof(stack));
1753 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1754 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1755 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1756 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1757
1758 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1759 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1760 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1761 }
1762
1763 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1764 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1765
1766 env->regs[1] = info_addr;
1767 env->regs[2] = uc_addr;
1768
1769 unlock_user_struct(frame, frame_addr, 1);
1770 }
1771
1772 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1773 target_siginfo_t *info,
1774 target_sigset_t *set, CPUARMState *env)
1775 {
1776 struct rt_sigframe_v2 *frame;
1777 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1778 abi_ulong info_addr, uc_addr;
1779
1780 trace_user_setup_rt_frame(env, frame_addr);
1781 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1782 return /* 1 */;
1783 }
1784
1785 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1786 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1787 tswap_siginfo(&frame->info, info);
1788
1789 setup_sigframe_v2(&frame->uc, set, env);
1790
1791 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1792 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1793
1794 env->regs[1] = info_addr;
1795 env->regs[2] = uc_addr;
1796
1797 unlock_user_struct(frame, frame_addr, 1);
1798 }
1799
1800 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1801 target_siginfo_t *info,
1802 target_sigset_t *set, CPUARMState *env)
1803 {
1804 if (get_osversion() >= 0x020612) {
1805 setup_rt_frame_v2(usig, ka, info, set, env);
1806 } else {
1807 setup_rt_frame_v1(usig, ka, info, set, env);
1808 }
1809 }
1810
1811 static int
1812 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1813 {
1814 int err = 0;
1815 uint32_t cpsr;
1816
1817 __get_user(env->regs[0], &sc->arm_r0);
1818 __get_user(env->regs[1], &sc->arm_r1);
1819 __get_user(env->regs[2], &sc->arm_r2);
1820 __get_user(env->regs[3], &sc->arm_r3);
1821 __get_user(env->regs[4], &sc->arm_r4);
1822 __get_user(env->regs[5], &sc->arm_r5);
1823 __get_user(env->regs[6], &sc->arm_r6);
1824 __get_user(env->regs[7], &sc->arm_r7);
1825 __get_user(env->regs[8], &sc->arm_r8);
1826 __get_user(env->regs[9], &sc->arm_r9);
1827 __get_user(env->regs[10], &sc->arm_r10);
1828 __get_user(env->regs[11], &sc->arm_fp);
1829 __get_user(env->regs[12], &sc->arm_ip);
1830 __get_user(env->regs[13], &sc->arm_sp);
1831 __get_user(env->regs[14], &sc->arm_lr);
1832 __get_user(env->regs[15], &sc->arm_pc);
1833 #ifdef TARGET_CONFIG_CPU_32
1834 __get_user(cpsr, &sc->arm_cpsr);
1835 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1836 #endif
1837
1838 err |= !valid_user_regs(env);
1839
1840 return err;
1841 }
1842
1843 static long do_sigreturn_v1(CPUARMState *env)
1844 {
1845 abi_ulong frame_addr;
1846 struct sigframe_v1 *frame = NULL;
1847 target_sigset_t set;
1848 sigset_t host_set;
1849 int i;
1850
1851 /*
1852 * Since we stacked the signal on a 64-bit boundary,
1853 * 'sp' should be word aligned here. If it's
1854 * not, then the user is trying to mess with us.
1855 */
1856 frame_addr = env->regs[13];
1857 trace_user_do_sigreturn(env, frame_addr);
1858 if (frame_addr & 7) {
1859 goto badframe;
1860 }
1861
1862 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1863 goto badframe;
1864 }
1865
1866 __get_user(set.sig[0], &frame->sc.oldmask);
1867 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1868 __get_user(set.sig[i], &frame->extramask[i - 1]);
1869 }
1870
1871 target_to_host_sigset_internal(&host_set, &set);
1872 set_sigmask(&host_set);
1873
1874 if (restore_sigcontext(env, &frame->sc)) {
1875 goto badframe;
1876 }
1877
1878 #if 0
1879 /* Send SIGTRAP if we're single-stepping */
1880 if (ptrace_cancel_bpt(current))
1881 send_sig(SIGTRAP, current, 1);
1882 #endif
1883 unlock_user_struct(frame, frame_addr, 0);
1884 return -TARGET_QEMU_ESIGRETURN;
1885
1886 badframe:
1887 force_sig(TARGET_SIGSEGV /* , current */);
1888 return 0;
1889 }
1890
1891 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1892 {
1893 int i;
1894 abi_ulong magic, sz;
1895 uint32_t fpscr, fpexc;
1896 struct target_vfp_sigframe *vfpframe;
1897 vfpframe = (struct target_vfp_sigframe *)regspace;
1898
1899 __get_user(magic, &vfpframe->magic);
1900 __get_user(sz, &vfpframe->size);
1901 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1902 return 0;
1903 }
1904 for (i = 0; i < 32; i++) {
1905 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1906 }
1907 __get_user(fpscr, &vfpframe->ufp.fpscr);
1908 vfp_set_fpscr(env, fpscr);
1909 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1910 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1911 * and the exception flag is cleared
1912 */
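/* FPEXC bit 30 is EN, bit 31 is EX and bit 28 is FP2V. */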
1913 fpexc |= (1 << 30);
1914 fpexc &= ~((1 << 31) | (1 << 28));
1915 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1916 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1917 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1918 return (abi_ulong*)(vfpframe + 1);
1919 }
1920
1921 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1922 abi_ulong *regspace)
1923 {
1924 int i;
1925 abi_ulong magic, sz;
1926 struct target_iwmmxt_sigframe *iwmmxtframe;
1927 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1928
1929 __get_user(magic, &iwmmxtframe->magic);
1930 __get_user(sz, &iwmmxtframe->size);
1931 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1932 return 0;
1933 }
1934 for (i = 0; i < 16; i++) {
1935 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1936 }
1937 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1938 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1939 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1940 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1941 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1942 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1943 return (abi_ulong*)(iwmmxtframe + 1);
1944 }
1945
1946 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
1947 struct target_ucontext_v2 *uc)
1948 {
1949 sigset_t host_set;
1950 abi_ulong *regspace;
1951
1952 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1953 set_sigmask(&host_set);
1954
1955 if (restore_sigcontext(env, &uc->tuc_mcontext))
1956 return 1;
1957
1958 /* Restore coprocessor signal frame */
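/* Each helper below validates its magic/size words and returns a pointer
 * just past its record, so the VFP and iWMMXt blocks can be walked back
 * to back inside tuc_regspace.
 */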
1959 regspace = uc->tuc_regspace;
1960 if (arm_feature(env, ARM_FEATURE_VFP)) {
1961 regspace = restore_sigframe_v2_vfp(env, regspace);
1962 if (!regspace) {
1963 return 1;
1964 }
1965 }
1966 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1967 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1968 if (!regspace) {
1969 return 1;
1970 }
1971 }
1972
1973 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
1974 return 1;
1975
1976 #if 0
1977 /* Send SIGTRAP if we're single-stepping */
1978 if (ptrace_cancel_bpt(current))
1979 send_sig(SIGTRAP, current, 1);
1980 #endif
1981
1982 return 0;
1983 }
1984
1985 static long do_sigreturn_v2(CPUARMState *env)
1986 {
1987 abi_ulong frame_addr;
1988 struct sigframe_v2 *frame = NULL;
1989
1990 /*
1991 * Since we stacked the signal on a 64-bit boundary,
1992 * 'sp' should be word aligned here. If it's
1993 * not, then the user is trying to mess with us.
1994 */
1995 frame_addr = env->regs[13];
1996 trace_user_do_sigreturn(env, frame_addr);
1997 if (frame_addr & 7) {
1998 goto badframe;
1999 }
2000
2001 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2002 goto badframe;
2003 }
2004
2005 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2006 goto badframe;
2007 }
2008
2009 unlock_user_struct(frame, frame_addr, 0);
2010 return -TARGET_QEMU_ESIGRETURN;
2011
2012 badframe:
2013 unlock_user_struct(frame, frame_addr, 0);
2014 force_sig(TARGET_SIGSEGV /* , current */);
2015 return 0;
2016 }
2017
2018 long do_sigreturn(CPUARMState *env)
2019 {
2020 if (get_osversion() >= 0x020612) {
2021 return do_sigreturn_v2(env);
2022 } else {
2023 return do_sigreturn_v1(env);
2024 }
2025 }
2026
2027 static long do_rt_sigreturn_v1(CPUARMState *env)
2028 {
2029 abi_ulong frame_addr;
2030 struct rt_sigframe_v1 *frame = NULL;
2031 sigset_t host_set;
2032
2033 /*
2034 * Since we stacked the signal on a 64-bit boundary,
2035 * 'sp' should be word aligned here. If it's
2036 * not, then the user is trying to mess with us.
2037 */
2038 frame_addr = env->regs[13];
2039 trace_user_do_rt_sigreturn(env, frame_addr);
2040 if (frame_addr & 7) {
2041 goto badframe;
2042 }
2043
2044 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2045 goto badframe;
2046 }
2047
2048 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2049 set_sigmask(&host_set);
2050
2051 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2052 goto badframe;
2053 }
2054
2055 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2056 goto badframe;
2057
2058 #if 0
2059 /* Send SIGTRAP if we're single-stepping */
2060 if (ptrace_cancel_bpt(current))
2061 send_sig(SIGTRAP, current, 1);
2062 #endif
2063 unlock_user_struct(frame, frame_addr, 0);
2064 return -TARGET_QEMU_ESIGRETURN;
2065
2066 badframe:
2067 unlock_user_struct(frame, frame_addr, 0);
2068 force_sig(TARGET_SIGSEGV /* , current */);
2069 return 0;
2070 }
2071
2072 static long do_rt_sigreturn_v2(CPUARMState *env)
2073 {
2074 abi_ulong frame_addr;
2075 struct rt_sigframe_v2 *frame = NULL;
2076
2077 /*
2078 * Since we stacked the signal on a 64-bit boundary,
2079 * 'sp' should be word aligned here. If it's
2080 * not, then the user is trying to mess with us.
2081 */
2082 frame_addr = env->regs[13];
2083 trace_user_do_rt_sigreturn(env, frame_addr);
2084 if (frame_addr & 7) {
2085 goto badframe;
2086 }
2087
2088 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2089 goto badframe;
2090 }
2091
2092 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2093 goto badframe;
2094 }
2095
2096 unlock_user_struct(frame, frame_addr, 0);
2097 return -TARGET_QEMU_ESIGRETURN;
2098
2099 badframe:
2100 unlock_user_struct(frame, frame_addr, 0);
2101 force_sig(TARGET_SIGSEGV /* , current */);
2102 return 0;
2103 }
2104
2105 long do_rt_sigreturn(CPUARMState *env)
2106 {
2107 if (get_osversion() >= 0x020612) {
2108 return do_rt_sigreturn_v2(env);
2109 } else {
2110 return do_rt_sigreturn_v1(env);
2111 }
2112 }
2113
2114 #elif defined(TARGET_SPARC)
2115
2116 #define __SUNOS_MAXWIN 31
2117
2118 /* This is what SunOS does, so shall I. */
2119 struct target_sigcontext {
2120 abi_ulong sigc_onstack; /* state to restore */
2121
2122 abi_ulong sigc_mask; /* sigmask to restore */
2123 abi_ulong sigc_sp; /* stack pointer */
2124 abi_ulong sigc_pc; /* program counter */
2125 abi_ulong sigc_npc; /* next program counter */
2126 abi_ulong sigc_psr; /* for condition codes etc */
2127 abi_ulong sigc_g1; /* User uses these two registers */
2128 abi_ulong sigc_o0; /* within the trampoline code. */
2129
2130 /* Now comes information regarding the user's window set
2131 * at the time of the signal.
2132 */
2133 abi_ulong sigc_oswins; /* outstanding windows */
2134
2135 /* stack ptrs for each regwin buf */
2136 char *sigc_spbuf[__SUNOS_MAXWIN];
2137
2138 /* Windows to restore after signal */
2139 struct {
2140 abi_ulong locals[8];
2141 abi_ulong ins[8];
2142 } sigc_wbuf[__SUNOS_MAXWIN];
2143 };
2144 /* A Sparc stack frame */
2145 struct sparc_stackf {
2146 abi_ulong locals[8];
2147 abi_ulong ins[8];
2148 /* It's simpler to treat fp and callers_pc as elements of ins[]
2149 * since we never need to access them ourselves.
2150 */
2151 char *structptr;
2152 abi_ulong xargs[6];
2153 abi_ulong xxargs[1];
2154 };
2155
2156 typedef struct {
2157 struct {
2158 abi_ulong psr;
2159 abi_ulong pc;
2160 abi_ulong npc;
2161 abi_ulong y;
2162 abi_ulong u_regs[16]; /* globals and ins */
2163 } si_regs;
2164 int si_mask;
2165 } __siginfo_t;
2166
2167 typedef struct {
2168 abi_ulong si_float_regs[32];
2169 unsigned long si_fsr;
2170 unsigned long si_fpqdepth;
2171 struct {
2172 unsigned long *insn_addr;
2173 unsigned long insn;
2174 } si_fpqueue [16];
2175 } qemu_siginfo_fpu_t;
2176
2177
2178 struct target_signal_frame {
2179 struct sparc_stackf ss;
2180 __siginfo_t info;
2181 abi_ulong fpu_save;
2182 abi_ulong insns[2] __attribute__ ((aligned (8)));
2183 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2184 abi_ulong extra_size; /* Should be 0 */
2185 qemu_siginfo_fpu_t fpu_state;
2186 };
2187 struct target_rt_signal_frame {
2188 struct sparc_stackf ss;
2189 siginfo_t info;
2190 abi_ulong regs[20];
2191 sigset_t mask;
2192 abi_ulong fpu_save;
2193 unsigned int insns[2];
2194 stack_t stack;
2195 unsigned int extra_size; /* Should be 0 */
2196 qemu_siginfo_fpu_t fpu_state;
2197 };
2198
2199 #define UREG_O0 16
2200 #define UREG_O6 22
2201 #define UREG_I0 0
2202 #define UREG_I1 1
2203 #define UREG_I2 2
2204 #define UREG_I3 3
2205 #define UREG_I4 4
2206 #define UREG_I5 5
2207 #define UREG_I6 6
2208 #define UREG_I7 7
2209 #define UREG_L0 8
2210 #define UREG_FP UREG_I6
2211 #define UREG_SP UREG_O6
2212
2213 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2214 CPUSPARCState *env,
2215 unsigned long framesize)
2216 {
2217 abi_ulong sp;
2218
2219 sp = env->regwptr[UREG_FP];
2220
2221 /* This is the X/Open sanctioned signal stack switching. */
2222 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2223 if (!on_sig_stack(sp)
2224 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2225 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2226 }
2227 }
2228 return sp - framesize;
2229 }
2230
2231 static int
2232 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2233 {
2234 int err = 0, i;
2235
2236 __put_user(env->psr, &si->si_regs.psr);
2237 __put_user(env->pc, &si->si_regs.pc);
2238 __put_user(env->npc, &si->si_regs.npc);
2239 __put_user(env->y, &si->si_regs.y);
2240 for (i=0; i < 8; i++) {
2241 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2242 }
2243 for (i=0; i < 8; i++) {
2244 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2245 }
2246 __put_user(mask, &si->si_mask);
2247 return err;
2248 }
2249
2250 #if 0
2251 static int
2252 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2253 CPUSPARCState *env, unsigned long mask)
2254 {
2255 int err = 0;
2256
2257 __put_user(mask, &sc->sigc_mask);
2258 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2259 __put_user(env->pc, &sc->sigc_pc);
2260 __put_user(env->npc, &sc->sigc_npc);
2261 __put_user(env->psr, &sc->sigc_psr);
2262 __put_user(env->gregs[1], &sc->sigc_g1);
2263 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2264
2265 return err;
2266 }
2267 #endif
2268 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
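/* Round the frame size up to a multiple of 8 so the stack stays 8-byte aligned. */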
2269
2270 static void setup_frame(int sig, struct target_sigaction *ka,
2271 target_sigset_t *set, CPUSPARCState *env)
2272 {
2273 abi_ulong sf_addr;
2274 struct target_signal_frame *sf;
2275 int sigframe_size, err, i;
2276
2277 /* 1. Make sure everything is clean */
2278 //synchronize_user_stack();
2279
2280 sigframe_size = NF_ALIGNEDSZ;
2281 sf_addr = get_sigframe(ka, env, sigframe_size);
2282 trace_user_setup_frame(env, sf_addr);
2283
2284 sf = lock_user(VERIFY_WRITE, sf_addr,
2285 sizeof(struct target_signal_frame), 0);
2286 if (!sf) {
2287 goto sigsegv;
2288 }
2289 #if 0
2290 if (invalid_frame_pointer(sf, sigframe_size))
2291 goto sigill_and_return;
2292 #endif
2293 /* 2. Save the current process state */
2294 err = setup___siginfo(&sf->info, env, set->sig[0]);
2295 __put_user(0, &sf->extra_size);
2296
2297 //save_fpu_state(regs, &sf->fpu_state);
2298 //__put_user(&sf->fpu_state, &sf->fpu_save);
2299
2300 __put_user(set->sig[0], &sf->info.si_mask);
2301 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2302 __put_user(set->sig[i + 1], &sf->extramask[i]);
2303 }
2304
2305 for (i = 0; i < 8; i++) {
2306 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2307 }
2308 for (i = 0; i < 8; i++) {
2309 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2310 }
2311 if (err)
2312 goto sigsegv;
2313
2314 /* 3. signal handler back-trampoline and parameters */
2315 env->regwptr[UREG_FP] = sf_addr;
2316 env->regwptr[UREG_I0] = sig;
2317 env->regwptr[UREG_I1] = sf_addr +
2318 offsetof(struct target_signal_frame, info);
2319 env->regwptr[UREG_I2] = sf_addr +
2320 offsetof(struct target_signal_frame, info);
2321
2322 /* 4. signal handler */
2323 env->pc = ka->_sa_handler;
2324 env->npc = (env->pc + 4);
2325 /* 5. return to kernel instructions */
2326 if (ka->sa_restorer) {
2327 env->regwptr[UREG_I7] = ka->sa_restorer;
2328 } else {
2329 uint32_t val32;
2330
2331 env->regwptr[UREG_I7] = sf_addr +
2332 offsetof(struct target_signal_frame, insns) - 2 * 4;
2333
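/* Hand-assembled trampoline: 0x821020d8 is "mov 0xd8, %g1" (0xd8 being the
 * hard-coded sparc32 __NR_sigreturn) and 0x91d02010 is "ta 0x10", the
 * Linux syscall trap.
 */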
2334 /* mov __NR_sigreturn, %g1 */
2335 val32 = 0x821020d8;
2336 __put_user(val32, &sf->insns[0]);
2337
2338 /* t 0x10 */
2339 val32 = 0x91d02010;
2340 __put_user(val32, &sf->insns[1]);
2341 if (err)
2342 goto sigsegv;
2343
2344 /* Flush instruction space. */
2345 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2346 // tb_flush(env);
2347 }
2348 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2349 return;
2350 #if 0
2351 sigill_and_return:
2352 force_sig(TARGET_SIGILL);
2353 #endif
2354 sigsegv:
2355 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2356 force_sig(TARGET_SIGSEGV);
2357 }
2358
2359 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2360 target_siginfo_t *info,
2361 target_sigset_t *set, CPUSPARCState *env)
2362 {
2363 fprintf(stderr, "setup_rt_frame: not implemented\n");
2364 }
2365
2366 long do_sigreturn(CPUSPARCState *env)
2367 {
2368 abi_ulong sf_addr;
2369 struct target_signal_frame *sf;
2370 uint32_t up_psr, pc, npc;
2371 target_sigset_t set;
2372 sigset_t host_set;
2373 int err=0, i;
2374
2375 sf_addr = env->regwptr[UREG_FP];
2376 trace_user_do_sigreturn(env, sf_addr);
2377 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2378 goto segv_and_exit;
2379 }
2380
2381 /* 1. Make sure we are not getting garbage from the user */
2382
2383 if (sf_addr & 3)
2384 goto segv_and_exit;
2385
2386 __get_user(pc, &sf->info.si_regs.pc);
2387 __get_user(npc, &sf->info.si_regs.npc);
2388
2389 if ((pc | npc) & 3) {
2390 goto segv_and_exit;
2391 }
2392
2393 /* 2. Restore the state */
2394 __get_user(up_psr, &sf->info.si_regs.psr);
2395
2396 /* User can only change condition codes and FPU enabling in %psr. */
2397 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2398 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2399
2400 env->pc = pc;
2401 env->npc = npc;
2402 __get_user(env->y, &sf->info.si_regs.y);
2403 for (i=0; i < 8; i++) {
2404 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2405 }
2406 for (i=0; i < 8; i++) {
2407 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2408 }
2409
2410 /* FIXME: implement FPU save/restore:
2411 * __get_user(fpu_save, &sf->fpu_save);
2412 * if (fpu_save)
2413 * err |= restore_fpu_state(env, fpu_save);
2414 */
2415
2416 /* This is pretty much atomic; no amount of locking would prevent
2417 * the races which exist anyway.
2418 */
2419 __get_user(set.sig[0], &sf->info.si_mask);
2420 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2421 __get_user(set.sig[i], &sf->extramask[i - 1]);
2422 }
2423
2424 target_to_host_sigset_internal(&host_set, &set);
2425 set_sigmask(&host_set);
2426
2427 if (err) {
2428 goto segv_and_exit;
2429 }
2430 unlock_user_struct(sf, sf_addr, 0);
2431 return -TARGET_QEMU_ESIGRETURN;
2432
2433 segv_and_exit:
2434 unlock_user_struct(sf, sf_addr, 0);
2435 force_sig(TARGET_SIGSEGV);
2436 }
2437
2438 long do_rt_sigreturn(CPUSPARCState *env)
2439 {
2440 trace_user_do_rt_sigreturn(env, 0);
2441 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2442 return -TARGET_ENOSYS;
2443 }
2444
2445 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2446 #define MC_TSTATE 0
2447 #define MC_PC 1
2448 #define MC_NPC 2
2449 #define MC_Y 3
2450 #define MC_G1 4
2451 #define MC_G2 5
2452 #define MC_G3 6
2453 #define MC_G4 7
2454 #define MC_G5 8
2455 #define MC_G6 9
2456 #define MC_G7 10
2457 #define MC_O0 11
2458 #define MC_O1 12
2459 #define MC_O2 13
2460 #define MC_O3 14
2461 #define MC_O4 15
2462 #define MC_O5 16
2463 #define MC_O6 17
2464 #define MC_O7 18
2465 #define MC_NGREG 19
2466
2467 typedef abi_ulong target_mc_greg_t;
2468 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2469
2470 struct target_mc_fq {
2471 abi_ulong *mcfq_addr;
2472 uint32_t mcfq_insn;
2473 };
2474
2475 struct target_mc_fpu {
2476 union {
2477 uint32_t sregs[32];
2478 uint64_t dregs[32];
2479 //uint128_t qregs[16];
2480 } mcfpu_fregs;
2481 abi_ulong mcfpu_fsr;
2482 abi_ulong mcfpu_fprs;
2483 abi_ulong mcfpu_gsr;
2484 struct target_mc_fq *mcfpu_fq;
2485 unsigned char mcfpu_qcnt;
2486 unsigned char mcfpu_qentsz;
2487 unsigned char mcfpu_enab;
2488 };
2489 typedef struct target_mc_fpu target_mc_fpu_t;
2490
2491 typedef struct {
2492 target_mc_gregset_t mc_gregs;
2493 target_mc_greg_t mc_fp;
2494 target_mc_greg_t mc_i7;
2495 target_mc_fpu_t mc_fpregs;
2496 } target_mcontext_t;
2497
2498 struct target_ucontext {
2499 struct target_ucontext *tuc_link;
2500 abi_ulong tuc_flags;
2501 target_sigset_t tuc_sigmask;
2502 target_mcontext_t tuc_mcontext;
2503 };
2504
2505 /* A V9 register window */
2506 struct target_reg_window {
2507 abi_ulong locals[8];
2508 abi_ulong ins[8];
2509 };
2510
2511 #define TARGET_STACK_BIAS 2047
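/* The SPARC V9 ABI biases 64-bit stack and frame pointers by 2047 bytes;
 * the bias must be added back before a register window is dereferenced.
 */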
2512
2513 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2514 void sparc64_set_context(CPUSPARCState *env)
2515 {
2516 abi_ulong ucp_addr;
2517 struct target_ucontext *ucp;
2518 target_mc_gregset_t *grp;
2519 abi_ulong pc, npc, tstate;
2520 abi_ulong fp, i7, w_addr;
2521 unsigned int i;
2522
2523 ucp_addr = env->regwptr[UREG_I0];
2524 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2525 goto do_sigsegv;
2526 }
2527 grp = &ucp->tuc_mcontext.mc_gregs;
2528 __get_user(pc, &((*grp)[MC_PC]));
2529 __get_user(npc, &((*grp)[MC_NPC]));
2530 if ((pc | npc) & 3) {
2531 goto do_sigsegv;
2532 }
2533 if (env->regwptr[UREG_I1]) {
2534 target_sigset_t target_set;
2535 sigset_t set;
2536
2537 if (TARGET_NSIG_WORDS == 1) {
2538 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2539 } else {
2540 abi_ulong *src, *dst;
2541 src = ucp->tuc_sigmask.sig;
2542 dst = target_set.sig;
2543 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2544 __get_user(*dst, src);
2545 }
2546 }
2547 target_to_host_sigset_internal(&set, &target_set);
2548 set_sigmask(&set);
2549 }
2550 env->pc = pc;
2551 env->npc = npc;
2552 __get_user(env->y, &((*grp)[MC_Y]));
2553 __get_user(tstate, &((*grp)[MC_TSTATE]));
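/* TSTATE layout: CWP in bits 4:0, ASI in bits 31:24, CCR in bits 39:32. */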
2554 env->asi = (tstate >> 24) & 0xff;
2555 cpu_put_ccr(env, tstate >> 32);
2556 cpu_put_cwp64(env, tstate & 0x1f);
2557 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2558 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2559 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2560 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2561 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2562 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2563 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2564 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2565 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2566 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2567 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2568 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2569 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2570 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2571 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2572
2573 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2574 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2575
2576 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2577 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2578 abi_ulong) != 0) {
2579 goto do_sigsegv;
2580 }
2581 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2582 abi_ulong) != 0) {
2583 goto do_sigsegv;
2584 }
2585 /* FIXME this does not match how the kernel handles the FPU in
2586 * its sparc64_set_context implementation. In particular the FPU
2587 * is only restored if fenab is non-zero in:
2588 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2589 */
2590 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2591 {
2592 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2593 for (i = 0; i < 64; i++, src++) {
2594 if (i & 1) {
2595 __get_user(env->fpr[i/2].l.lower, src);
2596 } else {
2597 __get_user(env->fpr[i/2].l.upper, src);
2598 }
2599 }
2600 }
2601 __get_user(env->fsr,
2602 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2603 __get_user(env->gsr,
2604 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2605 unlock_user_struct(ucp, ucp_addr, 0);
2606 return;
2607 do_sigsegv:
2608 unlock_user_struct(ucp, ucp_addr, 0);
2609 force_sig(TARGET_SIGSEGV);
2610 }
2611
2612 void sparc64_get_context(CPUSPARCState *env)
2613 {
2614 abi_ulong ucp_addr;
2615 struct target_ucontext *ucp;
2616 target_mc_gregset_t *grp;
2617 target_mcontext_t *mcp;
2618 abi_ulong fp, i7, w_addr;
2619 int err;
2620 unsigned int i;
2621 target_sigset_t target_set;
2622 sigset_t set;
2623
2624 ucp_addr = env->regwptr[UREG_I0];
2625 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2626 goto do_sigsegv;
2627 }
2628
2629 mcp = &ucp->tuc_mcontext;
2630 grp = &mcp->mc_gregs;
2631
2632 /* Skip over the trap instruction, first. */
2633 env->pc = env->npc;
2634 env->npc += 4;
2635
2636 /* If we're only reading the signal mask then do_sigprocmask()
2637 * is guaranteed not to fail, which is important because we don't
2638 * have any way to signal a failure or restart this operation since
2639 * this is not a normal syscall.
2640 */
2641 err = do_sigprocmask(0, NULL, &set);
2642 assert(err == 0);
2643 host_to_target_sigset_internal(&target_set, &set);
2644 if (TARGET_NSIG_WORDS == 1) {
2645 __put_user(target_set.sig[0],
2646 (abi_ulong *)&ucp->tuc_sigmask);
2647 } else {
2648 abi_ulong *src, *dst;
2649 src = target_set.sig;
2650 dst = ucp->tuc_sigmask.sig;
2651 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2652 __put_user(*src, dst);
2653 }
2654 if (err)
2655 goto do_sigsegv;
2656 }
2657
2658 /* XXX: tstate must be saved properly */
2659 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2660 __put_user(env->pc, &((*grp)[MC_PC]));
2661 __put_user(env->npc, &((*grp)[MC_NPC]));
2662 __put_user(env->y, &((*grp)[MC_Y]));
2663 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2664 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2665 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2666 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2667 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2668 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2669 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2670 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2671 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2672 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2673 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2674 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2675 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2676 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2677 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2678
2679 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2680 fp = i7 = 0;
2681 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2682 abi_ulong) != 0) {
2683 goto do_sigsegv;
2684 }
2685 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2686 abi_ulong) != 0) {
2687 goto do_sigsegv;
2688 }
2689 __put_user(fp, &(mcp->mc_fp));
2690 __put_user(i7, &(mcp->mc_i7));
2691
2692 {
2693 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2694 for (i = 0; i < 64; i++, dst++) {
2695 if (i & 1) {
2696 __put_user(env->fpr[i/2].l.lower, dst);
2697 } else {
2698 __put_user(env->fpr[i/2].l.upper, dst);
2699 }
2700 }
2701 }
2702 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2703 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2704 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2705
2706 if (err)
2707 goto do_sigsegv;
2708 unlock_user_struct(ucp, ucp_addr, 1);
2709 return;
2710 do_sigsegv:
2711 unlock_user_struct(ucp, ucp_addr, 1);
2712 force_sig(TARGET_SIGSEGV);
2713 }
2714 #endif
2715 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2716
2717 # if defined(TARGET_ABI_MIPSO32)
2718 struct target_sigcontext {
2719 uint32_t sc_regmask; /* Unused */
2720 uint32_t sc_status;
2721 uint64_t sc_pc;
2722 uint64_t sc_regs[32];
2723 uint64_t sc_fpregs[32];
2724 uint32_t sc_ownedfp; /* Unused */
2725 uint32_t sc_fpc_csr;
2726 uint32_t sc_fpc_eir; /* Unused */
2727 uint32_t sc_used_math;
2728 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2729 uint32_t pad0;
2730 uint64_t sc_mdhi;
2731 uint64_t sc_mdlo;
2732 target_ulong sc_hi1; /* Was sc_cause */
2733 target_ulong sc_lo1; /* Was sc_badvaddr */
2734 target_ulong sc_hi2; /* Was sc_sigset[4] */
2735 target_ulong sc_lo2;
2736 target_ulong sc_hi3;
2737 target_ulong sc_lo3;
2738 };
2739 # else /* N32 || N64 */
2740 struct target_sigcontext {
2741 uint64_t sc_regs[32];
2742 uint64_t sc_fpregs[32];
2743 uint64_t sc_mdhi;
2744 uint64_t sc_hi1;
2745 uint64_t sc_hi2;
2746 uint64_t sc_hi3;
2747 uint64_t sc_mdlo;
2748 uint64_t sc_lo1;
2749 uint64_t sc_lo2;
2750 uint64_t sc_lo3;
2751 uint64_t sc_pc;
2752 uint32_t sc_fpc_csr;
2753 uint32_t sc_used_math;
2754 uint32_t sc_dsp;
2755 uint32_t sc_reserved;
2756 };
2757 # endif /* O32 */
2758
2759 struct sigframe {
2760 uint32_t sf_ass[4]; /* argument save space for o32 */
2761 uint32_t sf_code[2]; /* signal trampoline */
2762 struct target_sigcontext sf_sc;
2763 target_sigset_t sf_mask;
2764 };
2765
2766 struct target_ucontext {
2767 target_ulong tuc_flags;
2768 target_ulong tuc_link;
2769 target_stack_t tuc_stack;
2770 target_ulong pad0;
2771 struct target_sigcontext tuc_mcontext;
2772 target_sigset_t tuc_sigmask;
2773 };
2774
2775 struct target_rt_sigframe {
2776 uint32_t rs_ass[4]; /* argument save space for o32 */
2777 uint32_t rs_code[2]; /* signal trampoline */
2778 struct target_siginfo rs_info;
2779 struct target_ucontext rs_uc;
2780 };
2781
2782 /* Install trampoline to jump back from signal handler */
2783 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2784 {
2785 int err = 0;
2786
2787 /*
2788 * Set up the return code ...
2789 *
2790 * li v0, __NR__foo_sigreturn
2791 * syscall
2792 */
2793
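/* 0x24020000 is "addiu $v0, $zero, <nr>" (the li pseudo-instruction) with
 * the syscall number in the low 16 bits; 0x0000000c is "syscall".
 */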
2794 __put_user(0x24020000 + syscall, tramp + 0);
2795 __put_user(0x0000000c , tramp + 1);
2796 return err;
2797 }
2798
2799 static inline void setup_sigcontext(CPUMIPSState *regs,
2800 struct target_sigcontext *sc)
2801 {
2802 int i;
2803
2804 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2805 regs->hflags &= ~MIPS_HFLAG_BMASK;
2806
2807 __put_user(0, &sc->sc_regs[0]);
2808 for (i = 1; i < 32; ++i) {
2809 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2810 }
2811
2812 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2813 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2814
2815 /* Rather than checking for dsp existence, always copy. The storage
2816 would just be garbage otherwise. */
2817 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2818 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2819 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2820 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2821 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2822 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2823 {
2824 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2825 __put_user(dsp, &sc->sc_dsp);
2826 }
2827
2828 __put_user(1, &sc->sc_used_math);
2829
2830 for (i = 0; i < 32; ++i) {
2831 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2832 }
2833 }
2834
2835 static inline void
2836 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2837 {
2838 int i;
2839
2840 __get_user(regs->CP0_EPC, &sc->sc_pc);
2841
2842 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2843 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2844
2845 for (i = 1; i < 32; ++i) {
2846 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2847 }
2848
2849 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2850 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2851 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2852 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2853 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2854 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2855 {
2856 uint32_t dsp;
2857 __get_user(dsp, &sc->sc_dsp);
2858 cpu_wrdsp(dsp, 0x3ff, regs);
2859 }
2860
2861 for (i = 0; i < 32; ++i) {
2862 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2863 }
2864 }
2865
2866 /*
2867 * Determine which stack to use..
2868 */
2869 static inline abi_ulong
2870 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2871 {
2872 unsigned long sp;
2873
2874 /* Default to using normal stack */
2875 sp = regs->active_tc.gpr[29];
2876
2877 /*
2878 * FPU emulator may have its own trampoline active just
2879 * above the user stack, 16 bytes before the next lowest
2880 * 16-byte boundary. Try to avoid trashing it.
2881 */
2882 sp -= 32;
2883
2884 /* This is the X/Open sanctioned signal stack switching. */
2885 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2886 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2887 }
2888
2889 return (sp - frame_size) & ~7;
2890 }
2891
2892 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2893 {
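/* On MIPS16/microMIPS-capable CPUs the low bit of a code address selects
 * the compressed ISA: reflect it in hflags and strip it from the PC.
 */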
2894 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2895 env->hflags &= ~MIPS_HFLAG_M16;
2896 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2897 env->active_tc.PC &= ~(target_ulong) 1;
2898 }
2899 }
2900
2901 # if defined(TARGET_ABI_MIPSO32)
2902 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2903 static void setup_frame(int sig, struct target_sigaction * ka,
2904 target_sigset_t *set, CPUMIPSState *regs)
2905 {
2906 struct sigframe *frame;
2907 abi_ulong frame_addr;
2908 int i;
2909
2910 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2911 trace_user_setup_frame(regs, frame_addr);
2912 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2913 goto give_sigsegv;
2914 }
2915
2916 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2917
2918 setup_sigcontext(regs, &frame->sf_sc);
2919
2920 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2921 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2922 }
2923
2924 /*
2925 * Arguments to signal handler:
2926 *
2927 * a0 = signal number
2928 * a1 = 0 (should be cause)
2929 * a2 = pointer to struct sigcontext
2930 *
2931 * $25 and PC point to the signal handler, $29 points to the
2932 * struct sigframe.
2933 */
2934 regs->active_tc.gpr[ 4] = sig;
2935 regs->active_tc.gpr[ 5] = 0;
2936 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2937 regs->active_tc.gpr[29] = frame_addr;
2938 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2939 /* The original kernel code sets CP0_EPC to the handler
2940 * since it returns to userland using eret;
2941 * we cannot do that here, so we must set PC directly */
2942 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2943 mips_set_hflags_isa_mode_from_pc(regs);
2944 unlock_user_struct(frame, frame_addr, 1);
2945 return;
2946
2947 give_sigsegv:
2948 force_sig(TARGET_SIGSEGV/*, current*/);
2949 }
2950
2951 long do_sigreturn(CPUMIPSState *regs)
2952 {
2953 struct sigframe *frame;
2954 abi_ulong frame_addr;
2955 sigset_t blocked;
2956 target_sigset_t target_set;
2957 int i;
2958
2959 frame_addr = regs->active_tc.gpr[29];
2960 trace_user_do_sigreturn(regs, frame_addr);
2961 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2962 goto badframe;
2963
2964 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2965 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2966 }
2967
2968 target_to_host_sigset_internal(&blocked, &target_set);
2969 set_sigmask(&blocked);
2970
2971 restore_sigcontext(regs, &frame->sf_sc);
2972
2973 #if 0
2974 /*
2975 * Don't let your children do this ...
2976 */
2977 __asm__ __volatile__(
2978 "move\t$29, %0\n\t"
2979 "j\tsyscall_exit"
2980 :/* no outputs */
2981 :"r" (&regs));
2982 /* Unreached */
2983 #endif
2984
2985 regs->active_tc.PC = regs->CP0_EPC;
2986 mips_set_hflags_isa_mode_from_pc(regs);
2987 /* I am not sure this is right, but it seems to work;
2988 * maybe a problem with nested signals? */
2989 regs->CP0_EPC = 0;
2990 return -TARGET_QEMU_ESIGRETURN;
2991
2992 badframe:
2993 force_sig(TARGET_SIGSEGV/*, current*/);
2994 return 0;
2995 }
2996 # endif /* O32 */
2997
2998 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2999 target_siginfo_t *info,
3000 target_sigset_t *set, CPUMIPSState *env)
3001 {
3002 struct target_rt_sigframe *frame;
3003 abi_ulong frame_addr;
3004 int i;
3005
3006 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3007 trace_user_setup_rt_frame(env, frame_addr);
3008 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3009 goto give_sigsegv;
3010 }
3011
3012 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3013
3014 tswap_siginfo(&frame->rs_info, info);
3015
3016 __put_user(0, &frame->rs_uc.tuc_flags);
3017 __put_user(0, &frame->rs_uc.tuc_link);
3018 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3019 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3020 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3021 &frame->rs_uc.tuc_stack.ss_flags);
3022
3023 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3024
3025 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3026 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3027 }
3028
3029 /*
3030 * Arguments to signal handler:
3031 *
3032 * a0 = signal number
3033 * a1 = pointer to siginfo_t
3034 * a2 = pointer to struct ucontext
3035 *
3036 * $25 and PC point to the signal handler, $29 points to the
3037 * struct sigframe.
3038 */
3039 env->active_tc.gpr[ 4] = sig;
3040 env->active_tc.gpr[ 5] = frame_addr
3041 + offsetof(struct target_rt_sigframe, rs_info);
3042 env->active_tc.gpr[ 6] = frame_addr
3043 + offsetof(struct target_rt_sigframe, rs_uc);
3044 env->active_tc.gpr[29] = frame_addr;
3045 env->active_tc.gpr[31] = frame_addr
3046 + offsetof(struct target_rt_sigframe, rs_code);
3047 /* The original kernel code sets CP0_EPC to the handler
3048 * since it returns to userland using eret;
3049 * we cannot do that here, so we must set PC directly */
3050 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3051 mips_set_hflags_isa_mode_from_pc(env);
3052 unlock_user_struct(frame, frame_addr, 1);
3053 return;
3054
3055 give_sigsegv:
3056 unlock_user_struct(frame, frame_addr, 1);
3057 force_sig(TARGET_SIGSEGV/*, current*/);
3058 }
3059
3060 long do_rt_sigreturn(CPUMIPSState *env)
3061 {
3062 struct target_rt_sigframe *frame;
3063 abi_ulong frame_addr;
3064 sigset_t blocked;
3065
3066 frame_addr = env->active_tc.gpr[29];
3067 trace_user_do_rt_sigreturn(env, frame_addr);
3068 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3069 goto badframe;
3070 }
3071
3072 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3073 set_sigmask(&blocked);
3074
3075 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3076
3077 if (do_sigaltstack(frame_addr +
3078 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3079 0, get_sp_from_cpustate(env)) == -EFAULT)
3080 goto badframe;
3081
3082 env->active_tc.PC = env->CP0_EPC;
3083 mips_set_hflags_isa_mode_from_pc(env);
3084 /* I am not sure this is right, but it seems to work;
3085 * maybe a problem with nested signals? */
3086 env->CP0_EPC = 0;
3087 return -TARGET_QEMU_ESIGRETURN;
3088
3089 badframe:
3090 force_sig(TARGET_SIGSEGV/*, current*/);
3091 return 0;
3092 }
3093
3094 #elif defined(TARGET_SH4)
3095
3096 /*
3097 * code and data structures from linux kernel:
3098 * include/asm-sh/sigcontext.h
3099 * arch/sh/kernel/signal.c
3100 */
3101
3102 struct target_sigcontext {
3103 target_ulong oldmask;
3104
3105 /* CPU registers */
3106 target_ulong sc_gregs[16];
3107 target_ulong sc_pc;
3108 target_ulong sc_pr;
3109 target_ulong sc_sr;
3110 target_ulong sc_gbr;
3111 target_ulong sc_mach;
3112 target_ulong sc_macl;
3113
3114 /* FPU registers */
3115 target_ulong sc_fpregs[16];
3116 target_ulong sc_xfpregs[16];
3117 unsigned int sc_fpscr;
3118 unsigned int sc_fpul;
3119 unsigned int sc_ownedfp;
3120 };
3121
3122 struct target_sigframe
3123 {
3124 struct target_sigcontext sc;
3125 target_ulong extramask[TARGET_NSIG_WORDS-1];
3126 uint16_t retcode[3];
3127 };
3128
3129
3130 struct target_ucontext {
3131 target_ulong tuc_flags;
3132 struct target_ucontext *tuc_link;
3133 target_stack_t tuc_stack;
3134 struct target_sigcontext tuc_mcontext;
3135 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3136 };
3137
3138 struct target_rt_sigframe
3139 {
3140 struct target_siginfo info;
3141 struct target_ucontext uc;
3142 uint16_t retcode[3];
3143 };
3144
3145
3146 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3147 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
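/* The generated trampoline is three 16-bit words: MOVW(2) loads the word at
 * PC + 4 (retcode[2], holding the sigreturn number) into r3, and TRAP_NOARG
 * issues the trapa that enters the kernel.
 */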
3148
3149 static abi_ulong get_sigframe(struct target_sigaction *ka,
3150 unsigned long sp, size_t frame_size)
3151 {
3152 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3153 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3154 }
3155
3156 return (sp - frame_size) & -8ul;
3157 }
3158
3159 static void setup_sigcontext(struct target_sigcontext *sc,
3160 CPUSH4State *regs, unsigned long mask)
3161 {
3162 int i;
3163
3164 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3165 COPY(gregs[0]); COPY(gregs[1]);
3166 COPY(gregs[2]); COPY(gregs[3]);
3167 COPY(gregs[4]); COPY(gregs[5]);
3168 COPY(gregs[6]); COPY(gregs[7]);
3169 COPY(gregs[8]); COPY(gregs[9]);
3170 COPY(gregs[10]); COPY(gregs[11]);
3171 COPY(gregs[12]); COPY(gregs[13]);
3172 COPY(gregs[14]); COPY(gregs[15]);
3173 COPY(gbr); COPY(mach);
3174 COPY(macl); COPY(pr);
3175 COPY(sr); COPY(pc);
3176 #undef COPY
3177
3178 for (i=0; i<16; i++) {
3179 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3180 }
3181 __put_user(regs->fpscr, &sc->sc_fpscr);
3182 __put_user(regs->fpul, &sc->sc_fpul);
3183
3184 /* non-iBCS2 extensions.. */
3185 __put_user(mask, &sc->oldmask);
3186 }
3187
3188 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3189 {
3190 int i;
3191
3192 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3193 COPY(gregs[0]); COPY(gregs[1]);
3194 COPY(gregs[2]); COPY(gregs[3]);
3195 COPY(gregs[4]); COPY(gregs[5]);
3196 COPY(gregs[6]); COPY(gregs[7]);
3197 COPY(gregs[8]); COPY(gregs[9]);
3198 COPY(gregs[10]); COPY(gregs[11]);
3199 COPY(gregs[12]); COPY(gregs[13]);
3200 COPY(gregs[14]); COPY(gregs[15]);
3201 COPY(gbr); COPY(mach);
3202 COPY(macl); COPY(pr);
3203 COPY(sr); COPY(pc);
3204 #undef COPY
3205
3206 for (i=0; i<16; i++) {
3207 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3208 }
3209 __get_user(regs->fpscr, &sc->sc_fpscr);
3210 __get_user(regs->fpul, &sc->sc_fpul);
3211
3212 regs->tra = -1; /* disable syscall checks */
3213 }
3214
3215 static void setup_frame(int sig, struct target_sigaction *ka,
3216 target_sigset_t *set, CPUSH4State *regs)
3217 {
3218 struct target_sigframe *frame;
3219 abi_ulong frame_addr;
3220 int i;
3221
3222 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3223 trace_user_setup_frame(regs, frame_addr);
3224 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3225 goto give_sigsegv;
3226 }
3227
3228 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3229
3230 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3231 __put_user(set->sig[i + 1], &frame->extramask[i]);
3232 }
3233
3234 /* Set up to return from userspace. If provided, use a stub
3235 already in userspace. */
3236 if (ka->sa_flags & TARGET_SA_RESTORER) {
3237 regs->pr = (unsigned long) ka->sa_restorer;
3238 } else {
3239 /* Generate return code (system call to sigreturn) */
3240 abi_ulong retcode_addr = frame_addr +
3241 offsetof(struct target_sigframe, retcode);
3242 __put_user(MOVW(2), &frame->retcode[0]);
3243 __put_user(TRAP_NOARG, &frame->retcode[1]);
3244 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3245 regs->pr = (unsigned long) retcode_addr;
3246 }
3247
3248 /* Set up registers for signal handler */
3249 regs->gregs[15] = frame_addr;
3250 regs->gregs[4] = sig; /* Arg for signal handler */
3251 regs->gregs[5] = 0;
3252 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3253 regs->pc = (unsigned long) ka->_sa_handler;
3254
3255 unlock_user_struct(frame, frame_addr, 1);
3256 return;
3257
3258 give_sigsegv:
3259 unlock_user_struct(frame, frame_addr, 1);
3260 force_sig(TARGET_SIGSEGV);
3261 }
3262
3263 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3264 target_siginfo_t *info,
3265 target_sigset_t *set, CPUSH4State *regs)
3266 {
3267 struct target_rt_sigframe *frame;
3268 abi_ulong frame_addr;
3269 int i;
3270
3271 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3272 trace_user_setup_rt_frame(regs, frame_addr);
3273 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3274 goto give_sigsegv;
3275 }
3276
3277 tswap_siginfo(&frame->info, info);
3278
3279 /* Create the ucontext. */
3280 __put_user(0, &frame->uc.tuc_flags);
3281 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3282 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3283 &frame->uc.tuc_stack.ss_sp);
3284 __put_user(sas_ss_flags(regs->gregs[15]),
3285 &frame->uc.tuc_stack.ss_flags);
3286 __put_user(target_sigaltstack_used.ss_size,
3287 &frame->uc.tuc_stack.ss_size);
3288 setup_sigcontext(&frame->uc.tuc_mcontext,
3289 regs, set->sig[0]);
3290 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3291 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3292 }
3293
3294 /* Set up to return from userspace. If provided, use a stub
3295 already in userspace. */
3296 if (ka->sa_flags & TARGET_SA_RESTORER) {
3297 regs->pr = (unsigned long) ka->sa_restorer;
3298 } else {
3299 /* Generate return code (system call to sigreturn) */
3300 abi_ulong retcode_addr = frame_addr +
3301 offsetof(struct target_rt_sigframe, retcode);
3302 __put_user(MOVW(2), &frame->retcode[0]);
3303 __put_user(TRAP_NOARG, &frame->retcode[1]);
3304 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3305 regs->pr = (unsigned long) retcode_addr;
3306 }
3307
3308 /* Set up registers for signal handler */
3309 regs->gregs[15] = frame_addr;
3310 regs->gregs[4] = sig; /* Arg for signal handler */
3311 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3312 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3313 regs->pc = (unsigned long) ka->_sa_handler;
3314
3315 unlock_user_struct(frame, frame_addr, 1);
3316 return;
3317
3318 give_sigsegv:
3319 unlock_user_struct(frame, frame_addr, 1);
3320 force_sig(TARGET_SIGSEGV);
3321 }
3322
3323 long do_sigreturn(CPUSH4State *regs)
3324 {
3325 struct target_sigframe *frame;
3326 abi_ulong frame_addr;
3327 sigset_t blocked;
3328 target_sigset_t target_set;
3329 int i;
3330 int err = 0;
3331
3332 frame_addr = regs->gregs[15];
3333 trace_user_do_sigreturn(regs, frame_addr);
3334 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3335 goto badframe;
3336 }
3337
3338 __get_user(target_set.sig[0], &frame->sc.oldmask);
3339 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3340 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3341 }
3342
3343 if (err)
3344 goto badframe;
3345
3346 target_to_host_sigset_internal(&blocked, &target_set);
3347 set_sigmask(&blocked);
3348
3349 restore_sigcontext(regs, &frame->sc);
3350
3351 unlock_user_struct(frame, frame_addr, 0);
3352 return -TARGET_QEMU_ESIGRETURN;
3353
3354 badframe:
3355 unlock_user_struct(frame, frame_addr, 0);
3356 force_sig(TARGET_SIGSEGV);
3357 return 0;
3358 }
3359
3360 long do_rt_sigreturn(CPUSH4State *regs)
3361 {
3362 struct target_rt_sigframe *frame;
3363 abi_ulong frame_addr;
3364 sigset_t blocked;
3365
3366 frame_addr = regs->gregs[15];
3367 trace_user_do_rt_sigreturn(regs, frame_addr);
3368 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3369 goto badframe;
3370 }
3371
3372 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3373 set_sigmask(&blocked);
3374
3375 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3376
3377 if (do_sigaltstack(frame_addr +
3378 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3379 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3380 goto badframe;
3381 }
3382
3383 unlock_user_struct(frame, frame_addr, 0);
3384 return -TARGET_QEMU_ESIGRETURN;
3385
3386 badframe:
3387 unlock_user_struct(frame, frame_addr, 0);
3388 force_sig(TARGET_SIGSEGV);
3389 return 0;
3390 }
3391 #elif defined(TARGET_MICROBLAZE)
3392
3393 struct target_sigcontext {
3394 struct target_pt_regs regs; /* needs to be first */
3395 uint32_t oldmask;
3396 };
3397
3398 struct target_stack_t {
3399 abi_ulong ss_sp;
3400 int ss_flags;
3401 unsigned int ss_size;
3402 };
3403
3404 struct target_ucontext {
3405 abi_ulong tuc_flags;
3406 abi_ulong tuc_link;
3407 struct target_stack_t tuc_stack;
3408 struct target_sigcontext tuc_mcontext;
3409 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3410 };
3411
3412 /* Signal frames. */
3413 struct target_signal_frame {
3414 struct target_ucontext uc;
3415 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3416 uint32_t tramp[2];
3417 };
3418
3419 struct rt_signal_frame {
3420 siginfo_t info;
3421 struct ucontext uc;
3422 uint32_t tramp[2];
3423 };
3424
3425 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3426 {
3427 __put_user(env->regs[0], &sc->regs.r0);
3428 __put_user(env->regs[1], &sc->regs.r1);
3429 __put_user(env->regs[2], &sc->regs.r2);
3430 __put_user(env->regs[3], &sc->regs.r3);
3431 __put_user(env->regs[4], &sc->regs.r4);
3432 __put_user(env->regs[5], &sc->regs.r5);
3433 __put_user(env->regs[6], &sc->regs.r6);
3434 __put_user(env->regs[7], &sc->regs.r7);
3435 __put_user(env->regs[8], &sc->regs.r8);
3436 __put_user(env->regs[9], &sc->regs.r9);
3437 __put_user(env->regs[10], &sc->regs.r10);
3438 __put_user(env->regs[11], &sc->regs.r11);
3439 __put_user(env->regs[12], &sc->regs.r12);
3440 __put_user(env->regs[13], &sc->regs.r13);
3441 __put_user(env->regs[14], &sc->regs.r14);
3442 __put_user(env->regs[15], &sc->regs.r15);
3443 __put_user(env->regs[16], &sc->regs.r16);
3444 __put_user(env->regs[17], &sc->regs.r17);
3445 __put_user(env->regs[18], &sc->regs.r18);
3446 __put_user(env->regs[19], &sc->regs.r19);
3447 __put_user(env->regs[20], &sc->regs.r20);
3448 __put_user(env->regs[21], &sc->regs.r21);
3449 __put_user(env->regs[22], &sc->regs.r22);
3450 __put_user(env->regs[23], &sc->regs.r23);
3451 __put_user(env->regs[24], &sc->regs.r24);
3452 __put_user(env->regs[25], &sc->regs.r25);
3453 __put_user(env->regs[26], &sc->regs.r26);
3454 __put_user(env->regs[27], &sc->regs.r27);
3455 __put_user(env->regs[28], &sc->regs.r28);
3456 __put_user(env->regs[29], &sc->regs.r29);
3457 __put_user(env->regs[30], &sc->regs.r30);
3458 __put_user(env->regs[31], &sc->regs.r31);
3459 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3460 }
3461
3462 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3463 {
3464 __get_user(env->regs[0], &sc->regs.r0);
3465 __get_user(env->regs[1], &sc->regs.r1);
3466 __get_user(env->regs[2], &sc->regs.r2);
3467 __get_user(env->regs[3], &sc->regs.r3);
3468 __get_user(env->regs[4], &sc->regs.r4);
3469 __get_user(env->regs[5], &sc->regs.r5);
3470 __get_user(env->regs[6], &sc->regs.r6);
3471 __get_user(env->regs[7], &sc->regs.r7);
3472 __get_user(env->regs[8], &sc->regs.r8);
3473 __get_user(env->regs[9], &sc->regs.r9);
3474 __get_user(env->regs[10], &sc->regs.r10);
3475 __get_user(env->regs[11], &sc->regs.r11);
3476 __get_user(env->regs[12], &sc->regs.r12);
3477 __get_user(env->regs[13], &sc->regs.r13);
3478 __get_user(env->regs[14], &sc->regs.r14);
3479 __get_user(env->regs[15], &sc->regs.r15);
3480 __get_user(env->regs[16], &sc->regs.r16);
3481 __get_user(env->regs[17], &sc->regs.r17);
3482 __get_user(env->regs[18], &sc->regs.r18);
3483 __get_user(env->regs[19], &sc->regs.r19);
3484 __get_user(env->regs[20], &sc->regs.r20);
3485 __get_user(env->regs[21], &sc->regs.r21);
3486 __get_user(env->regs[22], &sc->regs.r22);
3487 __get_user(env->regs[23], &sc->regs.r23);
3488 __get_user(env->regs[24], &sc->regs.r24);
3489 __get_user(env->regs[25], &sc->regs.r25);
3490 __get_user(env->regs[26], &sc->regs.r26);
3491 __get_user(env->regs[27], &sc->regs.r27);
3492 __get_user(env->regs[28], &sc->regs.r28);
3493 __get_user(env->regs[29], &sc->regs.r29);
3494 __get_user(env->regs[30], &sc->regs.r30);
3495 __get_user(env->regs[31], &sc->regs.r31);
3496 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3497 }
3498
3499 static abi_ulong get_sigframe(struct target_sigaction *ka,
3500 CPUMBState *env, int frame_size)
3501 {
3502 abi_ulong sp = env->regs[1];
3503
3504 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3505 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3506 }
3507
3508 return ((sp - frame_size) & -8UL);
3509 }
3510
3511 static void setup_frame(int sig, struct target_sigaction *ka,
3512 target_sigset_t *set, CPUMBState *env)
3513 {
3514 struct target_signal_frame *frame;
3515 abi_ulong frame_addr;
3516 int i;
3517
3518 frame_addr = get_sigframe(ka, env, sizeof *frame);
3519 trace_user_setup_frame(env, frame_addr);
3520 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3521 goto badframe;
3522
3523 /* Save the mask. */
3524 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3525
3526 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3527 __put_user(set->sig[i], &frame->extramask[i - 1]);
3528 }
3529
3530 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3531
3532 /* Set up to return from userspace. If provided, use a stub
3533 already in userspace. */
3534 /* subtract 8 to cater for the "rtsd r15,8" return offset */
3535 if (ka->sa_flags & TARGET_SA_RESTORER) {
3536 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3537 } else {
3538 uint32_t t;
3539 /* Note, these encodings are _big endian_! */
3540 /* addi r12, r0, __NR_sigreturn */
3541 t = 0x31800000UL | TARGET_NR_sigreturn;
3542 __put_user(t, frame->tramp + 0);
3543 /* brki r14, 0x8 */
3544 t = 0xb9cc0008UL;
3545 __put_user(t, frame->tramp + 1);
3546
3547 /* Return from sighandler will jump to the tramp.
3548 Negative 8 offset because return is rtsd r15, 8 */
3549 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3550 - 8;
3551 }
3552
3553 /* Set up registers for signal handler */
3554 env->regs[1] = frame_addr;
3555 /* Signal handler args: */
3556 env->regs[5] = sig; /* Arg 0: signum */
3557 env->regs[6] = 0;
3558 /* arg 1: sigcontext */
3559 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3560
3561 /* Offset of 4 to handle microblaze rtid r14, 0 */
3562 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3563
3564 unlock_user_struct(frame, frame_addr, 1);
3565 return;
3566 badframe:
3567 force_sig(TARGET_SIGSEGV);
3568 }
3569
3570 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3571 target_siginfo_t *info,
3572 target_sigset_t *set, CPUMBState *env)
3573 {
3574 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3575 }
3576
3577 long do_sigreturn(CPUMBState *env)
3578 {
3579 struct target_signal_frame *frame;
3580 abi_ulong frame_addr;
3581 target_sigset_t target_set;
3582 sigset_t set;
3583 int i;
3584
3585 frame_addr = env->regs[R_SP];
3586 trace_user_do_sigreturn(env, frame_addr);
3587 /* Make sure the guest isn't playing games. */
3588 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3589 goto badframe;
3590
3591 /* Restore blocked signals */
3592 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3593 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3594 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3595 }
3596 target_to_host_sigset_internal(&set, &target_set);
3597 set_sigmask(&set);
3598
3599 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3600 /* We got here through a sigreturn syscall; our path back is via an
3601 rtb insn, so set up r14 for that. */
3602 env->regs[14] = env->sregs[SR_PC];
3603
3604 unlock_user_struct(frame, frame_addr, 0);
3605 return -TARGET_QEMU_ESIGRETURN;
3606 badframe:
3607 force_sig(TARGET_SIGSEGV);
3608 }
3609
3610 long do_rt_sigreturn(CPUMBState *env)
3611 {
3612 trace_user_do_rt_sigreturn(env, 0);
3613 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3614 return -TARGET_ENOSYS;
3615 }
3616
3617 #elif defined(TARGET_CRIS)
3618
3619 struct target_sigcontext {
3620 struct target_pt_regs regs; /* needs to be first */
3621 uint32_t oldmask;
3622 uint32_t usp; /* usp before stacking this gunk on it */
3623 };
3624
3625 /* Signal frames. */
3626 struct target_signal_frame {
3627 struct target_sigcontext sc;
3628 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3629 uint16_t retcode[4]; /* Trampoline code. */
3630 };
3631
3632 struct rt_signal_frame {
3633 siginfo_t *pinfo;
3634 void *puc;
3635 siginfo_t info;
3636 struct ucontext uc;
3637 uint16_t retcode[4]; /* Trampoline code. */
3638 };
3639
3640 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3641 {
3642 __put_user(env->regs[0], &sc->regs.r0);
3643 __put_user(env->regs[1], &sc->regs.r1);
3644 __put_user(env->regs[2], &sc->regs.r2);
3645 __put_user(env->regs[3], &sc->regs.r3);
3646 __put_user(env->regs[4], &sc->regs.r4);
3647 __put_user(env->regs[5], &sc->regs.r5);
3648 __put_user(env->regs[6], &sc->regs.r6);
3649 __put_user(env->regs[7], &sc->regs.r7);
3650 __put_user(env->regs[8], &sc->regs.r8);
3651 __put_user(env->regs[9], &sc->regs.r9);
3652 __put_user(env->regs[10], &sc->regs.r10);
3653 __put_user(env->regs[11], &sc->regs.r11);
3654 __put_user(env->regs[12], &sc->regs.r12);
3655 __put_user(env->regs[13], &sc->regs.r13);
3656 __put_user(env->regs[14], &sc->usp);
3657 __put_user(env->regs[15], &sc->regs.acr);
3658 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3659 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3660 __put_user(env->pc, &sc->regs.erp);
3661 }
3662
3663 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3664 {
3665 __get_user(env->regs[0], &sc->regs.r0);
3666 __get_user(env->regs[1], &sc->regs.r1);
3667 __get_user(env->regs[2], &sc->regs.r2);
3668 __get_user(env->regs[3], &sc->regs.r3);
3669 __get_user(env->regs[4], &sc->regs.r4);
3670 __get_user(env->regs[5], &sc->regs.r5);
3671 __get_user(env->regs[6], &sc->regs.r6);
3672 __get_user(env->regs[7], &sc->regs.r7);
3673 __get_user(env->regs[8], &sc->regs.r8);
3674 __get_user(env->regs[9], &sc->regs.r9);
3675 __get_user(env->regs[10], &sc->regs.r10);
3676 __get_user(env->regs[11], &sc->regs.r11);
3677 __get_user(env->regs[12], &sc->regs.r12);
3678 __get_user(env->regs[13], &sc->regs.r13);
3679 __get_user(env->regs[14], &sc->usp);
3680 __get_user(env->regs[15], &sc->regs.acr);
3681 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3682 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3683 __get_user(env->pc, &sc->regs.erp);
3684 }
3685
3686 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3687 {
3688 abi_ulong sp;
3689 /* Align the stack downwards to 4. */
3690 sp = (env->regs[R_SP] & ~3);
3691 return sp - framesize;
3692 }
3693
3694 static void setup_frame(int sig, struct target_sigaction *ka,
3695 target_sigset_t *set, CPUCRISState *env)
3696 {
3697 struct target_signal_frame *frame;
3698 abi_ulong frame_addr;
3699 int i;
3700
3701 frame_addr = get_sigframe(env, sizeof *frame);
3702 trace_user_setup_frame(env, frame_addr);
3703 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3704 goto badframe;
3705
3706 /*
3707 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3708 * use this trampoline anymore but it sets it up for GDB.
3709 * In QEMU, using the trampoline simplifies things a bit so we use it.
3710 *
3711 * This is movu.w __NR_sigreturn, r9; break 13;
3712 */
3713 __put_user(0x9c5f, frame->retcode + 0);
3714 __put_user(TARGET_NR_sigreturn, frame->retcode + 1);
3716 __put_user(0xe93d, frame->retcode + 2);
3717
3718 /* Save the mask. */
3719 __put_user(set->sig[0], &frame->sc.oldmask);
3720
3721 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3722 __put_user(set->sig[i], &frame->extramask[i - 1]);
3723 }
3724
3725 setup_sigcontext(&frame->sc, env);
3726
3727 /* Move the stack and set up the arguments for the handler. */
3728 env->regs[R_SP] = frame_addr;
3729 env->regs[10] = sig;
3730 env->pc = (unsigned long) ka->_sa_handler;
3731 /* Link SRP so the guest returns through the trampoline. */
3732 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3733
3734 unlock_user_struct(frame, frame_addr, 1);
3735 return;
3736 badframe:
3737 force_sig(TARGET_SIGSEGV);
3738 }
3739
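/*
 * Illustrative sketch, not compiled into QEMU: the retcode words written by
 * setup_frame() above, restated as a tiny standalone encoder.  Per the
 * comment above, 0x9c5f is the "movu.w #imm,r9" opcode word (the 16-bit
 * immediate follows it) and 0xe93d is "break 13", so the guest returns
 * through a two-instruction trampoline that reloads r9 with the sigreturn
 * number and traps back into the emulated kernel.  The syscall number is
 * left as a parameter; the function name is local to this sketch.
 */
#if 0
static void encode_cris_sigreturn_tramp(uint16_t retcode[4], uint16_t nr)
{
    retcode[0] = 0x9c5f;    /* movu.w #imm,r9 (immediate in the next word) */
    retcode[1] = nr;        /* the sigreturn syscall number */
    retcode[2] = 0xe93d;    /* break 13 */
    retcode[3] = 0;         /* fourth word is not written by setup_frame() */
}
#endif
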
3740 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3741 target_siginfo_t *info,
3742 target_sigset_t *set, CPUCRISState *env)
3743 {
3744 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3745 }
3746
3747 long do_sigreturn(CPUCRISState *env)
3748 {
3749 struct target_signal_frame *frame;
3750 abi_ulong frame_addr;
3751 target_sigset_t target_set;
3752 sigset_t set;
3753 int i;
3754
3755 frame_addr = env->regs[R_SP];
3756 trace_user_do_sigreturn(env, frame_addr);
3757 /* Make sure the guest isn't playing games. */
3758 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3759 goto badframe;
3760 }
3761
3762 /* Restore blocked signals */
3763 __get_user(target_set.sig[0], &frame->sc.oldmask);
3764 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3765 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3766 }
3767 target_to_host_sigset_internal(&set, &target_set);
3768 set_sigmask(&set);
3769
3770 restore_sigcontext(&frame->sc, env);
3771 unlock_user_struct(frame, frame_addr, 0);
3772 return -TARGET_QEMU_ESIGRETURN;
3773 badframe:
3774 force_sig(TARGET_SIGSEGV);
3775 }
3776
3777 long do_rt_sigreturn(CPUCRISState *env)
3778 {
3779 trace_user_do_rt_sigreturn(env, 0);
3780 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3781 return -TARGET_ENOSYS;
3782 }
3783
3784 #elif defined(TARGET_OPENRISC)
3785
3786 struct target_sigcontext {
3787 struct target_pt_regs regs;
3788 abi_ulong oldmask;
3789 abi_ulong usp;
3790 };
3791
3792 struct target_ucontext {
3793 abi_ulong tuc_flags;
3794 abi_ulong tuc_link;
3795 target_stack_t tuc_stack;
3796 struct target_sigcontext tuc_mcontext;
3797 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3798 };
3799
3800 struct target_rt_sigframe {
3801 abi_ulong pinfo;
3802 uint64_t puc;
3803 struct target_siginfo info;
3804 struct target_sigcontext sc;
3805 struct target_ucontext uc;
3806 unsigned char retcode[16]; /* trampoline code */
3807 };
3808
3809 /* This is the asm-generic/ucontext.h version */
3810 #if 0
3811 static int restore_sigcontext(CPUOpenRISCState *regs,
3812 struct target_sigcontext *sc)
3813 {
3814 unsigned int err = 0;
3815 unsigned long old_usp;
3816
3817 /* Always make any pending restarted system call return -EINTR */
3818 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3819
3820 /* restore the regs from &sc->regs (same as sc, since regs is first)
3821 * (sc is already checked for VERIFY_READ since the sigframe was
3822 * checked in sys_sigreturn previously)
3823 */
3824
3825 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3826 goto badframe;
3827 }
3828
3829 /* make sure the U-flag is set so user-mode cannot fool us */
3830
3831 regs->sr &= ~SR_SM;
3832
3833 /* restore the old USP as it was before we stacked the sc etc.
3834 * (we cannot just pop the sigcontext since we aligned the sp and
3835 * stuff after pushing it)
3836 */
3837
3838 __get_user(old_usp, &sc->usp);
3839 phx_signal("old_usp 0x%lx", old_usp);
3840
3841 __PHX__ REALLY /* ??? */
3842 wrusp(old_usp);
3843 regs->gpr[1] = old_usp;
3844
3845 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3846 * after this completes, but we don't use that mechanism. maybe we can
3847 * use it now ?
3848 */
3849
3850 return err;
3851
3852 badframe:
3853 return 1;
3854 }
3855 #endif
3856
3857 /* Set up a signal frame. */
3858
3859 static void setup_sigcontext(struct target_sigcontext *sc,
3860 CPUOpenRISCState *regs,
3861 unsigned long mask)
3862 {
3863 unsigned long usp = regs->gpr[1];
3864
3865 /* copy the regs. they are first in sc so we can use sc directly */
3866
3867 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3868
3869 /* The frametype assignment below is a leftover from the CRIS code this
3870 port was derived from; OpenRISC has no frametype field, so it stays
3871 disabled. */
3872 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3873
3874 /* then some other stuff */
3875 __put_user(mask, &sc->oldmask);
3876 __put_user(usp, &sc->usp);
3877 }
3878
3879 static inline unsigned long align_sigframe(unsigned long sp)
3880 {
3881 unsigned long i;
3882 i = sp & ~3UL;
3883 return i;
3884 }
3885
3886 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3887 CPUOpenRISCState *regs,
3888 size_t frame_size)
3889 {
3890 unsigned long sp = regs->gpr[1];
3891 int onsigstack = on_sig_stack(sp);
3892
3893 /* redzone */
3894 /* This is the X/Open sanctioned signal stack switching. */
3895 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3896 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3897 }
3898
3899 sp = align_sigframe(sp - frame_size);
3900
3901 /*
3902 * If we are on the alternate signal stack and would overflow it, don't.
3903 * Return an always-bogus address instead so we will die with SIGSEGV.
3904 */
3905
3906 if (onsigstack && !likely(on_sig_stack(sp))) {
3907 return -1L;
3908 }
3909
3910 return sp;
3911 }
3912
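/*
 * Illustrative sketch, not compiled: the frame placement policy of
 * get_sigframe() above, restated as a pure function over plain integers.
 * It switches to the alternate stack when requested and not already there,
 * aligns down to 4 bytes, and returns an invalid address if the frame would
 * overflow the alternate stack (so delivery later dies with SIGSEGV).  All
 * names here are local to the sketch.
 */
#if 0
static unsigned long place_frame(unsigned long sp, int on_altstack,
                                 int want_altstack,
                                 unsigned long ss_sp, unsigned long ss_size,
                                 unsigned long frame_size)
{
    if (want_altstack && !on_altstack) {
        sp = ss_sp + ss_size;           /* start at the top of the altstack */
    }
    sp = (sp - frame_size) & ~3UL;      /* make room, keep 4-byte alignment */
    if (on_altstack && !(sp - ss_sp < ss_size)) {
        return -1UL;                    /* would overflow the altstack */
    }
    return sp;
}
#endif
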
3913 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3914 target_siginfo_t *info,
3915 target_sigset_t *set, CPUOpenRISCState *env)
3916 {
3917 int err = 0;
3918 abi_ulong frame_addr;
3919 unsigned long return_ip;
3920 struct target_rt_sigframe *frame;
3921 abi_ulong info_addr, uc_addr;
3922
3923 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3924 trace_user_setup_rt_frame(env, frame_addr);
3925 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3926 goto give_sigsegv;
3927 }
3928
3929 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
3930 __put_user(info_addr, &frame->pinfo);
3931 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
3932 __put_user(uc_addr, &frame->puc);
3933
3934 if (ka->sa_flags & TARGET_SA_SIGINFO) {
3935 tswap_siginfo(&frame->info, info);
3936 }
3937
3938 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
3939 __put_user(0, &frame->uc.tuc_flags);
3940 __put_user(0, &frame->uc.tuc_link);
3941 __put_user(target_sigaltstack_used.ss_sp,
3942 &frame->uc.tuc_stack.ss_sp);
3943 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
3944 __put_user(target_sigaltstack_used.ss_size,
3945 &frame->uc.tuc_stack.ss_size);
3946 setup_sigcontext(&frame->sc, env, set->sig[0]);
3947
3948 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
3949
3950 /* trampoline - the desired return ip is the retcode itself */
3951 return_ip = frame_addr + offsetof(struct target_rt_sigframe, retcode);
3952 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */
3953 __put_user(0xa960, (short *)(frame->retcode + 0));
3954 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
3955 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
3956 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
3957
3958 if (err) {
3959 goto give_sigsegv;
3960 }
3961
3962 /* TODO what is the current->exec_domain stuff and invmap ? */
3963
3964 /* Set up registers for signal handler */
3965 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
3966 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
3967 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
3968 env->gpr[4] = info_addr; /* arg 2: (siginfo_t*) */
3969 env->gpr[5] = uc_addr; /* arg 3: ucontext */
3970
3971 /* actually move the usp to reflect the stacked frame */
3972 env->gpr[1] = frame_addr;
3973
3974 return;
3975
3976 give_sigsegv:
3977 unlock_user_struct(frame, frame_addr, 1);
3978 if (sig == TARGET_SIGSEGV) {
3979 ka->_sa_handler = TARGET_SIG_DFL;
3980 }
3981 force_sig(TARGET_SIGSEGV);
3982 }
3983
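/*
 * Illustrative sketch: the three registers loaded above (gpr[3], gpr[4] and
 * gpr[5]) are exactly the arguments of an SA_SIGINFO handler as the guest
 * declares it.  This is plain portable POSIX guest code, not part of QEMU;
 * printing from the handler is acceptable here only because the signal is
 * raised synchronously.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
    /* sig <- gpr[3], info <- gpr[4], ucontext <- gpr[5] in setup_rt_frame() */
    printf("sig=%d si_signo=%d uc=%p\n", sig, info->si_signo, ucontext);
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);
    raise(SIGUSR1);
    return 0;
}
#endif
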
3984 long do_sigreturn(CPUOpenRISCState *env)
3985 {
3986 trace_user_do_sigreturn(env, 0);
3987 fprintf(stderr, "do_sigreturn: not implemented\n");
3988 return -TARGET_ENOSYS;
3989 }
3990
3991 long do_rt_sigreturn(CPUOpenRISCState *env)
3992 {
3993 trace_user_do_rt_sigreturn(env, 0);
3994 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
3995 return -TARGET_ENOSYS;
3996 }
3997 /* TARGET_OPENRISC */
3998
3999 #elif defined(TARGET_S390X)
4000
4001 #define __NUM_GPRS 16
4002 #define __NUM_FPRS 16
4003 #define __NUM_ACRS 16
4004
4005 #define S390_SYSCALL_SIZE 2
4006 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4007
4008 #define _SIGCONTEXT_NSIG 64
4009 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4010 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4011 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4012 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4013 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4014
4015 typedef struct {
4016 target_psw_t psw;
4017 target_ulong gprs[__NUM_GPRS];
4018 unsigned int acrs[__NUM_ACRS];
4019 } target_s390_regs_common;
4020
4021 typedef struct {
4022 unsigned int fpc;
4023 double fprs[__NUM_FPRS];
4024 } target_s390_fp_regs;
4025
4026 typedef struct {
4027 target_s390_regs_common regs;
4028 target_s390_fp_regs fpregs;
4029 } target_sigregs;
4030
4031 struct target_sigcontext {
4032 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4033 target_sigregs *sregs;
4034 };
4035
4036 typedef struct {
4037 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4038 struct target_sigcontext sc;
4039 target_sigregs sregs;
4040 int signo;
4041 uint8_t retcode[S390_SYSCALL_SIZE];
4042 } sigframe;
4043
4044 struct target_ucontext {
4045 target_ulong tuc_flags;
4046 struct target_ucontext *tuc_link;
4047 target_stack_t tuc_stack;
4048 target_sigregs tuc_mcontext;
4049 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4050 };
4051
4052 typedef struct {
4053 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4054 uint8_t retcode[S390_SYSCALL_SIZE];
4055 struct target_siginfo info;
4056 struct target_ucontext uc;
4057 } rt_sigframe;
4058
4059 static inline abi_ulong
4060 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4061 {
4062 abi_ulong sp;
4063
4064 /* Default to using normal stack */
4065 sp = env->regs[15];
4066
4067 /* This is the X/Open sanctioned signal stack switching. */
4068 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4069 if (!sas_ss_flags(sp)) {
4070 sp = target_sigaltstack_used.ss_sp +
4071 target_sigaltstack_used.ss_size;
4072 }
4073 }
4074
4075 /* This is the legacy signal stack switching. */
4076 else if (/* FIXME !user_mode(regs) */ 0 &&
4077 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4078 ka->sa_restorer) {
4079 sp = (abi_ulong) ka->sa_restorer;
4080 }
4081
4082 return (sp - frame_size) & -8ul;
4083 }
4084
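/*
 * Illustrative sketch, not compiled: "(sp - frame_size) & -8ul" in
 * get_sigframe() above rounds the candidate frame address down to an
 * 8-byte boundary, since -8ul has all bits set except the low three.  Two
 * sample values, checked with the standard <assert.h> macro:
 */
#if 0
#include <assert.h>

static void check_round_down_to_8(void)
{
    assert((0x1007ul & -8ul) == 0x1000ul);  /* unaligned: rounded down */
    assert((0x1008ul & -8ul) == 0x1008ul);  /* already aligned: unchanged */
}
#endif
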
4085 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4086 {
4087 int i;
4088 //save_access_regs(current->thread.acrs); FIXME
4089
4090 /* Copy a 'clean' PSW mask to the user to avoid leaking
4091 information about whether PER is currently on. */
4092 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4093 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4094 for (i = 0; i < 16; i++) {
4095 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4096 }
4097 for (i = 0; i < 16; i++) {
4098 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4099 }
4100 /*
4101 * We have to store the fp registers to current->thread.fp_regs
4102 * to merge them with the emulated registers.
4103 */
4104 //save_fp_regs(&current->thread.fp_regs); FIXME
4105 for (i = 0; i < 16; i++) {
4106 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4107 }
4108 }
4109
4110 static void setup_frame(int sig, struct target_sigaction *ka,
4111 target_sigset_t *set, CPUS390XState *env)
4112 {
4113 sigframe *frame;
4114 abi_ulong frame_addr;
4115
4116 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4117 trace_user_setup_frame(env, frame_addr);
4118 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4119 goto give_sigsegv;
4120 }
4121
4122 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4123
4124 save_sigregs(env, &frame->sregs);
4125
4126 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4127 (abi_ulong *)&frame->sc.sregs);
4128
4129 /* Set up to return from userspace. If provided, use a stub
4130 already in userspace. */
4131 if (ka->sa_flags & TARGET_SA_RESTORER) {
4132 env->regs[14] = (unsigned long)
4133 ka->sa_restorer | PSW_ADDR_AMODE;
4134 } else {
4135 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4136 | PSW_ADDR_AMODE;
4137 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4138 (uint16_t *)(frame->retcode));
4139 }
4140
4141 /* Set up backchain. */
4142 __put_user(env->regs[15], (abi_ulong *) frame);
4143
4144 /* Set up registers for signal handler */
4145 env->regs[15] = frame_addr;
4146 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4147
4148 env->regs[2] = sig; //map_signal(sig);
4149 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4150
4151 /* We forgot to include these in the sigcontext.
4152 To avoid breaking binary compatibility, they are passed as args. */
4153 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4154 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4155
4156 /* Place signal number on stack to allow backtrace from handler. */
4157 __put_user(env->regs[2], (int *) &frame->signo);
4158 unlock_user_struct(frame, frame_addr, 1);
4159 return;
4160
4161 give_sigsegv:
4162 force_sig(TARGET_SIGSEGV);
4163 }
4164
4165 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4166 target_siginfo_t *info,
4167 target_sigset_t *set, CPUS390XState *env)
4168 {
4169 int i;
4170 rt_sigframe *frame;
4171 abi_ulong frame_addr;
4172
4173 frame_addr = get_sigframe(ka, env, sizeof *frame);
4174 trace_user_setup_rt_frame(env, frame_addr);
4175 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4176 goto give_sigsegv;
4177 }
4178
4179 tswap_siginfo(&frame->info, info);
4180
4181 /* Create the ucontext. */
4182 __put_user(0, &frame->uc.tuc_flags);
4183 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4184 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4185 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4186 &frame->uc.tuc_stack.ss_flags);
4187 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4188 save_sigregs(env, &frame->uc.tuc_mcontext);
4189 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4190 __put_user((abi_ulong)set->sig[i],
4191 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4192 }
4193
4194 /* Set up to return from userspace. If provided, use a stub
4195 already in userspace. */
4196 if (ka->sa_flags & TARGET_SA_RESTORER) {
4197 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4198 } else {
4199 env->regs[14] = (frame_addr + offsetof(typeof(*frame), retcode)) | PSW_ADDR_AMODE;
4200 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4201 (uint16_t *)(frame->retcode));
4202 }
4203
4204 /* Set up backchain. */
4205 __put_user(env->regs[15], (abi_ulong *) frame);
4206
4207 /* Set up registers for signal handler */
4208 env->regs[15] = frame_addr;
4209 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4210
4211 env->regs[2] = sig; //map_signal(sig);
4212 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4213 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4214 return;
4215
4216 give_sigsegv:
4217 force_sig(TARGET_SIGSEGV);
4218 }
4219
4220 static int
4221 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4222 {
4223 int err = 0;
4224 int i;
4225
4226 for (i = 0; i < 16; i++) {
4227 __get_user(env->regs[i], &sc->regs.gprs[i]);
4228 }
4229
4230 __get_user(env->psw.mask, &sc->regs.psw.mask);
4231 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4232 (unsigned long long)env->psw.addr);
4233 __get_user(env->psw.addr, &sc->regs.psw.addr);
4234 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4235
4236 for (i = 0; i < 16; i++) {
4237 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4238 }
4239 for (i = 0; i < 16; i++) {
4240 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4241 }
4242
4243 return err;
4244 }
4245
4246 long do_sigreturn(CPUS390XState *env)
4247 {
4248 sigframe *frame;
4249 abi_ulong frame_addr = env->regs[15];
4250 target_sigset_t target_set;
4251 sigset_t set;
4252
4253 trace_user_do_sigreturn(env, frame_addr);
4254 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4255 goto badframe;
4256 }
4257 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4258
4259 target_to_host_sigset_internal(&set, &target_set);
4260 set_sigmask(&set); /* ~_BLOCKABLE? */
4261
4262 if (restore_sigregs(env, &frame->sregs)) {
4263 goto badframe;
4264 }
4265
4266 unlock_user_struct(frame, frame_addr, 0);
4267 return -TARGET_QEMU_ESIGRETURN;
4268
4269 badframe:
4270 force_sig(TARGET_SIGSEGV);
4271 return 0;
4272 }
4273
4274 long do_rt_sigreturn(CPUS390XState *env)
4275 {
4276 rt_sigframe *frame;
4277 abi_ulong frame_addr = env->regs[15];
4278 sigset_t set;
4279
4280 trace_user_do_rt_sigreturn(env, frame_addr);
4281 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4282 goto badframe;
4283 }
4284 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4285
4286 set_sigmask(&set); /* ~_BLOCKABLE? */
4287
4288 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4289 goto badframe;
4290 }
4291
4292 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4293 get_sp_from_cpustate(env)) == -EFAULT) {
4294 goto badframe;
4295 }
4296 unlock_user_struct(frame, frame_addr, 0);
4297 return -TARGET_QEMU_ESIGRETURN;
4298
4299 badframe:
4300 unlock_user_struct(frame, frame_addr, 0);
4301 force_sig(TARGET_SIGSEGV);
4302 return 0;
4303 }
4304
4305 #elif defined(TARGET_PPC)
4306
4307 /* Size of dummy stack frame allocated when calling signal handler.
4308 See arch/powerpc/include/asm/ptrace.h. */
4309 #if defined(TARGET_PPC64)
4310 #define SIGNAL_FRAMESIZE 128
4311 #else
4312 #define SIGNAL_FRAMESIZE 64
4313 #endif
4314
4315 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4316 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4317 struct target_mcontext {
4318 target_ulong mc_gregs[48];
4319 /* Includes fpscr. */
4320 uint64_t mc_fregs[33];
4321 target_ulong mc_pad[2];
4322 /* We need to handle Altivec and SPE at the same time, which no
4323 kernel needs to do. Fortunately, the kernel defines this bit to
4324 be Altivec-register-large all the time, rather than trying to
4325 twiddle it based on the specific platform. */
4326 union {
4327 /* SPE vector registers. One extra for SPEFSCR. */
4328 uint32_t spe[33];
4329 /* Altivec vector registers. The packing of VSCR and VRSAVE
4330 varies depending on whether we're PPC64 or not: PPC64 splits
4331 them apart; PPC32 stuffs them together. */
4332 #if defined(TARGET_PPC64)
4333 #define QEMU_NVRREG 34
4334 #else
4335 #define QEMU_NVRREG 33
4336 #endif
4337 ppc_avr_t altivec[QEMU_NVRREG];
4338 #undef QEMU_NVRREG
4339 } mc_vregs __attribute__((__aligned__(16)));
4340 };
4341
4342 /* See arch/powerpc/include/asm/sigcontext.h. */
4343 struct target_sigcontext {
4344 target_ulong _unused[4];
4345 int32_t signal;
4346 #if defined(TARGET_PPC64)
4347 int32_t pad0;
4348 #endif
4349 target_ulong handler;
4350 target_ulong oldmask;
4351 target_ulong regs; /* struct pt_regs __user * */
4352 #if defined(TARGET_PPC64)
4353 struct target_mcontext mcontext;
4354 #endif
4355 };
4356
4357 /* Indices for target_mcontext.mc_gregs, below.
4358 See arch/powerpc/include/asm/ptrace.h for details. */
4359 enum {
4360 TARGET_PT_R0 = 0,
4361 TARGET_PT_R1 = 1,
4362 TARGET_PT_R2 = 2,
4363 TARGET_PT_R3 = 3,
4364 TARGET_PT_R4 = 4,
4365 TARGET_PT_R5 = 5,
4366 TARGET_PT_R6 = 6,
4367 TARGET_PT_R7 = 7,
4368 TARGET_PT_R8 = 8,
4369 TARGET_PT_R9 = 9,
4370 TARGET_PT_R10 = 10,
4371 TARGET_PT_R11 = 11,
4372 TARGET_PT_R12 = 12,
4373 TARGET_PT_R13 = 13,
4374 TARGET_PT_R14 = 14,
4375 TARGET_PT_R15 = 15,
4376 TARGET_PT_R16 = 16,
4377 TARGET_PT_R17 = 17,
4378 TARGET_PT_R18 = 18,
4379 TARGET_PT_R19 = 19,
4380 TARGET_PT_R20 = 20,
4381 TARGET_PT_R21 = 21,
4382 TARGET_PT_R22 = 22,
4383 TARGET_PT_R23 = 23,
4384 TARGET_PT_R24 = 24,
4385 TARGET_PT_R25 = 25,
4386 TARGET_PT_R26 = 26,
4387 TARGET_PT_R27 = 27,
4388 TARGET_PT_R28 = 28,
4389 TARGET_PT_R29 = 29,
4390 TARGET_PT_R30 = 30,
4391 TARGET_PT_R31 = 31,
4392 TARGET_PT_NIP = 32,
4393 TARGET_PT_MSR = 33,
4394 TARGET_PT_ORIG_R3 = 34,
4395 TARGET_PT_CTR = 35,
4396 TARGET_PT_LNK = 36,
4397 TARGET_PT_XER = 37,
4398 TARGET_PT_CCR = 38,
4399 /* Yes, there are two registers with #39. One is 64-bit only. */
4400 TARGET_PT_MQ = 39,
4401 TARGET_PT_SOFTE = 39,
4402 TARGET_PT_TRAP = 40,
4403 TARGET_PT_DAR = 41,
4404 TARGET_PT_DSISR = 42,
4405 TARGET_PT_RESULT = 43,
4406 TARGET_PT_REGS_COUNT = 44
4407 };
4408
4409
4410 struct target_ucontext {
4411 target_ulong tuc_flags;
4412 target_ulong tuc_link; /* struct ucontext __user * */
4413 struct target_sigaltstack tuc_stack;
4414 #if !defined(TARGET_PPC64)
4415 int32_t tuc_pad[7];
4416 target_ulong tuc_regs; /* struct mcontext __user *
4417 points to uc_mcontext field */
4418 #endif
4419 target_sigset_t tuc_sigmask;
4420 #if defined(TARGET_PPC64)
4421 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4422 struct target_sigcontext tuc_sigcontext;
4423 #else
4424 int32_t tuc_maskext[30];
4425 int32_t tuc_pad2[3];
4426 struct target_mcontext tuc_mcontext;
4427 #endif
4428 };
4429
4430 /* See arch/powerpc/kernel/signal_32.c. */
4431 struct target_sigframe {
4432 struct target_sigcontext sctx;
4433 struct target_mcontext mctx;
4434 int32_t abigap[56];
4435 };
4436
4437 #if defined(TARGET_PPC64)
4438
4439 #define TARGET_TRAMP_SIZE 6
4440
4441 struct target_rt_sigframe {
4442 /* sys_rt_sigreturn requires the ucontext be the first field */
4443 struct target_ucontext uc;
4444 target_ulong _unused[2];
4445 uint32_t trampoline[TARGET_TRAMP_SIZE];
4446 target_ulong pinfo; /* struct siginfo __user * */
4447 target_ulong puc; /* void __user * */
4448 struct target_siginfo info;
4449 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4450 char abigap[288];
4451 } __attribute__((aligned(16)));
4452
4453 #else
4454
4455 struct target_rt_sigframe {
4456 struct target_siginfo info;
4457 struct target_ucontext uc;
4458 int32_t abigap[56];
4459 };
4460
4461 #endif
4462
4463 #if defined(TARGET_PPC64)
4464
4465 struct target_func_ptr {
4466 target_ulong entry;
4467 target_ulong toc;
4468 };
4469
4470 #endif
4471
4472 /* We use the mc_pad field for the signal return trampoline. */
4473 #define tramp mc_pad
4474
4475 /* See arch/powerpc/kernel/signal.c. */
4476 static target_ulong get_sigframe(struct target_sigaction *ka,
4477 CPUPPCState *env,
4478 int frame_size)
4479 {
4480 target_ulong oldsp, newsp;
4481
4482 oldsp = env->gpr[1];
4483
4484 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4485 (sas_ss_flags(oldsp) == 0)) {
4486 oldsp = (target_sigaltstack_used.ss_sp
4487 + target_sigaltstack_used.ss_size);
4488 }
4489
4490 newsp = (oldsp - frame_size) & ~0xFUL;
4491
4492 return newsp;
4493 }
4494
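/*
 * Illustrative sketch: what the TARGET_SA_ONSTACK branch of get_sigframe()
 * above corresponds to on the guest side.  A guest that registers an
 * alternate stack with sigaltstack(2) and installs its handler with
 * SA_ONSTACK gets its signal frame built at the top of that stack.  Plain
 * portable POSIX code, not part of QEMU.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void *handler_sp;

static void handler(int sig)
{
    int probe;

    (void)sig;
    handler_sp = &probe;        /* roughly where the handler's stack lives */
}

int main(void)
{
    stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
    struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };

    sigaltstack(&ss, NULL);     /* register the alternate signal stack */
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);
    raise(SIGUSR1);
    printf("handler ran with its stack near %p\n", handler_sp);
    return 0;
}
#endif
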
4495 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4496 {
4497 target_ulong msr = env->msr;
4498 int i;
4499 target_ulong ccr = 0;
4500
4501 /* In general, the kernel attempts to be intelligent about what it
4502 needs to save for Altivec/FP/SPE registers. We don't care that
4503 much, so we just go ahead and save everything. */
4504
4505 /* Save general registers. */
4506 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4507 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4508 }
4509 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4510 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4511 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4512 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4513
4514 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4515 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4516 }
4517 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4518
4519 /* Save Altivec registers if necessary. */
4520 if (env->insns_flags & PPC_ALTIVEC) {
4521 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4522 ppc_avr_t *avr = &env->avr[i];
4523 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4524
4525 __put_user(avr->u64[0], &vreg->u64[0]);
4526 __put_user(avr->u64[1], &vreg->u64[1]);
4527 }
4528 /* Set MSR_VR in the saved MSR value to indicate that
4529 frame->mc_vregs contains valid data. */
4530 msr |= MSR_VR;
4531 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4532 &frame->mc_vregs.altivec[32].u32[3]);
4533 }
4534
4535 /* Save floating point registers. */
4536 if (env->insns_flags & PPC_FLOAT) {
4537 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4538 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4539 }
4540 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4541 }
4542
4543 /* Save SPE registers. The kernel only saves the high half. */
4544 if (env->insns_flags & PPC_SPE) {
4545 #if defined(TARGET_PPC64)
4546 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4547 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4548 }
4549 #else
4550 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4551 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4552 }
4553 #endif
4554 /* Set MSR_SPE in the saved MSR value to indicate that
4555 frame->mc_vregs contains valid data. */
4556 msr |= MSR_SPE;
4557 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4558 }
4559
4560 /* Store MSR. */
4561 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4562 }
4563
4564 static void encode_trampoline(int sigret, uint32_t *tramp)
4565 {
4566 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4567 if (sigret) {
4568 __put_user(0x38000000 | sigret, &tramp[0]);
4569 __put_user(0x44000002, &tramp[1]);
4570 }
4571 }
4572
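/*
 * Illustrative sketch, not compiled: how the two words emitted by
 * encode_trampoline() above decode.  0x38000000 is the D-form "addi" with
 * rt = r0 and ra = 0, which is the "li r0,imm" idiom, and 0x44000002 is
 * "sc".  The field extraction below is the standard PowerPC D-form layout;
 * the function name is local to this sketch.
 */
#if 0
static void decode_li(uint32_t insn, unsigned *opcd, unsigned *rt,
                      unsigned *ra, int *si)
{
    *opcd = insn >> 26;                 /* 14 == addi */
    *rt = (insn >> 21) & 0x1f;          /* destination register (0 here) */
    *ra = (insn >> 16) & 0x1f;          /* 0 => "li" form, no base register */
    *si = (int16_t)(insn & 0xffff);     /* the sigreturn syscall number */
}
#endif
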
4573 static void restore_user_regs(CPUPPCState *env,
4574 struct target_mcontext *frame, int sig)
4575 {
4576 target_ulong save_r2 = 0;
4577 target_ulong msr;
4578 target_ulong ccr;
4579
4580 int i;
4581
4582 if (!sig) {
4583 save_r2 = env->gpr[2];
4584 }
4585
4586 /* Restore general registers. */
4587 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4588 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4589 }
4590 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4591 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4592 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4593 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4594 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4595
4596 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4597 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4598 }
4599
4600 if (!sig) {
4601 env->gpr[2] = save_r2;
4602 }
4603 /* Restore MSR. */
4604 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4605
4606 /* If doing signal return, restore the previous little-endian mode. */
4607 if (sig)
4608 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4609
4610 /* Restore Altivec registers if necessary. */
4611 if (env->insns_flags & PPC_ALTIVEC) {
4612 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4613 ppc_avr_t *avr = &env->avr[i];
4614 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4615
4616 __get_user(avr->u64[0], &vreg->u64[0]);
4617 __get_user(avr->u64[1], &vreg->u64[1]);
4618 }
4619 /* Restore the saved VRSAVE value; the Altivec registers themselves
4620 were restored in the loop above. */
4621 __get_user(env->spr[SPR_VRSAVE],
4622 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4623 }
4624
4625 /* Restore floating point registers. */
4626 if (env->insns_flags & PPC_FLOAT) {
4627 uint64_t fpscr;
4628 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4629 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4630 }
4631 __get_user(fpscr, &frame->mc_fregs[32]);
4632 env->fpscr = (uint32_t) fpscr;
4633 }
4634
4635 /* Restore SPE registers. The kernel only saves the high half. */
4636 if (env->insns_flags & PPC_SPE) {
4637 #if defined(TARGET_PPC64)
4638 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4639 uint32_t hi;
4640
4641 __get_user(hi, &frame->mc_vregs.spe[i]);
4642 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4643 }
4644 #else
4645 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4646 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4647 }
4648 #endif
4649 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4650 }
4651 }
4652
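/*
 * Illustrative sketch, not compiled: the CR field packing used by
 * save_user_regs() and undone by restore_user_regs() above.  crf[0] goes
 * into bits 31..28 of the saved CCR, down to crf[7] in bits 3..0, so the
 * two loops are exact inverses.  Names are local to this sketch.
 */
#if 0
static uint32_t pack_ccr(const uint32_t crf[8])
{
    uint32_t ccr = 0;
    int i;

    for (i = 0; i < 8; i++) {
        ccr |= (crf[i] & 0xf) << (32 - ((i + 1) * 4));
    }
    return ccr;
}

static void unpack_ccr(uint32_t ccr, uint32_t crf[8])
{
    int i;

    for (i = 0; i < 8; i++) {
        crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }
}
#endif
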
4653 static void setup_frame(int sig, struct target_sigaction *ka,
4654 target_sigset_t *set, CPUPPCState *env)
4655 {
4656 struct target_sigframe *frame;
4657 struct target_sigcontext *sc;
4658 target_ulong frame_addr, newsp;
4659 int err = 0;
4660 #if defined(TARGET_PPC64)
4661 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4662 #endif
4663
4664 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4665 trace_user_setup_frame(env, frame_addr);
4666 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4667 goto sigsegv;
4668 sc = &frame->sctx;
4669
4670 __put_user(ka->_sa_handler, &sc->handler);
4671 __put_user(set->sig[0], &sc->oldmask);
4672 #if TARGET_ABI_BITS == 64
4673 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4674 #else
4675 __put_user(set->sig[1], &sc->_unused[3]);
4676 #endif
4677 __put_user(h2g(&frame->mctx), &sc->regs);
4678 __put_user(sig, &sc->signal);
4679
4680 /* Save user regs. */
4681 save_user_regs(env, &frame->mctx);
4682
4683 /* Construct the trampoline code on the stack. */
4684 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4685
4686 /* The kernel checks for the presence of a VDSO here. We don't
4687 emulate a vdso, so use a sigreturn system call. */
4688 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4689
4690 /* Turn off all fp exceptions. */
4691 env->fpscr = 0;
4692
4693 /* Create a stack frame for the caller of the handler. */
4694 newsp = frame_addr - SIGNAL_FRAMESIZE;
4695 err |= put_user(env->gpr[1], newsp, target_ulong);
4696
4697 if (err)
4698 goto sigsegv;
4699
4700 /* Set up registers for signal handler. */
4701 env->gpr[1] = newsp;
4702 env->gpr[3] = sig;
4703 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4704
4705 #if defined(TARGET_PPC64)
4706 if (get_ppc64_abi(image) < 2) {
4707 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4708 struct target_func_ptr *handler =
4709 (struct target_func_ptr *)g2h(ka->_sa_handler);
4710 env->nip = tswapl(handler->entry);
4711 env->gpr[2] = tswapl(handler->toc);
4712 } else {
4713 /* ELFv2 PPC64 function pointers are entry points, but R12
4714 * must also be set */
4715 env->nip = tswapl((target_ulong) ka->_sa_handler);
4716 env->gpr[12] = env->nip;
4717 }
4718 #else
4719 env->nip = (target_ulong) ka->_sa_handler;
4720 #endif
4721
4722 /* Signal handlers are entered in big-endian mode. */
4723 env->msr &= ~(1ull << MSR_LE);
4724
4725 unlock_user_struct(frame, frame_addr, 1);
4726 return;
4727
4728 sigsegv:
4729 unlock_user_struct(frame, frame_addr, 1);
4730 force_sig(TARGET_SIGSEGV);
4731 }
4732
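/*
 * Illustrative sketch, not compiled: the ELFv1 vs ELFv2 handler resolution
 * done at the end of setup_frame() above, in isolation.  Under ELFv1 a
 * function "pointer" addresses an OPD descriptor holding the real entry
 * point and the TOC pointer; under ELFv2 the pointer is the entry point
 * itself and the callee expects its own address in r12.  The byte swapping
 * and g2h() guest-memory access QEMU performs are omitted; names are local
 * to the sketch.
 */
#if 0
struct func_desc {
    unsigned long entry;    /* like target_func_ptr.entry above */
    unsigned long toc;      /* like target_func_ptr.toc above */
};

static void resolve_elfv1(const struct func_desc *desc,
                          unsigned long *nip, unsigned long *r2)
{
    *nip = desc->entry;     /* jump target */
    *r2 = desc->toc;        /* callee's TOC base */
}

static void resolve_elfv2(unsigned long handler,
                          unsigned long *nip, unsigned long *r12)
{
    *nip = handler;         /* the pointer is the entry point */
    *r12 = handler;         /* ELFv2 global entry expects its address in r12 */
}
#endif
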
4733 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4734 target_siginfo_t *info,
4735 target_sigset_t *set, CPUPPCState *env)
4736 {
4737 struct target_rt_sigframe *rt_sf;
4738 uint32_t *trampptr = 0;
4739 struct target_mcontext *mctx = 0;
4740 target_ulong rt_sf_addr, newsp = 0;
4741 int i, err = 0;
4742 #if defined(TARGET_PPC64)
4743 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4744 #endif
4745
4746 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4747 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4748 goto sigsegv;
4749
4750 tswap_siginfo(&rt_sf->info, info);
4751
4752 __put_user(0, &rt_sf->uc.tuc_flags);
4753 __put_user(0, &rt_sf->uc.tuc_link);
4754 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4755 &rt_sf->uc.tuc_stack.ss_sp);
4756 __put_user(sas_ss_flags(env->gpr[1]),
4757 &rt_sf->uc.tuc_stack.ss_flags);
4758 __put_user(target_sigaltstack_used.ss_size,
4759 &rt_sf->uc.tuc_stack.ss_size);
4760 #if !defined(TARGET_PPC64)
4761 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4762 &rt_sf->uc.tuc_regs);
4763 #endif
4764 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4765 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4766 }
4767
4768 #if defined(TARGET_PPC64)
4769 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4770 trampptr = &rt_sf->trampoline[0];
4771 #else
4772 mctx = &rt_sf->uc.tuc_mcontext;
4773 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4774 #endif
4775
4776 save_user_regs(env, mctx);
4777 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4778
4779 /* The kernel checks for the presence of a VDSO here. We don't
4780 emulate a vdso, so use a sigreturn system call. */
4781 env->lr = (target_ulong) h2g(trampptr);
4782
4783 /* Turn off all fp exceptions. */
4784 env->fpscr = 0;
4785
4786 /* Create a stack frame for the caller of the handler. */
4787 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4788 err |= put_user(env->gpr[1], newsp, target_ulong);
4789
4790 if (err)
4791 goto sigsegv;
4792
4793 /* Set up registers for signal handler. */
4794 env->gpr[1] = newsp;
4795 env->gpr[3] = (target_ulong) sig;
4796 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4797 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4798 env->gpr[6] = (target_ulong) h2g(rt_sf);
4799
4800 #if defined(TARGET_PPC64)
4801 if (get_ppc64_abi(image) < 2) {
4802 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4803 struct target_func_ptr *handler =
4804 (struct target_func_ptr *)g2h(ka->_sa_handler);
4805 env->nip = tswapl(handler->entry);
4806 env->gpr[2] = tswapl(handler->toc);
4807 } else {
4808 /* ELFv2 PPC64 function pointers are entry points, but R12
4809 * must also be set */
4810 env->nip = tswapl((target_ulong) ka->_sa_handler);
4811 env->gpr[12] = env->nip;
4812 }
4813 #else
4814 env->nip = (target_ulong) ka->_sa_handler;
4815 #endif
4816
4817 /* Signal handlers are entered in big-endian mode. */
4818 env->msr &= ~(1ull << MSR_LE);
4819
4820 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4821 return;
4822
4823 sigsegv:
4824 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4825 force_sig(TARGET_SIGSEGV);
4826
4827 }
4828
4829 long do_sigreturn(CPUPPCState *env)
4830 {
4831 struct target_sigcontext *sc = NULL;
4832 struct target_mcontext *sr = NULL;
4833 target_ulong sr_addr = 0, sc_addr;
4834 sigset_t blocked;
4835 target_sigset_t set;
4836
4837 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4838 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4839 goto sigsegv;
4840
4841 #if defined(TARGET_PPC64)
4842 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4843 #else
4844 __get_user(set.sig[0], &sc->oldmask);
4845 __get_user(set.sig[1], &sc->_unused[3]);
4846 #endif
4847 target_to_host_sigset_internal(&blocked, &set);
4848 set_sigmask(&blocked);
4849
4850 __get_user(sr_addr, &sc->regs);
4851 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4852 goto sigsegv;
4853 restore_user_regs(env, sr, 1);
4854
4855 unlock_user_struct(sr, sr_addr, 1);
4856 unlock_user_struct(sc, sc_addr, 1);
4857 return -TARGET_QEMU_ESIGRETURN;
4858
4859 sigsegv:
4860 unlock_user_struct(sr, sr_addr, 1);
4861 unlock_user_struct(sc, sc_addr, 1);
4862 force_sig(TARGET_SIGSEGV);
4863 return 0;
4864 }
4865
4866 /* See arch/powerpc/kernel/signal_32.c. */
4867 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4868 {
4869 struct target_mcontext *mcp;
4870 target_ulong mcp_addr;
4871 sigset_t blocked;
4872 target_sigset_t set;
4873
4874 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4875 sizeof (set)))
4876 return 1;
4877
4878 #if defined(TARGET_PPC64)
4879 mcp_addr = h2g(ucp) +
4880 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4881 #else
4882 __get_user(mcp_addr, &ucp->tuc_regs);
4883 #endif
4884
4885 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4886 return 1;
4887
4888 target_to_host_sigset_internal(&blocked, &set);
4889 set_sigmask(&blocked);
4890 restore_user_regs(env, mcp, sig);
4891
4892 unlock_user_struct(mcp, mcp_addr, 1);
4893 return 0;
4894 }
4895
4896 long do_rt_sigreturn(CPUPPCState *env)
4897 {
4898 struct target_rt_sigframe *rt_sf = NULL;
4899 target_ulong rt_sf_addr;
4900
4901 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4902 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4903 goto sigsegv;
4904
4905 if (do_setcontext(&rt_sf->uc, env, 1))
4906 goto sigsegv;
4907
4908 do_sigaltstack(rt_sf_addr
4909 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4910 0, env->gpr[1]);
4911
4912 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4913 return -TARGET_QEMU_ESIGRETURN;
4914
4915 sigsegv:
4916 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4917 force_sig(TARGET_SIGSEGV);
4918 return 0;
4919 }
4920
4921 #elif defined(TARGET_M68K)
4922
4923 struct target_sigcontext {
4924 abi_ulong sc_mask;
4925 abi_ulong sc_usp;
4926 abi_ulong sc_d0;
4927 abi_ulong sc_d1;
4928 abi_ulong sc_a0;
4929 abi_ulong sc_a1;
4930 unsigned short sc_sr;
4931 abi_ulong sc_pc;
4932 };
4933
4934 struct target_sigframe
4935 {
4936 abi_ulong pretcode;
4937 int sig;
4938 int code;
4939 abi_ulong psc;
4940 char retcode[8];
4941 abi_ulong extramask[TARGET_NSIG_WORDS-1];
4942 struct target_sigcontext sc;
4943 };
4944
4945 typedef int target_greg_t;
4946 #define TARGET_NGREG 18
4947 typedef target_greg_t target_gregset_t[TARGET_NGREG];
4948
4949 typedef struct target_fpregset {
4950 int f_fpcntl[3];
4951 int f_fpregs[8*3];
4952 } target_fpregset_t;
4953
4954 struct target_mcontext {
4955 int version;
4956 target_gregset_t gregs;
4957 target_fpregset_t fpregs;
4958 };
4959
4960 #define TARGET_MCONTEXT_VERSION 2
4961
4962 struct target_ucontext {
4963 abi_ulong tuc_flags;
4964 abi_ulong tuc_link;
4965 target_stack_t tuc_stack;
4966 struct target_mcontext tuc_mcontext;
4967 abi_long tuc_filler[80];
4968 target_sigset_t tuc_sigmask;
4969 };
4970
4971 struct target_rt_sigframe
4972 {
4973 abi_ulong pretcode;
4974 int sig;
4975 abi_ulong pinfo;
4976 abi_ulong puc;
4977 char retcode[8];
4978 struct target_siginfo info;
4979 struct target_ucontext uc;
4980 };
4981
4982 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
4983 abi_ulong mask)
4984 {
4985 __put_user(mask, &sc->sc_mask);
4986 __put_user(env->aregs[7], &sc->sc_usp);
4987 __put_user(env->dregs[0], &sc->sc_d0);
4988 __put_user(env->dregs[1], &sc->sc_d1);
4989 __put_user(env->aregs[0], &sc->sc_a0);
4990 __put_user(env->aregs[1], &sc->sc_a1);
4991 __put_user(env->sr, &sc->sc_sr);
4992 __put_user(env->pc, &sc->sc_pc);
4993 }
4994
4995 static void
4996 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
4997 {
4998 int temp;
4999
5000 __get_user(env->aregs[7], &sc->sc_usp);
5001 __get_user(env->dregs[0], &sc->sc_d0);
5002 __get_user(env->dregs[1], &sc->sc_d1);
5003 __get_user(env->aregs[0], &sc->sc_a0);
5004 __get_user(env->aregs[1], &sc->sc_a1);
5005 __get_user(env->pc, &sc->sc_pc);
5006 __get_user(temp, &sc->sc_sr);
5007 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5008 }
5009
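/*
 * Illustrative sketch, not compiled: the SR merge done at the end of
 * restore_sigcontext() above.  Only the low (condition code) byte of the
 * status register is taken from the signal frame; the privileged high byte
 * the CPU already holds is preserved, so a guest cannot grant itself
 * supervisor state through sigreturn.  The name is local to this sketch.
 */
#if 0
static uint16_t merge_sr(uint16_t cur_sr, uint16_t frame_sr)
{
    return (cur_sr & 0xff00) | (frame_sr & 0x00ff);
}
#endif
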
5010 /*
5011 * Determine which stack to use.
5012 */
5013 static inline abi_ulong
5014 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5015 size_t frame_size)
5016 {
5017 unsigned long sp;
5018
5019 sp = regs->aregs[7];
5020
5021 /* This is the X/Open sanctioned signal stack switching. */
5022 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5023 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5024 }
5025
5026 return ((sp - frame_size) & -8UL);
5027 }
5028
5029 static void setup_frame(int sig, struct target_sigaction *ka,
5030 target_sigset_t *set, CPUM68KState *env)
5031 {
5032 struct target_sigframe *frame;
5033 abi_ulong frame_addr;
5034 abi_ulong retcode_addr;
5035 abi_ulong sc_addr;
5036 int i;
5037
5038 frame_addr = get_sigframe(ka, env, sizeof *frame);
5039 trace_user_setup_frame(env, frame_addr);
5040 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5041 goto give_sigsegv;
5042 }
5043
5044 __put_user(sig, &frame->sig);
5045
5046 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5047 __put_user(sc_addr, &frame->psc);
5048
5049 setup_sigcontext(&frame->sc, env, set->sig[0]);
5050
5051 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5052 __put_user(set->sig[i], &frame->extramask[i - 1]);
5053 }
5054
5055 /* Set up to return from userspace. */
5056
5057 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5058 __put_user(retcode_addr, &frame->pretcode);
5059
5060 /* moveq #,d0; trap #0 */
5061
5062 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5063 (uint32_t *)(frame->retcode));
5064
5065 /* Set up to return from userspace */
5066
5067 env->aregs[7] = frame_addr;
5068 env->pc = ka->_sa_handler;
5069
5070 unlock_user_struct(frame, frame_addr, 1);
5071 return;
5072
5073 give_sigsegv:
5074 force_sig(TARGET_SIGSEGV);
5075 }
5076
5077 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5078 CPUM68KState *env)
5079 {
5080 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5081
5082 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5083 __put_user(env->dregs[0], &gregs[0]);
5084 __put_user(env->dregs[1], &gregs[1]);
5085 __put_user(env->dregs[2], &gregs[2]);
5086 __put_user(env->dregs[3], &gregs[3]);
5087 __put_user(env->dregs[4], &gregs[4]);
5088 __put_user(env->dregs[5], &gregs[5]);
5089 __put_user(env->dregs[6], &gregs[6]);
5090 __put_user(env->dregs[7], &gregs[7]);
5091 __put_user(env->aregs[0], &gregs[8]);
5092 __put_user(env->aregs[1], &gregs[9]);
5093 __put_user(env->aregs[2], &gregs[10]);
5094 __put_user(env->aregs[3], &gregs[11]);
5095 __put_user(env->aregs[4], &gregs[12]);
5096 __put_user(env->aregs[5], &gregs[13]);
5097 __put_user(env->aregs[6], &gregs[14]);
5098 __put_user(env->aregs[7], &gregs[15]);
5099 __put_user(env->pc, &gregs[16]);
5100 __put_user(env->sr, &gregs[17]);
5101
5102 return 0;
5103 }
5104
5105 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5106 struct target_ucontext *uc)
5107 {
5108 int temp;
5109 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5110
5111 __get_user(temp, &uc->tuc_mcontext.version);
5112 if (temp != TARGET_MCONTEXT_VERSION)
5113 goto badframe;
5114
5115 /* restore passed registers */
5116 __get_user(env->dregs[0], &gregs[0]);
5117 __get_user(env->dregs[1], &gregs[1]);
5118 __get_user(env->dregs[2], &gregs[2]);
5119 __get_user(env->dregs[3], &gregs[3]);
5120 __get_user(env->dregs[4], &gregs[4]);
5121 __get_user(env->dregs[5], &gregs[5]);
5122 __get_user(env->dregs[6], &gregs[6]);
5123 __get_user(env->dregs[7], &gregs[7]);
5124 __get_user(env->aregs[0], &gregs[8]);
5125 __get_user(env->aregs[1], &gregs[9]);
5126 __get_user(env->aregs[2], &gregs[10]);
5127 __get_user(env->aregs[3], &gregs[11]);
5128 __get_user(env->aregs[4], &gregs[12]);
5129 __get_user(env->aregs[5], &gregs[13]);
5130 __get_user(env->aregs[6], &gregs[14]);
5131 __get_user(env->aregs[7], &gregs[15]);
5132 __get_user(env->pc, &gregs[16]);
5133 __get_user(temp, &gregs[17]);
5134 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5135
5136 return 0;
5137
5138 badframe:
5139 return 1;
5140 }
5141
5142 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5143 target_siginfo_t *info,
5144 target_sigset_t *set, CPUM68KState *env)
5145 {
5146 struct target_rt_sigframe *frame;
5147 abi_ulong frame_addr;
5148 abi_ulong retcode_addr;
5149 abi_ulong info_addr;
5150 abi_ulong uc_addr;
5151 int err = 0;
5152 int i;
5153
5154 frame_addr = get_sigframe(ka, env, sizeof *frame);
5155 trace_user_setup_rt_frame(env, frame_addr);
5156 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5157 goto give_sigsegv;
5158 }
5159
5160 __put_user(sig, &frame->sig);
5161
5162 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5163 __put_user(info_addr, &frame->pinfo);
5164
5165 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5166 __put_user(uc_addr, &frame->puc);
5167
5168 tswap_siginfo(&frame->info, info);
5169
5170 /* Create the ucontext */
5171
5172 __put_user(0, &frame->uc.tuc_flags);
5173 __put_user(0, &frame->uc.tuc_link);
5174 __put_user(target_sigaltstack_used.ss_sp,
5175 &frame->uc.tuc_stack.ss_sp);
5176 __put_user(sas_ss_flags(env->aregs[7]),
5177 &frame->uc.tuc_stack.ss_flags);
5178 __put_user(target_sigaltstack_used.ss_size,
5179 &frame->uc.tuc_stack.ss_size);
5180 err |= target_rt_setup_ucontext(&frame->uc, env);
5181
5182 if (err)
5183 goto give_sigsegv;
5184
5185 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5186 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5187 }
5188
5189 /* Set up to return from userspace. */
5190
5191 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5192 __put_user(retcode_addr, &frame->pretcode);
5193
5194 /* moveq #,d0; notb d0; trap #0 */
5195
5196 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5197 (uint32_t *)(frame->retcode + 0));
5198 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5199
5200 if (err)
5201 goto give_sigsegv;
5202
5203 /* Set up to return from userspace */
5204
5205 env->aregs[7] = frame_addr;
5206 env->pc = ka->_sa_handler;
5207
5208 unlock_user_struct(frame, frame_addr, 1);
5209 return;
5210
5211 give_sigsegv:
5212 unlock_user_struct(frame, frame_addr, 1);
5213 force_sig(TARGET_SIGSEGV);
5214 }
5215
5216 long do_sigreturn(CPUM68KState *env)
5217 {
5218 struct target_sigframe *frame;
5219 abi_ulong frame_addr = env->aregs[7] - 4;
5220 target_sigset_t target_set;
5221 sigset_t set;
5222 int i;
5223
5224 trace_user_do_sigreturn(env, frame_addr);
5225 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5226 goto badframe;
5227
5228 /* set blocked signals */
5229
5230 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5231
5232 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5233 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5234 }
5235
5236 target_to_host_sigset_internal(&set, &target_set);
5237 set_sigmask(&set);
5238
5239 /* restore registers */
5240
5241 restore_sigcontext(env, &frame->sc);
5242
5243 unlock_user_struct(frame, frame_addr, 0);
5244 return -TARGET_QEMU_ESIGRETURN;
5245
5246 badframe:
5247 force_sig(TARGET_SIGSEGV);
5248 return 0;
5249 }
5250
5251 long do_rt_sigreturn(CPUM68KState *env)
5252 {
5253 struct target_rt_sigframe *frame;
5254 abi_ulong frame_addr = env->aregs[7] - 4;
5256 sigset_t set;
5257
5258 trace_user_do_rt_sigreturn(env, frame_addr);
5259 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5260 goto badframe;
5261
5262 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5263 set_sigmask(&set);
5264
5265 /* restore registers */
5266
5267 if (target_rt_restore_ucontext(env, &frame->uc))
5268 goto badframe;
5269
5270 if (do_sigaltstack(frame_addr +
5271 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5272 0, get_sp_from_cpustate(env)) == -EFAULT)
5273 goto badframe;
5274
5275 unlock_user_struct(frame, frame_addr, 0);
5276 return -TARGET_QEMU_ESIGRETURN;
5277
5278 badframe:
5279 unlock_user_struct(frame, frame_addr, 0);
5280 force_sig(TARGET_SIGSEGV);
5281 return 0;
5282 }
5283
5284 #elif defined(TARGET_ALPHA)
5285
5286 struct target_sigcontext {
5287 abi_long sc_onstack;
5288 abi_long sc_mask;
5289 abi_long sc_pc;
5290 abi_long sc_ps;
5291 abi_long sc_regs[32];
5292 abi_long sc_ownedfp;
5293 abi_long sc_fpregs[32];
5294 abi_ulong sc_fpcr;
5295 abi_ulong sc_fp_control;
5296 abi_ulong sc_reserved1;
5297 abi_ulong sc_reserved2;
5298 abi_ulong sc_ssize;
5299 abi_ulong sc_sbase;
5300 abi_ulong sc_traparg_a0;
5301 abi_ulong sc_traparg_a1;
5302 abi_ulong sc_traparg_a2;
5303 abi_ulong sc_fp_trap_pc;
5304 abi_ulong sc_fp_trigger_sum;
5305 abi_ulong sc_fp_trigger_inst;
5306 };
5307
5308 struct target_ucontext {
5309 abi_ulong tuc_flags;
5310 abi_ulong tuc_link;
5311 abi_ulong tuc_osf_sigmask;
5312 target_stack_t tuc_stack;
5313 struct target_sigcontext tuc_mcontext;
5314 target_sigset_t tuc_sigmask;
5315 };
5316
5317 struct target_sigframe {
5318 struct target_sigcontext sc;
5319 unsigned int retcode[3];
5320 };
5321
5322 struct target_rt_sigframe {
5323 target_siginfo_t info;
5324 struct target_ucontext uc;
5325 unsigned int retcode[3];
5326 };
5327
5328 #define INSN_MOV_R30_R16 0x47fe0410
5329 #define INSN_LDI_R0 0x201f0000
5330 #define INSN_CALLSYS 0x00000083
5331
5332 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5333 abi_ulong frame_addr, target_sigset_t *set)
5334 {
5335 int i;
5336
5337 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5338 __put_user(set->sig[0], &sc->sc_mask);
5339 __put_user(env->pc, &sc->sc_pc);
5340 __put_user(8, &sc->sc_ps);
5341
5342 for (i = 0; i < 31; ++i) {
5343 __put_user(env->ir[i], &sc->sc_regs[i]);
5344 }
5345 __put_user(0, &sc->sc_regs[31]);
5346
5347 for (i = 0; i < 31; ++i) {
5348 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5349 }
5350 __put_user(0, &sc->sc_fpregs[31]);
5351 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5352
5353 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5354 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5355 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5356 }
5357
5358 static void restore_sigcontext(CPUAlphaState *env,
5359 struct target_sigcontext *sc)
5360 {
5361 uint64_t fpcr;
5362 int i;
5363
5364 __get_user(env->pc, &sc->sc_pc);
5365
5366 for (i = 0; i < 31; ++i) {
5367 __get_user(env->ir[i], &sc->sc_regs[i]);
5368 }
5369 for (i = 0; i < 31; ++i) {
5370 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5371 }
5372
5373 __get_user(fpcr, &sc->sc_fpcr);
5374 cpu_alpha_store_fpcr(env, fpcr);
5375 }
5376
5377 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5378 CPUAlphaState *env,
5379 unsigned long framesize)
5380 {
5381 abi_ulong sp = env->ir[IR_SP];
5382
5383 /* This is the X/Open sanctioned signal stack switching. */
5384 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5385 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5386 }
5387 return (sp - framesize) & -32;
5388 }
5389
5390 static void setup_frame(int sig, struct target_sigaction *ka,
5391 target_sigset_t *set, CPUAlphaState *env)
5392 {
5393 abi_ulong frame_addr, r26;
5394 struct target_sigframe *frame;
5395 int err = 0;
5396
5397 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5398 trace_user_setup_frame(env, frame_addr);
5399 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5400 goto give_sigsegv;
5401 }
5402
5403 setup_sigcontext(&frame->sc, env, frame_addr, set);
5404
5405 if (ka->sa_restorer) {
5406 r26 = ka->sa_restorer;
5407 } else {
5408 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5409 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5410 &frame->retcode[1]);
5411 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5412 /* imb() */
5413 r26 = frame_addr;
5414 }
5415
5416 unlock_user_struct(frame, frame_addr, 1);
5417
5418 if (err) {
5419 give_sigsegv:
5420 if (sig == TARGET_SIGSEGV) {
5421 ka->_sa_handler = TARGET_SIG_DFL;
5422 }
5423 force_sig(TARGET_SIGSEGV);
5424 }
5425
5426 env->ir[IR_RA] = r26;
5427 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5428 env->ir[IR_A0] = sig;
5429 env->ir[IR_A1] = 0;
5430 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5431 env->ir[IR_SP] = frame_addr;
5432 }
5433
5434 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5435 target_siginfo_t *info,
5436 target_sigset_t *set, CPUAlphaState *env)
5437 {
5438 abi_ulong frame_addr, r26;
5439 struct target_rt_sigframe *frame;
5440 int i, err = 0;
5441
5442 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5443 trace_user_setup_rt_frame(env, frame_addr);
5444 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5445 goto give_sigsegv;
5446 }
5447
5448 tswap_siginfo(&frame->info, info);
5449
5450 __put_user(0, &frame->uc.tuc_flags);
5451 __put_user(0, &frame->uc.tuc_link);
5452 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5453 __put_user(target_sigaltstack_used.ss_sp,
5454 &frame->uc.tuc_stack.ss_sp);
5455 __put_user(sas_ss_flags(env->ir[IR_SP]),
5456 &frame->uc.tuc_stack.ss_flags);
5457 __put_user(target_sigaltstack_used.ss_size,
5458 &frame->uc.tuc_stack.ss_size);
5459 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5460 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5461 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5462 }
5463
5464 if (ka->sa_restorer) {
5465 r26 = ka->sa_restorer;
5466 } else {
5467 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5468 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5469 &frame->retcode[1]);
5470 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5471 /* imb(); */
5472 r26 = frame_addr;
5473 }
5474
5475 if (err) {
5476 give_sigsegv:
5477 if (sig == TARGET_SIGSEGV) {
5478 ka->_sa_handler = TARGET_SIG_DFL;
5479 }
5480 force_sig(TARGET_SIGSEGV);
5481 }
5482
5483 env->ir[IR_RA] = r26;
5484 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5485 env->ir[IR_A0] = sig;
5486 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5487 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5488 env->ir[IR_SP] = frame_addr;
5489 }
5490
5491 long do_sigreturn(CPUAlphaState *env)
5492 {
5493 struct target_sigcontext *sc;
5494 abi_ulong sc_addr = env->ir[IR_A0];
5495 target_sigset_t target_set;
5496 sigset_t set;
5497
5498 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5499 goto badframe;
5500 }
5501
5502 target_sigemptyset(&target_set);
5503 __get_user(target_set.sig[0], &sc->sc_mask);
5504
5505 target_to_host_sigset_internal(&set, &target_set);
5506 set_sigmask(&set);
5507
5508 restore_sigcontext(env, sc);
5509 unlock_user_struct(sc, sc_addr, 0);
5510 return -TARGET_QEMU_ESIGRETURN;
5511
5512 badframe:
5513 force_sig(TARGET_SIGSEGV);
5514 }
5515
5516 long do_rt_sigreturn(CPUAlphaState *env)
5517 {
5518 abi_ulong frame_addr = env->ir[IR_A0];
5519 struct target_rt_sigframe *frame;
5520 sigset_t set;
5521
5522 trace_user_do_rt_sigreturn(env, frame_addr);
5523 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5524 goto badframe;
5525 }
5526 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5527 set_sigmask(&set);
5528
5529 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5530 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5531 uc.tuc_stack),
5532 0, env->ir[IR_SP]) == -EFAULT) {
5533 goto badframe;
5534 }
5535
5536 unlock_user_struct(frame, frame_addr, 0);
5537 return -TARGET_QEMU_ESIGRETURN;
5538
5539
5540 badframe:
5541 unlock_user_struct(frame, frame_addr, 0);
5542 force_sig(TARGET_SIGSEGV);
5543 }
5544
5545 #elif defined(TARGET_TILEGX)
5546
5547 struct target_sigcontext {
5548 union {
5549 /* General-purpose registers. */
5550 abi_ulong gregs[56];
5551 struct {
5552 abi_ulong __gregs[53];
5553 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5554 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5555 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5556 };
5557 };
5558 abi_ulong pc; /* Program counter. */
5559 abi_ulong ics; /* In Interrupt Critical Section? */
5560 abi_ulong faultnum; /* Fault number. */
5561 abi_ulong pad[5];
5562 };
5563
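/*
 * Illustrative sketch, not compiled: the anonymous union/struct above makes
 * tp, sp and lr alternative names for gregs[53], gregs[54] and gregs[55]
 * (53 being the length of __gregs).  A layout check using the standard C11
 * _Static_assert/offsetof would look like this; it is not part of QEMU.
 */
#if 0
#include <stddef.h>

_Static_assert(offsetof(struct target_sigcontext, sp) ==
               offsetof(struct target_sigcontext, gregs[54]),
               "sp must alias gregs[54]");
_Static_assert(offsetof(struct target_sigcontext, lr) ==
               offsetof(struct target_sigcontext, gregs[55]),
               "lr must alias gregs[55]");
#endif
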
5564 struct target_ucontext {
5565 abi_ulong tuc_flags;
5566 abi_ulong tuc_link;
5567 target_stack_t tuc_stack;
5568 struct target_sigcontext tuc_mcontext;
5569 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5570 };
5571
5572 struct target_rt_sigframe {
5573 unsigned char save_area[16]; /* caller save area */
5574 struct target_siginfo info;
5575 struct target_ucontext uc;
5576 abi_ulong retcode[2];
5577 };
5578
5579 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5580 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
5581
5582
5583 static void setup_sigcontext(struct target_sigcontext *sc,
5584 CPUArchState *env, int signo)
5585 {
5586 int i;
5587
5588 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5589 __put_user(env->regs[i], &sc->gregs[i]);
5590 }
5591
5592 __put_user(env->pc, &sc->pc);
5593 __put_user(0, &sc->ics);
5594 __put_user(signo, &sc->faultnum);
5595 }
5596
5597 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5598 {
5599 int i;
5600
5601 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5602 __get_user(env->regs[i], &sc->gregs[i]);
5603 }
5604
5605 __get_user(env->pc, &sc->pc);
5606 }
5607
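/* Pick the guest stack for the signal frame: switch to the alternate signal
 * stack if the handler asked for SA_ONSTACK and we are not already on it,
 * then reserve the frame and align the result down to 16 bytes.
 */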
5608 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5609 size_t frame_size)
5610 {
5611 unsigned long sp = env->regs[TILEGX_R_SP];
5612
5613 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5614 return -1UL;
5615 }
5616
5617 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
5618 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5619 }
5620
5621 sp -= frame_size;
5622 sp &= -16UL;
5623 return sp;
5624 }
5625
5626 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5627 target_siginfo_t *info,
5628 target_sigset_t *set, CPUArchState *env)
5629 {
5630 abi_ulong frame_addr;
5631 struct target_rt_sigframe *frame;
5632 unsigned long restorer;
5633
5634 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5635 trace_user_setup_rt_frame(env, frame_addr);
5636 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5637 goto give_sigsegv;
5638 }
5639
5640 /* Always write at least the signal number for the stack backtracer. */
5641 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5642 /* At sigreturn time, restore the callee-save registers too. */
5643 tswap_siginfo(&frame->info, info);
5644 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can we skip this? */
5645 } else {
5646 __put_user(info->si_signo, &frame->info.si_signo);
5647 }
5648
5649 /* Create the ucontext. */
5650 __put_user(0, &frame->uc.tuc_flags);
5651 __put_user(0, &frame->uc.tuc_link);
5652 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5653 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5654 &frame->uc.tuc_stack.ss_flags);
5655 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5656 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5657
5658 if (ka->sa_flags & TARGET_SA_RESTORER) {
5659 restorer = (unsigned long) ka->sa_restorer;
5660 } else {
5661 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5662 __put_user(INSN_SWINT1, &frame->retcode[1]);
5663 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5664 }
5665 env->pc = (unsigned long) ka->_sa_handler;
5666 env->regs[TILEGX_R_SP] = frame_addr;
5667 env->regs[TILEGX_R_LR] = restorer;
5668 env->regs[0] = (unsigned long) sig;
5669 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5670 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5671 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can we skip this? */
5672
5673 unlock_user_struct(frame, frame_addr, 1);
5674 return;
5675
5676 give_sigsegv:
5677 if (sig == TARGET_SIGSEGV) {
5678 ka->_sa_handler = TARGET_SIG_DFL;
5679 }
5680 force_sig(TARGET_SIGSEGV /* , current */);
5681 }
5682
5683 long do_rt_sigreturn(CPUTLGState *env)
5684 {
5685 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5686 struct target_rt_sigframe *frame;
5687 sigset_t set;
5688
5689 trace_user_do_rt_sigreturn(env, frame_addr);
5690 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5691 goto badframe;
5692 }
5693 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5694 set_sigmask(&set);
5695
5696 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5697 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5698 uc.tuc_stack),
5699 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5700 goto badframe;
5701 }
5702
5703 unlock_user_struct(frame, frame_addr, 0);
5704 return -TARGET_QEMU_ESIGRETURN;
5705
5706
5707 badframe:
5708 unlock_user_struct(frame, frame_addr, 0);
5709 force_sig(TARGET_SIGSEGV);
5710 }
5711
5712 #else
5713
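/* Fallback stubs for targets without a signal frame implementation: frame
 * setup only prints a warning, and the sigreturn syscalls fail with ENOSYS.
 */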
5714 static void setup_frame(int sig, struct target_sigaction *ka,
5715 target_sigset_t *set, CPUArchState *env)
5716 {
5717 fprintf(stderr, "setup_frame: not implemented\n");
5718 }
5719
5720 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5721 target_siginfo_t *info,
5722 target_sigset_t *set, CPUArchState *env)
5723 {
5724 fprintf(stderr, "setup_rt_frame: not implemented\n");
5725 }
5726
5727 long do_sigreturn(CPUArchState *env)
5728 {
5729 fprintf(stderr, "do_sigreturn: not implemented\n");
5730 return -TARGET_ENOSYS;
5731 }
5732
5733 long do_rt_sigreturn(CPUArchState *env)
5734 {
5735 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5736 return -TARGET_ENOSYS;
5737 }
5738
5739 #endif
5740
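/* Deliver one pending guest signal: let the gdbstub intercept it first,
 * apply the default action when no handler is installed, or otherwise build
 * the target signal frame, update the thread's blocked-signal mask and point
 * the guest PC at the registered handler.
 */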
5741 static void handle_pending_signal(CPUArchState *cpu_env, int sig)
5742 {
5743 CPUState *cpu = ENV_GET_CPU(cpu_env);
5744 abi_ulong handler;
5745 sigset_t set;
5746 target_sigset_t target_old_set;
5747 struct target_sigaction *sa;
5748 TaskState *ts = cpu->opaque;
5749 struct emulated_sigtable *k = &ts->sigtab[sig - 1];
5750
5751 trace_user_handle_signal(cpu_env, sig);
5752 /* dequeue signal */
5753 k->pending = 0;
5754
5755 sig = gdb_handlesig(cpu, sig);
5756 if (!sig) {
5757 sa = NULL;
5758 handler = TARGET_SIG_IGN;
5759 } else {
5760 sa = &sigact_table[sig - 1];
5761 handler = sa->_sa_handler;
5762 }
5763
5764 if (sig == TARGET_SIGSEGV && sigismember(&ts->signal_mask, SIGSEGV)) {
5765 /* Guest has blocked SIGSEGV but we got one anyway. Assume this is a
5766 * forced SIGSEGV (i.e. one the kernel handles via force_sig_info because
5767 * it got a real MMU fault), and treat it as if the default handler applied.
5768 */
5769 handler = TARGET_SIG_DFL;
5770 }
5771
5772 if (handler == TARGET_SIG_DFL) {
5773 /* default handler: ignore some signals; the others are job control or fatal */
5774 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5775 kill(getpid(), SIGSTOP);
5776 } else if (sig != TARGET_SIGCHLD &&
5777 sig != TARGET_SIGURG &&
5778 sig != TARGET_SIGWINCH &&
5779 sig != TARGET_SIGCONT) {
5780 force_sig(sig);
5781 }
5782 } else if (handler == TARGET_SIG_IGN) {
5783 /* ignore sig */
5784 } else if (handler == TARGET_SIG_ERR) {
5785 force_sig(sig);
5786 } else {
5787 /* compute the blocked signals during the handler execution */
5788 sigset_t *blocked_set;
5789
5790 target_to_host_sigset(&set, &sa->sa_mask);
5791 /* SA_NODEFER indicates that the current signal should not be
5792 blocked during the handler */
5793 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5794 sigaddset(&set, target_to_host_signal(sig));
5795
5796 /* save the previous blocked signal state to restore it at the
5797 end of the signal execution (see do_sigreturn) */
5798 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5799
5800 /* block signals in the handler */
5801 blocked_set = ts->in_sigsuspend ?
5802 &ts->sigsuspend_mask : &ts->signal_mask;
5803 sigorset(&ts->signal_mask, blocked_set, &set);
5804 ts->in_sigsuspend = 0;
5805
5806 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5807 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5808 {
5809 CPUX86State *env = cpu_env;
5810 if (env->eflags & VM_MASK)
5811 save_v86_state(env);
5812 }
5813 #endif
5814 /* prepare the stack frame of the virtual CPU */
5815 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5816 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5817 /* These targets do not have traditional signals. */
5818 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5819 #else
5820 if (sa->sa_flags & TARGET_SA_SIGINFO)
5821 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5822 else
5823 setup_frame(sig, sa, &target_old_set, cpu_env);
5824 #endif
5825 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5826 sa->_sa_handler = TARGET_SIG_DFL;
5827 }
5828 }
5829 }
5830
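/* Called from the per-target cpu loops.  All host signals are blocked while
 * the per-thread sigtab is scanned so that it cannot change underneath us;
 * on exit the guest's signal mask is reinstated, with SIGSEGV and SIGBUS
 * always left unblocked because the guest may fault at any time.
 */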
5831 void process_pending_signals(CPUArchState *cpu_env)
5832 {
5833 CPUState *cpu = ENV_GET_CPU(cpu_env);
5834 int sig;
5835 TaskState *ts = cpu->opaque;
5836 sigset_t set;
5837 sigset_t *blocked_set;
5838
5839 while (atomic_read(&ts->signal_pending)) {
5840 /* FIXME: This is not thread-safe. */
5841 sigfillset(&set);
5842 sigprocmask(SIG_SETMASK, &set, 0);
5843
5844 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5845 blocked_set = ts->in_sigsuspend ?
5846 &ts->sigsuspend_mask : &ts->signal_mask;
5847
5848 if (ts->sigtab[sig - 1].pending &&
5849 (!sigismember(blocked_set,
5850 target_to_host_signal_table[sig])
5851 || sig == TARGET_SIGSEGV)) {
5852 handle_pending_signal(cpu_env, sig);
5853 /* Restart scan from the beginning */
5854 sig = 1;
5855 }
5856 }
5857
5858 /* if no signal is pending, unblock signals and recheck (the act
5859 * of unblocking might cause us to take another host signal which
5860 * will set signal_pending again).
5861 */
5862 atomic_set(&ts->signal_pending, 0);
5863 ts->in_sigsuspend = 0;
5864 set = ts->signal_mask;
5865 sigdelset(&set, SIGSEGV);
5866 sigdelset(&set, SIGBUS);
5867 sigprocmask(SIG_SETMASK, &set, 0);
5868 }
5869 ts->in_sigsuspend = 0;
5870 }