[mirror_qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include <sys/ucontext.h>
21 #include <sys/resource.h>
22
23 #include "qemu.h"
24 #include "qemu-common.h"
25 #include "target_signal.h"
26 #include "trace.h"
27
28 static struct target_sigaltstack target_sigaltstack_used = {
29 .ss_sp = 0,
30 .ss_size = 0,
31 .ss_flags = TARGET_SS_DISABLE,
32 };
33
34 static struct target_sigaction sigact_table[TARGET_NSIG];
35
36 static void host_signal_handler(int host_signum, siginfo_t *info,
37 void *puc);
38
39 static uint8_t host_to_target_signal_table[_NSIG] = {
40 [SIGHUP] = TARGET_SIGHUP,
41 [SIGINT] = TARGET_SIGINT,
42 [SIGQUIT] = TARGET_SIGQUIT,
43 [SIGILL] = TARGET_SIGILL,
44 [SIGTRAP] = TARGET_SIGTRAP,
45 [SIGABRT] = TARGET_SIGABRT,
46 /* [SIGIOT] = TARGET_SIGIOT,*/
47 [SIGBUS] = TARGET_SIGBUS,
48 [SIGFPE] = TARGET_SIGFPE,
49 [SIGKILL] = TARGET_SIGKILL,
50 [SIGUSR1] = TARGET_SIGUSR1,
51 [SIGSEGV] = TARGET_SIGSEGV,
52 [SIGUSR2] = TARGET_SIGUSR2,
53 [SIGPIPE] = TARGET_SIGPIPE,
54 [SIGALRM] = TARGET_SIGALRM,
55 [SIGTERM] = TARGET_SIGTERM,
56 #ifdef SIGSTKFLT
57 [SIGSTKFLT] = TARGET_SIGSTKFLT,
58 #endif
59 [SIGCHLD] = TARGET_SIGCHLD,
60 [SIGCONT] = TARGET_SIGCONT,
61 [SIGSTOP] = TARGET_SIGSTOP,
62 [SIGTSTP] = TARGET_SIGTSTP,
63 [SIGTTIN] = TARGET_SIGTTIN,
64 [SIGTTOU] = TARGET_SIGTTOU,
65 [SIGURG] = TARGET_SIGURG,
66 [SIGXCPU] = TARGET_SIGXCPU,
67 [SIGXFSZ] = TARGET_SIGXFSZ,
68 [SIGVTALRM] = TARGET_SIGVTALRM,
69 [SIGPROF] = TARGET_SIGPROF,
70 [SIGWINCH] = TARGET_SIGWINCH,
71 [SIGIO] = TARGET_SIGIO,
72 [SIGPWR] = TARGET_SIGPWR,
73 [SIGSYS] = TARGET_SIGSYS,
74 /* next signals stay the same */
75 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
76 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
77 To fix this properly we need to do manual signal delivery multiplexed
78 over a single host signal. */
79 [__SIGRTMIN] = __SIGRTMAX,
80 [__SIGRTMAX] = __SIGRTMIN,
81 };
82 static uint8_t target_to_host_signal_table[_NSIG];
83
84 static inline int on_sig_stack(unsigned long sp)
85 {
86 return (sp - target_sigaltstack_used.ss_sp
87 < target_sigaltstack_used.ss_size);
88 }
89
90 static inline int sas_ss_flags(unsigned long sp)
91 {
92 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
93 : on_sig_stack(sp) ? SS_ONSTACK : 0);
94 }
95
96 int host_to_target_signal(int sig)
97 {
98 if (sig < 0 || sig >= _NSIG)
99 return sig;
100 return host_to_target_signal_table[sig];
101 }
102
103 int target_to_host_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return target_to_host_signal_table[sig];
108 }
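/* Usage sketch (illustrative, assuming a typical glibc host where
 * __SIGRTMIN is 32): because of the table above, the RT swap is its own
 * inverse and out-of-range values pass through unchanged:
 *
 *     int t = host_to_target_signal(__SIGRTMIN);  // yields __SIGRTMAX
 *     int h = target_to_host_signal(t);           // back to __SIGRTMIN
 */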
109
110 static inline void target_sigemptyset(target_sigset_t *set)
111 {
112 memset(set, 0, sizeof(*set));
113 }
114
115 static inline void target_sigaddset(target_sigset_t *set, int signum)
116 {
117 signum--;
118 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
119 set->sig[signum / TARGET_NSIG_BPW] |= mask;
120 }
121
122 static inline int target_sigismember(const target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
127 }
128
129 static void host_to_target_sigset_internal(target_sigset_t *d,
130 const sigset_t *s)
131 {
132 int i;
133 target_sigemptyset(d);
134 for (i = 1; i <= TARGET_NSIG; i++) {
135 if (sigismember(s, i)) {
136 target_sigaddset(d, host_to_target_signal(i));
137 }
138 }
139 }
140
141 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
142 {
143 target_sigset_t d1;
144 int i;
145
146 host_to_target_sigset_internal(&d1, s);
147 for(i = 0;i < TARGET_NSIG_WORDS; i++)
148 d->sig[i] = tswapal(d1.sig[i]);
149 }
150
151 static void target_to_host_sigset_internal(sigset_t *d,
152 const target_sigset_t *s)
153 {
154 int i;
155 sigemptyset(d);
156 for (i = 1; i <= TARGET_NSIG; i++) {
157 if (target_sigismember(s, i)) {
158 sigaddset(d, target_to_host_signal(i));
159 }
160 }
161 }
162
163 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
164 {
165 target_sigset_t s1;
166 int i;
167
168 for(i = 0;i < TARGET_NSIG_WORDS; i++)
169 s1.sig[i] = tswapal(s->sig[i]);
170 target_to_host_sigset_internal(d, &s1);
171 }
172
173 void host_to_target_old_sigset(abi_ulong *old_sigset,
174 const sigset_t *sigset)
175 {
176 target_sigset_t d;
177 host_to_target_sigset(&d, sigset);
178 *old_sigset = d.sig[0];
179 }
180
181 void target_to_host_old_sigset(sigset_t *sigset,
182 const abi_ulong *old_sigset)
183 {
184 target_sigset_t d;
185 int i;
186
187 d.sig[0] = *old_sigset;
188 for(i = 1;i < TARGET_NSIG_WORDS; i++)
189 d.sig[i] = 0;
190 target_to_host_sigset(sigset, &d);
191 }
192
193 /* Wrapper for sigprocmask function
194 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
195 * are host signal sets, not guest ones. This wraps the host sigprocmask calls
196 * that should be protected (calls originating from the guest)
197 */
198 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
199 {
200 int ret;
201 sigset_t val;
202 sigset_t *temp = NULL;
203 CPUState *cpu = thread_cpu;
204 TaskState *ts = (TaskState *)cpu->opaque;
205 bool segv_was_blocked = ts->sigsegv_blocked;
206
207 if (set) {
208 bool has_sigsegv = sigismember(set, SIGSEGV);
209 val = *set;
210 temp = &val;
211
212 sigdelset(temp, SIGSEGV);
213
214 switch (how) {
215 case SIG_BLOCK:
216 if (has_sigsegv) {
217 ts->sigsegv_blocked = true;
218 }
219 break;
220 case SIG_UNBLOCK:
221 if (has_sigsegv) {
222 ts->sigsegv_blocked = false;
223 }
224 break;
225 case SIG_SETMASK:
226 ts->sigsegv_blocked = has_sigsegv;
227 break;
228 default:
229 g_assert_not_reached();
230 }
231 }
232
233 ret = sigprocmask(how, temp, oldset);
234
235 if (oldset && segv_was_blocked) {
236 sigaddset(oldset, SIGSEGV);
237 }
238
239 return ret;
240 }
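/* Usage sketch (illustrative): callers pass *host* signal sets, so a guest
 * mask is converted first; the SIGSEGV bookkeeping happens inside the
 * wrapper and SIGSEGV itself is kept out of the mask passed to the host:
 *
 *     sigset_t hset;
 *     target_to_host_sigset(&hset, &guest_set);  // guest_set: hypothetical target_sigset_t
 *     do_sigprocmask(SIG_SETMASK, &hset, NULL);
 */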
241
242 /* siginfo conversion */
243
244 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
245 const siginfo_t *info)
246 {
247 int sig = host_to_target_signal(info->si_signo);
248 tinfo->si_signo = sig;
249 tinfo->si_errno = 0;
250 tinfo->si_code = info->si_code;
251
252 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
253 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
254 /* Should never come here, but who knows. The information for
255 the target is irrelevant. */
256 tinfo->_sifields._sigfault._addr = 0;
257 } else if (sig == TARGET_SIGIO) {
258 tinfo->_sifields._sigpoll._band = info->si_band;
259 tinfo->_sifields._sigpoll._fd = info->si_fd;
260 } else if (sig == TARGET_SIGCHLD) {
261 tinfo->_sifields._sigchld._pid = info->si_pid;
262 tinfo->_sifields._sigchld._uid = info->si_uid;
263 tinfo->_sifields._sigchld._status
264 = host_to_target_waitstatus(info->si_status);
265 tinfo->_sifields._sigchld._utime = info->si_utime;
266 tinfo->_sifields._sigchld._stime = info->si_stime;
267 } else if (sig >= TARGET_SIGRTMIN) {
268 tinfo->_sifields._rt._pid = info->si_pid;
269 tinfo->_sifields._rt._uid = info->si_uid;
270 /* XXX: potential problem if 64 bit */
271 tinfo->_sifields._rt._sigval.sival_ptr
272 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
273 }
274 }
275
276 static void tswap_siginfo(target_siginfo_t *tinfo,
277 const target_siginfo_t *info)
278 {
279 int sig = info->si_signo;
280 tinfo->si_signo = tswap32(sig);
281 tinfo->si_errno = tswap32(info->si_errno);
282 tinfo->si_code = tswap32(info->si_code);
283
284 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
285 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
286 tinfo->_sifields._sigfault._addr
287 = tswapal(info->_sifields._sigfault._addr);
288 } else if (sig == TARGET_SIGIO) {
289 tinfo->_sifields._sigpoll._band
290 = tswap32(info->_sifields._sigpoll._band);
291 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
292 } else if (sig == TARGET_SIGCHLD) {
293 tinfo->_sifields._sigchld._pid
294 = tswap32(info->_sifields._sigchld._pid);
295 tinfo->_sifields._sigchld._uid
296 = tswap32(info->_sifields._sigchld._uid);
297 tinfo->_sifields._sigchld._status
298 = tswap32(info->_sifields._sigchld._status);
299 tinfo->_sifields._sigchld._utime
300 = tswapal(info->_sifields._sigchld._utime);
301 tinfo->_sifields._sigchld._stime
302 = tswapal(info->_sifields._sigchld._stime);
303 } else if (sig >= TARGET_SIGRTMIN) {
304 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
305 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
306 tinfo->_sifields._rt._sigval.sival_ptr
307 = tswapal(info->_sifields._rt._sigval.sival_ptr);
308 }
309 }
310
311
312 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
313 {
314 host_to_target_siginfo_noswap(tinfo, info);
315 tswap_siginfo(tinfo, tinfo);
316 }
317
318 /* XXX: we assume that only POSIX RT signals are used. */
319 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
320 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
321 {
322 info->si_signo = tswap32(tinfo->si_signo);
323 info->si_errno = tswap32(tinfo->si_errno);
324 info->si_code = tswap32(tinfo->si_code);
325 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
326 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
327 info->si_value.sival_ptr =
328 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
329 }
330
331 static int fatal_signal (int sig)
332 {
333 switch (sig) {
334 case TARGET_SIGCHLD:
335 case TARGET_SIGURG:
336 case TARGET_SIGWINCH:
337 /* Ignored by default. */
338 return 0;
339 case TARGET_SIGCONT:
340 case TARGET_SIGSTOP:
341 case TARGET_SIGTSTP:
342 case TARGET_SIGTTIN:
343 case TARGET_SIGTTOU:
344 /* Job control signals. */
345 return 0;
346 default:
347 return 1;
348 }
349 }
350
351 /* returns 1 if given signal should dump core if not handled */
352 static int core_dump_signal(int sig)
353 {
354 switch (sig) {
355 case TARGET_SIGABRT:
356 case TARGET_SIGFPE:
357 case TARGET_SIGILL:
358 case TARGET_SIGQUIT:
359 case TARGET_SIGSEGV:
360 case TARGET_SIGTRAP:
361 case TARGET_SIGBUS:
362 return (1);
363 default:
364 return (0);
365 }
366 }
367
368 void signal_init(void)
369 {
370 struct sigaction act;
371 struct sigaction oact;
372 int i, j;
373 int host_sig;
374
375 /* generate signal conversion tables */
376 for(i = 1; i < _NSIG; i++) {
377 if (host_to_target_signal_table[i] == 0)
378 host_to_target_signal_table[i] = i;
379 }
380 for(i = 1; i < _NSIG; i++) {
381 j = host_to_target_signal_table[i];
382 target_to_host_signal_table[j] = i;
383 }
384
385 /* set all host signal handlers. ALL signals are blocked during
386 the handlers to serialize them. */
387 memset(sigact_table, 0, sizeof(sigact_table));
388
389 sigfillset(&act.sa_mask);
390 act.sa_flags = SA_SIGINFO;
391 act.sa_sigaction = host_signal_handler;
392 for(i = 1; i <= TARGET_NSIG; i++) {
393 host_sig = target_to_host_signal(i);
394 sigaction(host_sig, NULL, &oact);
395 if (oact.sa_sigaction == (void *)SIG_IGN) {
396 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
397 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
398 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
399 }
400 /* If there's already a handler installed then something has
401 gone horribly wrong, so don't even try to handle that case. */
402 /* Install some handlers for our own use. We need at least
403 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
404 trap all signals because it affects syscall interrupt
405 behavior. But do trap all default-fatal signals. */
406 if (fatal_signal (i))
407 sigaction(host_sig, &act, NULL);
408 }
409 }
410
411 /* signal queue handling */
412
413 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
414 {
415 CPUState *cpu = ENV_GET_CPU(env);
416 TaskState *ts = cpu->opaque;
417 struct sigqueue *q = ts->first_free;
418 if (!q)
419 return NULL;
420 ts->first_free = q->next;
421 return q;
422 }
423
424 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
425 {
426 CPUState *cpu = ENV_GET_CPU(env);
427 TaskState *ts = cpu->opaque;
428
429 q->next = ts->first_free;
430 ts->first_free = q;
431 }
432
433 /* abort execution with signal */
434 static void QEMU_NORETURN force_sig(int target_sig)
435 {
436 CPUState *cpu = thread_cpu;
437 CPUArchState *env = cpu->env_ptr;
438 TaskState *ts = (TaskState *)cpu->opaque;
439 int host_sig, core_dumped = 0;
440 struct sigaction act;
441
442 host_sig = target_to_host_signal(target_sig);
443 trace_user_force_sig(env, target_sig, host_sig);
444 gdb_signalled(env, target_sig);
445
446 /* dump core if supported by target binary format */
447 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
448 stop_all_tasks();
449 core_dumped =
450 ((*ts->bprm->core_dump)(target_sig, env) == 0);
451 }
452 if (core_dumped) {
453 /* we already dumped the core of the target process, we don't want
454 * a coredump of qemu itself */
455 struct rlimit nodump;
456 getrlimit(RLIMIT_CORE, &nodump);
457 nodump.rlim_cur=0;
458 setrlimit(RLIMIT_CORE, &nodump);
459 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
460 target_sig, strsignal(host_sig), "core dumped" );
461 }
462
463 /* The proper exit code for dying from an uncaught signal is
464 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
465 * a negative value. To get the proper exit code we need to
466 * actually die from an uncaught signal. Here the default signal
467 * handler is installed, we send ourselves the signal and wait for
468 * it to arrive. */
469 sigfillset(&act.sa_mask);
470 act.sa_handler = SIG_DFL;
471 act.sa_flags = 0;
472 sigaction(host_sig, &act, NULL);
473
474 /* For some reason raise(host_sig) doesn't send the signal when
475 * statically linked on x86-64. */
476 kill(getpid(), host_sig);
477
478 /* Make sure the signal isn't masked (just reuse the mask inside
479 of act) */
480 sigdelset(&act.sa_mask, host_sig);
481 sigsuspend(&act.sa_mask);
482
483 /* unreachable */
484 abort();
485 }
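/* Observable effect sketch (assuming a parent process using waitpid()):
 * the re-raised signal kills the process with the default disposition, so
 * the parent sees WIFSIGNALED(status) with WTERMSIG(status) == host_sig,
 * just as for a native binary dying from the same uncaught signal.
 */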
486
487 /* queue a signal so that it will be sent to the virtual CPU as soon
488 as possible */
489 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
490 {
491 CPUState *cpu = ENV_GET_CPU(env);
492 TaskState *ts = cpu->opaque;
493 struct emulated_sigtable *k;
494 struct sigqueue *q, **pq;
495 abi_ulong handler;
496 int queue;
497
498 trace_user_queue_signal(env, sig);
499 k = &ts->sigtab[sig - 1];
500 queue = gdb_queuesig ();
501 handler = sigact_table[sig - 1]._sa_handler;
502
503 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
504 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
505 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
506 * because it got a real MMU fault). A blocked SIGSEGV in that
507 * situation is treated as if using the default handler. This is
508 * not correct if some other process has randomly sent us a SIGSEGV
509 * via kill(), but that is not easy to distinguish at this point,
510 * so we assume it doesn't happen.
511 */
512 handler = TARGET_SIG_DFL;
513 }
514
515 if (!queue && handler == TARGET_SIG_DFL) {
516 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
517 kill(getpid(),SIGSTOP);
518 return 0;
519 } else
520 /* default handler: some signals are ignored, the others are fatal */
521 if (sig != TARGET_SIGCHLD &&
522 sig != TARGET_SIGURG &&
523 sig != TARGET_SIGWINCH &&
524 sig != TARGET_SIGCONT) {
525 force_sig(sig);
526 } else {
527 return 0; /* indicate ignored */
528 }
529 } else if (!queue && handler == TARGET_SIG_IGN) {
530 /* ignore signal */
531 return 0;
532 } else if (!queue && handler == TARGET_SIG_ERR) {
533 force_sig(sig);
534 } else {
535 pq = &k->first;
536 if (sig < TARGET_SIGRTMIN) {
537 /* for a non-real-time signal, we queue exactly one signal */
538 if (!k->pending)
539 q = &k->info;
540 else
541 return 0;
542 } else {
543 if (!k->pending) {
544 /* first signal */
545 q = &k->info;
546 } else {
547 q = alloc_sigqueue(env);
548 if (!q)
549 return -EAGAIN;
550 while (*pq != NULL)
551 pq = &(*pq)->next;
552 }
553 }
554 *pq = q;
555 q->info = *info;
556 q->next = NULL;
557 k->pending = 1;
558 /* signal that a new signal is pending */
559 ts->signal_pending = 1;
560 return 1; /* indicates that the signal was queued */
561 }
562 }
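/* Return values of queue_signal(), as implemented above:
 *   1        queued; the caller should interrupt the virtual CPU
 *   0        nothing queued (ignored, handled as a stop, or a non-RT
 *            signal that is already pending)
 *   -EAGAIN  no free sigqueue entry was available for an RT signal
 * Default-fatal unqueued signals do not return: force_sig() is called.
 */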
563
564 #ifndef HAVE_SAFE_SYSCALL
565 static inline void rewind_if_in_safe_syscall(void *puc)
566 {
567 /* Default version: never rewind */
568 }
569 #endif
570
571 static void host_signal_handler(int host_signum, siginfo_t *info,
572 void *puc)
573 {
574 CPUArchState *env = thread_cpu->env_ptr;
575 int sig;
576 target_siginfo_t tinfo;
577
578 /* the CPU emulator uses some host signals to detect exceptions;
579 we forward those signals to it */
580 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
581 && info->si_code > 0) {
582 if (cpu_signal_handler(host_signum, info, puc))
583 return;
584 }
585
586 /* get target signal number */
587 sig = host_to_target_signal(host_signum);
588 if (sig < 1 || sig > TARGET_NSIG)
589 return;
590 trace_user_host_signal(env, host_signum, sig);
591
592 rewind_if_in_safe_syscall(puc);
593
594 host_to_target_siginfo_noswap(&tinfo, info);
595 if (queue_signal(env, sig, &tinfo) == 1) {
596 /* interrupt the virtual CPU as soon as possible */
597 cpu_exit(thread_cpu);
598 }
599 }
600
601 /* do_sigaltstack() returns target values and errnos. */
602 /* compare linux/kernel/signal.c:do_sigaltstack() */
603 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
604 {
605 int ret;
606 struct target_sigaltstack oss;
607
608 /* XXX: test errors */
609 if(uoss_addr)
610 {
611 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
612 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
613 __put_user(sas_ss_flags(sp), &oss.ss_flags);
614 }
615
616 if(uss_addr)
617 {
618 struct target_sigaltstack *uss;
619 struct target_sigaltstack ss;
620 size_t minstacksize = TARGET_MINSIGSTKSZ;
621
622 #if defined(TARGET_PPC64)
623 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
624 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
625 if (get_ppc64_abi(image) > 1) {
626 minstacksize = 4096;
627 }
628 #endif
629
630 ret = -TARGET_EFAULT;
631 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
632 goto out;
633 }
634 __get_user(ss.ss_sp, &uss->ss_sp);
635 __get_user(ss.ss_size, &uss->ss_size);
636 __get_user(ss.ss_flags, &uss->ss_flags);
637 unlock_user_struct(uss, uss_addr, 0);
638
639 ret = -TARGET_EPERM;
640 if (on_sig_stack(sp))
641 goto out;
642
643 ret = -TARGET_EINVAL;
644 if (ss.ss_flags != TARGET_SS_DISABLE
645 && ss.ss_flags != TARGET_SS_ONSTACK
646 && ss.ss_flags != 0)
647 goto out;
648
649 if (ss.ss_flags == TARGET_SS_DISABLE) {
650 ss.ss_size = 0;
651 ss.ss_sp = 0;
652 } else {
653 ret = -TARGET_ENOMEM;
654 if (ss.ss_size < minstacksize) {
655 goto out;
656 }
657 }
658
659 target_sigaltstack_used.ss_sp = ss.ss_sp;
660 target_sigaltstack_used.ss_size = ss.ss_size;
661 }
662
663 if (uoss_addr) {
664 ret = -TARGET_EFAULT;
665 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
666 goto out;
667 }
668
669 ret = 0;
670 out:
671 return ret;
672 }
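/* As the header comment notes, the function returns target errnos:
 * 0 on success, otherwise -TARGET_EFAULT, -TARGET_EPERM (changing the
 * stack while running on it), -TARGET_EINVAL (bad ss_flags) or
 * -TARGET_ENOMEM (ss_size below the minimum).
 */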
673
674 /* do_sigaction() return host values and errnos */
675 int do_sigaction(int sig, const struct target_sigaction *act,
676 struct target_sigaction *oact)
677 {
678 struct target_sigaction *k;
679 struct sigaction act1;
680 int host_sig;
681 int ret = 0;
682
683 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
684 return -EINVAL;
685 k = &sigact_table[sig - 1];
686 if (oact) {
687 __put_user(k->_sa_handler, &oact->_sa_handler);
688 __put_user(k->sa_flags, &oact->sa_flags);
689 #if !defined(TARGET_MIPS)
690 __put_user(k->sa_restorer, &oact->sa_restorer);
691 #endif
692 /* Not swapped. */
693 oact->sa_mask = k->sa_mask;
694 }
695 if (act) {
696 /* FIXME: This is not threadsafe. */
697 __get_user(k->_sa_handler, &act->_sa_handler);
698 __get_user(k->sa_flags, &act->sa_flags);
699 #if !defined(TARGET_MIPS)
700 __get_user(k->sa_restorer, &act->sa_restorer);
701 #endif
702 /* To be swapped in target_to_host_sigset. */
703 k->sa_mask = act->sa_mask;
704
705 /* we update the host linux signal state */
706 host_sig = target_to_host_signal(sig);
707 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
708 sigfillset(&act1.sa_mask);
709 act1.sa_flags = SA_SIGINFO;
710 if (k->sa_flags & TARGET_SA_RESTART)
711 act1.sa_flags |= SA_RESTART;
712 /* NOTE: it is important to update the host kernel signal
713 ignore state to avoid getting unexpected interrupted
714 syscalls */
715 if (k->_sa_handler == TARGET_SIG_IGN) {
716 act1.sa_sigaction = (void *)SIG_IGN;
717 } else if (k->_sa_handler == TARGET_SIG_DFL) {
718 if (fatal_signal (sig))
719 act1.sa_sigaction = host_signal_handler;
720 else
721 act1.sa_sigaction = (void *)SIG_DFL;
722 } else {
723 act1.sa_sigaction = host_signal_handler;
724 }
725 ret = sigaction(host_sig, &act1, NULL);
726 }
727 }
728 return ret;
729 }
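/* Host-side policy implemented above: the SIGSEGV/SIGBUS host handlers
 * installed by signal_init() are left alone (they are needed for fault
 * emulation); for other signals the host disposition follows the guest's,
 * except that default-fatal signals still route through
 * host_signal_handler so force_sig() can report them.
 */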
730
731 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
732
733 /* from the Linux kernel */
734
735 struct target_fpreg {
736 uint16_t significand[4];
737 uint16_t exponent;
738 };
739
740 struct target_fpxreg {
741 uint16_t significand[4];
742 uint16_t exponent;
743 uint16_t padding[3];
744 };
745
746 struct target_xmmreg {
747 abi_ulong element[4];
748 };
749
750 struct target_fpstate {
751 /* Regular FPU environment */
752 abi_ulong cw;
753 abi_ulong sw;
754 abi_ulong tag;
755 abi_ulong ipoff;
756 abi_ulong cssel;
757 abi_ulong dataoff;
758 abi_ulong datasel;
759 struct target_fpreg _st[8];
760 uint16_t status;
761 uint16_t magic; /* 0xffff = regular FPU data only */
762
763 /* FXSR FPU environment */
764 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
765 abi_ulong mxcsr;
766 abi_ulong reserved;
767 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
768 struct target_xmmreg _xmm[8];
769 abi_ulong padding[56];
770 };
771
772 #define X86_FXSR_MAGIC 0x0000
773
774 struct target_sigcontext {
775 uint16_t gs, __gsh;
776 uint16_t fs, __fsh;
777 uint16_t es, __esh;
778 uint16_t ds, __dsh;
779 abi_ulong edi;
780 abi_ulong esi;
781 abi_ulong ebp;
782 abi_ulong esp;
783 abi_ulong ebx;
784 abi_ulong edx;
785 abi_ulong ecx;
786 abi_ulong eax;
787 abi_ulong trapno;
788 abi_ulong err;
789 abi_ulong eip;
790 uint16_t cs, __csh;
791 abi_ulong eflags;
792 abi_ulong esp_at_signal;
793 uint16_t ss, __ssh;
794 abi_ulong fpstate; /* pointer */
795 abi_ulong oldmask;
796 abi_ulong cr2;
797 };
798
799 struct target_ucontext {
800 abi_ulong tuc_flags;
801 abi_ulong tuc_link;
802 target_stack_t tuc_stack;
803 struct target_sigcontext tuc_mcontext;
804 target_sigset_t tuc_sigmask; /* mask last for extensibility */
805 };
806
807 struct sigframe
808 {
809 abi_ulong pretcode;
810 int sig;
811 struct target_sigcontext sc;
812 struct target_fpstate fpstate;
813 abi_ulong extramask[TARGET_NSIG_WORDS-1];
814 char retcode[8];
815 };
816
817 struct rt_sigframe
818 {
819 abi_ulong pretcode;
820 int sig;
821 abi_ulong pinfo;
822 abi_ulong puc;
823 struct target_siginfo info;
824 struct target_ucontext uc;
825 struct target_fpstate fpstate;
826 char retcode[8];
827 };
828
829 /*
830 * Set up a signal frame.
831 */
832
833 /* XXX: save x87 state */
834 static void setup_sigcontext(struct target_sigcontext *sc,
835 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
836 abi_ulong fpstate_addr)
837 {
838 CPUState *cs = CPU(x86_env_get_cpu(env));
839 uint16_t magic;
840
841 /* already locked in setup_frame() */
842 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
843 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
844 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
845 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
846 __put_user(env->regs[R_EDI], &sc->edi);
847 __put_user(env->regs[R_ESI], &sc->esi);
848 __put_user(env->regs[R_EBP], &sc->ebp);
849 __put_user(env->regs[R_ESP], &sc->esp);
850 __put_user(env->regs[R_EBX], &sc->ebx);
851 __put_user(env->regs[R_EDX], &sc->edx);
852 __put_user(env->regs[R_ECX], &sc->ecx);
853 __put_user(env->regs[R_EAX], &sc->eax);
854 __put_user(cs->exception_index, &sc->trapno);
855 __put_user(env->error_code, &sc->err);
856 __put_user(env->eip, &sc->eip);
857 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
858 __put_user(env->eflags, &sc->eflags);
859 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
860 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
861
862 cpu_x86_fsave(env, fpstate_addr, 1);
863 fpstate->status = fpstate->sw;
864 magic = 0xffff;
865 __put_user(magic, &fpstate->magic);
866 __put_user(fpstate_addr, &sc->fpstate);
867
868 /* non-iBCS2 extensions.. */
869 __put_user(mask, &sc->oldmask);
870 __put_user(env->cr[2], &sc->cr2);
871 }
872
873 /*
874 * Determine which stack to use..
875 */
876
877 static inline abi_ulong
878 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
879 {
880 unsigned long esp;
881
882 /* Default to using normal stack */
883 esp = env->regs[R_ESP];
884 /* This is the X/Open sanctioned signal stack switching. */
885 if (ka->sa_flags & TARGET_SA_ONSTACK) {
886 if (sas_ss_flags(esp) == 0) {
887 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
888 }
889 } else {
890
891 /* This is the legacy signal stack switching. */
892 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
893 !(ka->sa_flags & TARGET_SA_RESTORER) &&
894 ka->sa_restorer) {
895 esp = (unsigned long) ka->sa_restorer;
896 }
897 }
898 return (esp - frame_size) & -8ul;
899 }
900
901 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
902 static void setup_frame(int sig, struct target_sigaction *ka,
903 target_sigset_t *set, CPUX86State *env)
904 {
905 abi_ulong frame_addr;
906 struct sigframe *frame;
907 int i;
908
909 frame_addr = get_sigframe(ka, env, sizeof(*frame));
910 trace_user_setup_frame(env, frame_addr);
911
912 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
913 goto give_sigsegv;
914
915 __put_user(sig, &frame->sig);
916
917 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
918 frame_addr + offsetof(struct sigframe, fpstate));
919
920 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
921 __put_user(set->sig[i], &frame->extramask[i - 1]);
922 }
923
924 /* Set up to return from userspace. If provided, use a stub
925 already in userspace. */
926 if (ka->sa_flags & TARGET_SA_RESTORER) {
927 __put_user(ka->sa_restorer, &frame->pretcode);
928 } else {
929 uint16_t val16;
930 abi_ulong retcode_addr;
931 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
932 __put_user(retcode_addr, &frame->pretcode);
933 /* This is popl %eax ; movl $,%eax ; int $0x80 */
934 val16 = 0xb858;
935 __put_user(val16, (uint16_t *)(frame->retcode+0));
936 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
937 val16 = 0x80cd;
938 __put_user(val16, (uint16_t *)(frame->retcode+6));
939 }
940
941
942 /* Set up registers for signal handler */
943 env->regs[R_ESP] = frame_addr;
944 env->eip = ka->_sa_handler;
945
946 cpu_x86_load_seg(env, R_DS, __USER_DS);
947 cpu_x86_load_seg(env, R_ES, __USER_DS);
948 cpu_x86_load_seg(env, R_SS, __USER_DS);
949 cpu_x86_load_seg(env, R_CS, __USER_CS);
950 env->eflags &= ~TF_MASK;
951
952 unlock_user_struct(frame, frame_addr, 1);
953
954 return;
955
956 give_sigsegv:
957 if (sig == TARGET_SIGSEGV) {
958 ka->_sa_handler = TARGET_SIG_DFL;
959 }
960 force_sig(TARGET_SIGSEGV /* , current */);
961 }
962
963 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
964 static void setup_rt_frame(int sig, struct target_sigaction *ka,
965 target_siginfo_t *info,
966 target_sigset_t *set, CPUX86State *env)
967 {
968 abi_ulong frame_addr, addr;
969 struct rt_sigframe *frame;
970 int i;
971
972 frame_addr = get_sigframe(ka, env, sizeof(*frame));
973 trace_user_setup_rt_frame(env, frame_addr);
974
975 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
976 goto give_sigsegv;
977
978 __put_user(sig, &frame->sig);
979 addr = frame_addr + offsetof(struct rt_sigframe, info);
980 __put_user(addr, &frame->pinfo);
981 addr = frame_addr + offsetof(struct rt_sigframe, uc);
982 __put_user(addr, &frame->puc);
983 tswap_siginfo(&frame->info, info);
984
985 /* Create the ucontext. */
986 __put_user(0, &frame->uc.tuc_flags);
987 __put_user(0, &frame->uc.tuc_link);
988 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
989 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
990 &frame->uc.tuc_stack.ss_flags);
991 __put_user(target_sigaltstack_used.ss_size,
992 &frame->uc.tuc_stack.ss_size);
993 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
994 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
995
996 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
997 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
998 }
999
1000 /* Set up to return from userspace. If provided, use a stub
1001 already in userspace. */
1002 if (ka->sa_flags & TARGET_SA_RESTORER) {
1003 __put_user(ka->sa_restorer, &frame->pretcode);
1004 } else {
1005 uint16_t val16;
1006 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1007 __put_user(addr, &frame->pretcode);
1008 /* This is movl $,%eax ; int $0x80 */
1009 __put_user(0xb8, (char *)(frame->retcode+0));
1010 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1011 val16 = 0x80cd;
1012 __put_user(val16, (uint16_t *)(frame->retcode+5));
1013 }
1014
1015 /* Set up registers for signal handler */
1016 env->regs[R_ESP] = frame_addr;
1017 env->eip = ka->_sa_handler;
1018
1019 cpu_x86_load_seg(env, R_DS, __USER_DS);
1020 cpu_x86_load_seg(env, R_ES, __USER_DS);
1021 cpu_x86_load_seg(env, R_SS, __USER_DS);
1022 cpu_x86_load_seg(env, R_CS, __USER_CS);
1023 env->eflags &= ~TF_MASK;
1024
1025 unlock_user_struct(frame, frame_addr, 1);
1026
1027 return;
1028
1029 give_sigsegv:
1030 if (sig == TARGET_SIGSEGV) {
1031 ka->_sa_handler = TARGET_SIG_DFL;
1032 }
1033 force_sig(TARGET_SIGSEGV /* , current */);
1034 }
1035
1036 static int
1037 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1038 {
1039 unsigned int err = 0;
1040 abi_ulong fpstate_addr;
1041 unsigned int tmpflags;
1042
1043 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1044 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1045 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1046 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1047
1048 env->regs[R_EDI] = tswapl(sc->edi);
1049 env->regs[R_ESI] = tswapl(sc->esi);
1050 env->regs[R_EBP] = tswapl(sc->ebp);
1051 env->regs[R_ESP] = tswapl(sc->esp);
1052 env->regs[R_EBX] = tswapl(sc->ebx);
1053 env->regs[R_EDX] = tswapl(sc->edx);
1054 env->regs[R_ECX] = tswapl(sc->ecx);
1055 env->regs[R_EAX] = tswapl(sc->eax);
1056 env->eip = tswapl(sc->eip);
1057
1058 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1059 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1060
1061 tmpflags = tswapl(sc->eflags);
1062 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1063 // regs->orig_eax = -1; /* disable syscall checks */
1064
1065 fpstate_addr = tswapl(sc->fpstate);
1066 if (fpstate_addr != 0) {
1067 if (!access_ok(VERIFY_READ, fpstate_addr,
1068 sizeof(struct target_fpstate)))
1069 goto badframe;
1070 cpu_x86_frstor(env, fpstate_addr, 1);
1071 }
1072
1073 return err;
1074 badframe:
1075 return 1;
1076 }
1077
1078 long do_sigreturn(CPUX86State *env)
1079 {
1080 struct sigframe *frame;
1081 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1082 target_sigset_t target_set;
1083 sigset_t set;
1084 int i;
1085
1086 trace_user_do_sigreturn(env, frame_addr);
1087 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1088 goto badframe;
1089 /* set blocked signals */
1090 __get_user(target_set.sig[0], &frame->sc.oldmask);
1091 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1092 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1093 }
1094
1095 target_to_host_sigset_internal(&set, &target_set);
1096 do_sigprocmask(SIG_SETMASK, &set, NULL);
1097
1098 /* restore registers */
1099 if (restore_sigcontext(env, &frame->sc))
1100 goto badframe;
1101 unlock_user_struct(frame, frame_addr, 0);
1102 return -TARGET_QEMU_ESIGRETURN;
1103
1104 badframe:
1105 unlock_user_struct(frame, frame_addr, 0);
1106 force_sig(TARGET_SIGSEGV);
1107 return 0;
1108 }
1109
1110 long do_rt_sigreturn(CPUX86State *env)
1111 {
1112 abi_ulong frame_addr;
1113 struct rt_sigframe *frame;
1114 sigset_t set;
1115
1116 frame_addr = env->regs[R_ESP] - 4;
1117 trace_user_do_rt_sigreturn(env, frame_addr);
1118 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1119 goto badframe;
1120 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1121 do_sigprocmask(SIG_SETMASK, &set, NULL);
1122
1123 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1124 goto badframe;
1125 }
1126
1127 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1128 get_sp_from_cpustate(env)) == -EFAULT) {
1129 goto badframe;
1130 }
1131
1132 unlock_user_struct(frame, frame_addr, 0);
1133 return -TARGET_QEMU_ESIGRETURN;
1134
1135 badframe:
1136 unlock_user_struct(frame, frame_addr, 0);
1137 force_sig(TARGET_SIGSEGV);
1138 return 0;
1139 }
1140
1141 #elif defined(TARGET_AARCH64)
1142
1143 struct target_sigcontext {
1144 uint64_t fault_address;
1145 /* AArch64 registers */
1146 uint64_t regs[31];
1147 uint64_t sp;
1148 uint64_t pc;
1149 uint64_t pstate;
1150 /* 4K reserved for FP/SIMD state and future expansion */
1151 char __reserved[4096] __attribute__((__aligned__(16)));
1152 };
1153
1154 struct target_ucontext {
1155 abi_ulong tuc_flags;
1156 abi_ulong tuc_link;
1157 target_stack_t tuc_stack;
1158 target_sigset_t tuc_sigmask;
1159 /* glibc uses a 1024-bit sigset_t */
1160 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1161 /* last for future expansion */
1162 struct target_sigcontext tuc_mcontext;
1163 };
1164
1165 /*
1166 * Header to be used at the beginning of structures extending the user
1167 * context. Such structures must be placed after the rt_sigframe on the stack
1168 * and be 16-byte aligned. The last structure must be a dummy one with the
1169 * magic and size set to 0.
1170 */
1171 struct target_aarch64_ctx {
1172 uint32_t magic;
1173 uint32_t size;
1174 };
1175
1176 #define TARGET_FPSIMD_MAGIC 0x46508001
1177
1178 struct target_fpsimd_context {
1179 struct target_aarch64_ctx head;
1180 uint32_t fpsr;
1181 uint32_t fpcr;
1182 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1183 };
1184
1185 /*
1186 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1187 * user space as it will change with the addition of new context. User space
1188 * should check the magic/size information.
1189 */
1190 struct target_aux_context {
1191 struct target_fpsimd_context fpsimd;
1192 /* additional context to be added before "end" */
1193 struct target_aarch64_ctx end;
1194 };
1195
1196 struct target_rt_sigframe {
1197 struct target_siginfo info;
1198 struct target_ucontext uc;
1199 uint64_t fp;
1200 uint64_t lr;
1201 uint32_t tramp[2];
1202 };
1203
1204 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1205 CPUARMState *env, target_sigset_t *set)
1206 {
1207 int i;
1208 struct target_aux_context *aux =
1209 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1210
1211 /* set up the stack frame for unwinding */
1212 __put_user(env->xregs[29], &sf->fp);
1213 __put_user(env->xregs[30], &sf->lr);
1214
1215 for (i = 0; i < 31; i++) {
1216 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1217 }
1218 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1219 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1220 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1221
1222 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1223
1224 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1225 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1226 }
1227
1228 for (i = 0; i < 32; i++) {
1229 #ifdef TARGET_WORDS_BIGENDIAN
1230 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1231 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1232 #else
1233 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1234 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1235 #endif
1236 }
1237 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1238 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1239 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1240 __put_user(sizeof(struct target_fpsimd_context),
1241 &aux->fpsimd.head.size);
1242
1243 /* set the "end" magic */
1244 __put_user(0, &aux->end.magic);
1245 __put_user(0, &aux->end.size);
1246
1247 return 0;
1248 }
1249
1250 static int target_restore_sigframe(CPUARMState *env,
1251 struct target_rt_sigframe *sf)
1252 {
1253 sigset_t set;
1254 int i;
1255 struct target_aux_context *aux =
1256 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1257 uint32_t magic, size, fpsr, fpcr;
1258 uint64_t pstate;
1259
1260 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1261 do_sigprocmask(SIG_SETMASK, &set, NULL);
1262
1263 for (i = 0; i < 31; i++) {
1264 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1265 }
1266
1267 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1268 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1269 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1270 pstate_write(env, pstate);
1271
1272 __get_user(magic, &aux->fpsimd.head.magic);
1273 __get_user(size, &aux->fpsimd.head.size);
1274
1275 if (magic != TARGET_FPSIMD_MAGIC
1276 || size != sizeof(struct target_fpsimd_context)) {
1277 return 1;
1278 }
1279
1280 for (i = 0; i < 32; i++) {
1281 #ifdef TARGET_WORDS_BIGENDIAN
1282 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1283 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1284 #else
1285 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1286 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1287 #endif
1288 }
1289 __get_user(fpsr, &aux->fpsimd.fpsr);
1290 vfp_set_fpsr(env, fpsr);
1291 __get_user(fpcr, &aux->fpsimd.fpcr);
1292 vfp_set_fpcr(env, fpcr);
1293
1294 return 0;
1295 }
1296
1297 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1298 {
1299 abi_ulong sp;
1300
1301 sp = env->xregs[31];
1302
1303 /*
1304 * This is the X/Open sanctioned signal stack switching.
1305 */
1306 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1307 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1308 }
1309
1310 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1311
1312 return sp;
1313 }
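/* The frame is placed below the chosen stack pointer and rounded down to
 * a 16-byte boundary, matching the AArch64 stack alignment requirement
 * (do_rt_sigreturn rejects frames that are not 16-byte aligned).
 */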
1314
1315 static void target_setup_frame(int usig, struct target_sigaction *ka,
1316 target_siginfo_t *info, target_sigset_t *set,
1317 CPUARMState *env)
1318 {
1319 struct target_rt_sigframe *frame;
1320 abi_ulong frame_addr, return_addr;
1321
1322 frame_addr = get_sigframe(ka, env);
1323 trace_user_setup_frame(env, frame_addr);
1324 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1325 goto give_sigsegv;
1326 }
1327
1328 __put_user(0, &frame->uc.tuc_flags);
1329 __put_user(0, &frame->uc.tuc_link);
1330
1331 __put_user(target_sigaltstack_used.ss_sp,
1332 &frame->uc.tuc_stack.ss_sp);
1333 __put_user(sas_ss_flags(env->xregs[31]),
1334 &frame->uc.tuc_stack.ss_flags);
1335 __put_user(target_sigaltstack_used.ss_size,
1336 &frame->uc.tuc_stack.ss_size);
1337 target_setup_sigframe(frame, env, set);
1338 if (ka->sa_flags & TARGET_SA_RESTORER) {
1339 return_addr = ka->sa_restorer;
1340 } else {
1341 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1342 __put_user(0xd2801168, &frame->tramp[0]);
1343 __put_user(0xd4000001, &frame->tramp[1]);
1344 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1345 }
1346 env->xregs[0] = usig;
1347 env->xregs[31] = frame_addr;
1348 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1349 env->pc = ka->_sa_handler;
1350 env->xregs[30] = return_addr;
1351 if (info) {
1352 tswap_siginfo(&frame->info, info);
1353 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1354 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1355 }
1356
1357 unlock_user_struct(frame, frame_addr, 1);
1358 return;
1359
1360 give_sigsegv:
1361 unlock_user_struct(frame, frame_addr, 1);
1362 force_sig(TARGET_SIGSEGV);
1363 }
1364
1365 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1366 target_siginfo_t *info, target_sigset_t *set,
1367 CPUARMState *env)
1368 {
1369 target_setup_frame(sig, ka, info, set, env);
1370 }
1371
1372 static void setup_frame(int sig, struct target_sigaction *ka,
1373 target_sigset_t *set, CPUARMState *env)
1374 {
1375 target_setup_frame(sig, ka, 0, set, env);
1376 }
1377
1378 long do_rt_sigreturn(CPUARMState *env)
1379 {
1380 struct target_rt_sigframe *frame = NULL;
1381 abi_ulong frame_addr = env->xregs[31];
1382
1383 trace_user_do_rt_sigreturn(env, frame_addr);
1384 if (frame_addr & 15) {
1385 goto badframe;
1386 }
1387
1388 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1389 goto badframe;
1390 }
1391
1392 if (target_restore_sigframe(env, frame)) {
1393 goto badframe;
1394 }
1395
1396 if (do_sigaltstack(frame_addr +
1397 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1398 0, get_sp_from_cpustate(env)) == -EFAULT) {
1399 goto badframe;
1400 }
1401
1402 unlock_user_struct(frame, frame_addr, 0);
1403 return -TARGET_QEMU_ESIGRETURN;
1404
1405 badframe:
1406 unlock_user_struct(frame, frame_addr, 0);
1407 force_sig(TARGET_SIGSEGV);
1408 return 0;
1409 }
1410
1411 long do_sigreturn(CPUARMState *env)
1412 {
1413 return do_rt_sigreturn(env);
1414 }
1415
1416 #elif defined(TARGET_ARM)
1417
1418 struct target_sigcontext {
1419 abi_ulong trap_no;
1420 abi_ulong error_code;
1421 abi_ulong oldmask;
1422 abi_ulong arm_r0;
1423 abi_ulong arm_r1;
1424 abi_ulong arm_r2;
1425 abi_ulong arm_r3;
1426 abi_ulong arm_r4;
1427 abi_ulong arm_r5;
1428 abi_ulong arm_r6;
1429 abi_ulong arm_r7;
1430 abi_ulong arm_r8;
1431 abi_ulong arm_r9;
1432 abi_ulong arm_r10;
1433 abi_ulong arm_fp;
1434 abi_ulong arm_ip;
1435 abi_ulong arm_sp;
1436 abi_ulong arm_lr;
1437 abi_ulong arm_pc;
1438 abi_ulong arm_cpsr;
1439 abi_ulong fault_address;
1440 };
1441
1442 struct target_ucontext_v1 {
1443 abi_ulong tuc_flags;
1444 abi_ulong tuc_link;
1445 target_stack_t tuc_stack;
1446 struct target_sigcontext tuc_mcontext;
1447 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1448 };
1449
1450 struct target_ucontext_v2 {
1451 abi_ulong tuc_flags;
1452 abi_ulong tuc_link;
1453 target_stack_t tuc_stack;
1454 struct target_sigcontext tuc_mcontext;
1455 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1456 char __unused[128 - sizeof(target_sigset_t)];
1457 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1458 };
1459
1460 struct target_user_vfp {
1461 uint64_t fpregs[32];
1462 abi_ulong fpscr;
1463 };
1464
1465 struct target_user_vfp_exc {
1466 abi_ulong fpexc;
1467 abi_ulong fpinst;
1468 abi_ulong fpinst2;
1469 };
1470
1471 struct target_vfp_sigframe {
1472 abi_ulong magic;
1473 abi_ulong size;
1474 struct target_user_vfp ufp;
1475 struct target_user_vfp_exc ufp_exc;
1476 } __attribute__((__aligned__(8)));
1477
1478 struct target_iwmmxt_sigframe {
1479 abi_ulong magic;
1480 abi_ulong size;
1481 uint64_t regs[16];
1482 /* Note that not all the coprocessor control registers are stored here */
1483 uint32_t wcssf;
1484 uint32_t wcasf;
1485 uint32_t wcgr0;
1486 uint32_t wcgr1;
1487 uint32_t wcgr2;
1488 uint32_t wcgr3;
1489 } __attribute__((__aligned__(8)));
1490
1491 #define TARGET_VFP_MAGIC 0x56465001
1492 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1493
1494 struct sigframe_v1
1495 {
1496 struct target_sigcontext sc;
1497 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1498 abi_ulong retcode;
1499 };
1500
1501 struct sigframe_v2
1502 {
1503 struct target_ucontext_v2 uc;
1504 abi_ulong retcode;
1505 };
1506
1507 struct rt_sigframe_v1
1508 {
1509 abi_ulong pinfo;
1510 abi_ulong puc;
1511 struct target_siginfo info;
1512 struct target_ucontext_v1 uc;
1513 abi_ulong retcode;
1514 };
1515
1516 struct rt_sigframe_v2
1517 {
1518 struct target_siginfo info;
1519 struct target_ucontext_v2 uc;
1520 abi_ulong retcode;
1521 };
1522
1523 #define TARGET_CONFIG_CPU_32 1
1524
1525 /*
1526 * For ARM syscalls, we encode the syscall number into the instruction.
1527 */
1528 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1529 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1530
1531 /*
1532 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1533 * need two 16-bit instructions.
1534 */
1535 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1536 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1537
1538 static const abi_ulong retcodes[4] = {
1539 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1540 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1541 };
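/* Illustrative note: setup_return() below picks an entry as
 * retcodes[thumb + 2 * siginfo], i.e.
 *   [0] ARM sigreturn      [1] Thumb sigreturn
 *   [2] ARM rt_sigreturn   [3] Thumb rt_sigreturn
 * matching the SWI_* encodings defined above.
 */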
1542
1543
1544 static inline int valid_user_regs(CPUARMState *regs)
1545 {
1546 return 1;
1547 }
1548
1549 static void
1550 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1551 CPUARMState *env, abi_ulong mask)
1552 {
1553 __put_user(env->regs[0], &sc->arm_r0);
1554 __put_user(env->regs[1], &sc->arm_r1);
1555 __put_user(env->regs[2], &sc->arm_r2);
1556 __put_user(env->regs[3], &sc->arm_r3);
1557 __put_user(env->regs[4], &sc->arm_r4);
1558 __put_user(env->regs[5], &sc->arm_r5);
1559 __put_user(env->regs[6], &sc->arm_r6);
1560 __put_user(env->regs[7], &sc->arm_r7);
1561 __put_user(env->regs[8], &sc->arm_r8);
1562 __put_user(env->regs[9], &sc->arm_r9);
1563 __put_user(env->regs[10], &sc->arm_r10);
1564 __put_user(env->regs[11], &sc->arm_fp);
1565 __put_user(env->regs[12], &sc->arm_ip);
1566 __put_user(env->regs[13], &sc->arm_sp);
1567 __put_user(env->regs[14], &sc->arm_lr);
1568 __put_user(env->regs[15], &sc->arm_pc);
1569 #ifdef TARGET_CONFIG_CPU_32
1570 __put_user(cpsr_read(env), &sc->arm_cpsr);
1571 #endif
1572
1573 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1574 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1575 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1576 __put_user(mask, &sc->oldmask);
1577 }
1578
1579 static inline abi_ulong
1580 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1581 {
1582 unsigned long sp = regs->regs[13];
1583
1584 /*
1585 * This is the X/Open sanctioned signal stack switching.
1586 */
1587 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1588 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1589 }
1590 /*
1591 * ATPCS B01 mandates 8-byte alignment
1592 */
1593 return (sp - framesize) & ~7;
1594 }
1595
1596 static void
1597 setup_return(CPUARMState *env, struct target_sigaction *ka,
1598 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1599 {
1600 abi_ulong handler = ka->_sa_handler;
1601 abi_ulong retcode;
1602 int thumb = handler & 1;
1603 uint32_t cpsr = cpsr_read(env);
1604
1605 cpsr &= ~CPSR_IT;
1606 if (thumb) {
1607 cpsr |= CPSR_T;
1608 } else {
1609 cpsr &= ~CPSR_T;
1610 }
1611
1612 if (ka->sa_flags & TARGET_SA_RESTORER) {
1613 retcode = ka->sa_restorer;
1614 } else {
1615 unsigned int idx = thumb;
1616
1617 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1618 idx += 2;
1619 }
1620
1621 __put_user(retcodes[idx], rc);
1622
1623 retcode = rc_addr + thumb;
1624 }
1625
1626 env->regs[0] = usig;
1627 env->regs[13] = frame_addr;
1628 env->regs[14] = retcode;
1629 env->regs[15] = handler & (thumb ? ~1 : ~3);
1630 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1631 }
1632
1633 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1634 {
1635 int i;
1636 struct target_vfp_sigframe *vfpframe;
1637 vfpframe = (struct target_vfp_sigframe *)regspace;
1638 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1639 __put_user(sizeof(*vfpframe), &vfpframe->size);
1640 for (i = 0; i < 32; i++) {
1641 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1642 }
1643 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1644 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1645 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1646 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1647 return (abi_ulong*)(vfpframe+1);
1648 }
1649
1650 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1651 CPUARMState *env)
1652 {
1653 int i;
1654 struct target_iwmmxt_sigframe *iwmmxtframe;
1655 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1656 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1657 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1658 for (i = 0; i < 16; i++) {
1659 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1660 }
1661 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1662 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1663 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1664 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1665 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1666 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1667 return (abi_ulong*)(iwmmxtframe+1);
1668 }
1669
1670 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1671 target_sigset_t *set, CPUARMState *env)
1672 {
1673 struct target_sigaltstack stack;
1674 int i;
1675 abi_ulong *regspace;
1676
1677 /* Clear all the bits of the ucontext we don't use. */
1678 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1679
1680 memset(&stack, 0, sizeof(stack));
1681 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1682 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1683 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1684 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1685
1686 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1687 /* Save coprocessor signal frame. */
1688 regspace = uc->tuc_regspace;
1689 if (arm_feature(env, ARM_FEATURE_VFP)) {
1690 regspace = setup_sigframe_v2_vfp(regspace, env);
1691 }
1692 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1693 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1694 }
1695
1696 /* Write terminating magic word */
1697 __put_user(0, regspace);
1698
1699 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1700 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1701 }
1702 }
1703
1704 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1705 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1706 target_sigset_t *set, CPUARMState *regs)
1707 {
1708 struct sigframe_v1 *frame;
1709 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1710 int i;
1711
1712 trace_user_setup_frame(regs, frame_addr);
1713 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1714 return;
1715 }
1716
1717 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1718
1719 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1720 __put_user(set->sig[i], &frame->extramask[i - 1]);
1721 }
1722
1723 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1724 frame_addr + offsetof(struct sigframe_v1, retcode));
1725
1726 unlock_user_struct(frame, frame_addr, 1);
1727 }
1728
1729 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1730 target_sigset_t *set, CPUARMState *regs)
1731 {
1732 struct sigframe_v2 *frame;
1733 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1734
1735 trace_user_setup_frame(regs, frame_addr);
1736 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1737 return;
1738 }
1739
1740 setup_sigframe_v2(&frame->uc, set, regs);
1741
1742 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1743 frame_addr + offsetof(struct sigframe_v2, retcode));
1744
1745 unlock_user_struct(frame, frame_addr, 1);
1746 }
1747
1748 static void setup_frame(int usig, struct target_sigaction *ka,
1749 target_sigset_t *set, CPUARMState *regs)
1750 {
1751 if (get_osversion() >= 0x020612) {
1752 setup_frame_v2(usig, ka, set, regs);
1753 } else {
1754 setup_frame_v1(usig, ka, set, regs);
1755 }
1756 }
1757
1758 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1759 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1760 target_siginfo_t *info,
1761 target_sigset_t *set, CPUARMState *env)
1762 {
1763 struct rt_sigframe_v1 *frame;
1764 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1765 struct target_sigaltstack stack;
1766 int i;
1767 abi_ulong info_addr, uc_addr;
1768
1769 trace_user_setup_rt_frame(env, frame_addr);
1770 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1771 return /* 1 */;
1772 }
1773
1774 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1775 __put_user(info_addr, &frame->pinfo);
1776 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1777 __put_user(uc_addr, &frame->puc);
1778 tswap_siginfo(&frame->info, info);
1779
1780 /* Clear all the bits of the ucontext we don't use. */
1781 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1782
1783 memset(&stack, 0, sizeof(stack));
1784 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1785 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1786 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1787 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1788
1789 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1790 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1791 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1792 }
1793
1794 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1795 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1796
1797 env->regs[1] = info_addr;
1798 env->regs[2] = uc_addr;
1799
1800 unlock_user_struct(frame, frame_addr, 1);
1801 }
1802
1803 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1804 target_siginfo_t *info,
1805 target_sigset_t *set, CPUARMState *env)
1806 {
1807 struct rt_sigframe_v2 *frame;
1808 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1809 abi_ulong info_addr, uc_addr;
1810
1811 trace_user_setup_rt_frame(env, frame_addr);
1812 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1813 return /* 1 */;
1814 }
1815
1816 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1817 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1818 tswap_siginfo(&frame->info, info);
1819
1820 setup_sigframe_v2(&frame->uc, set, env);
1821
1822 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1823 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1824
1825 env->regs[1] = info_addr;
1826 env->regs[2] = uc_addr;
1827
1828 unlock_user_struct(frame, frame_addr, 1);
1829 }
1830
1831 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1832 target_siginfo_t *info,
1833 target_sigset_t *set, CPUARMState *env)
1834 {
1835 if (get_osversion() >= 0x020612) {
1836 setup_rt_frame_v2(usig, ka, info, set, env);
1837 } else {
1838 setup_rt_frame_v1(usig, ka, info, set, env);
1839 }
1840 }
1841
1842 static int
1843 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1844 {
1845 int err = 0;
1846 uint32_t cpsr;
1847
1848 __get_user(env->regs[0], &sc->arm_r0);
1849 __get_user(env->regs[1], &sc->arm_r1);
1850 __get_user(env->regs[2], &sc->arm_r2);
1851 __get_user(env->regs[3], &sc->arm_r3);
1852 __get_user(env->regs[4], &sc->arm_r4);
1853 __get_user(env->regs[5], &sc->arm_r5);
1854 __get_user(env->regs[6], &sc->arm_r6);
1855 __get_user(env->regs[7], &sc->arm_r7);
1856 __get_user(env->regs[8], &sc->arm_r8);
1857 __get_user(env->regs[9], &sc->arm_r9);
1858 __get_user(env->regs[10], &sc->arm_r10);
1859 __get_user(env->regs[11], &sc->arm_fp);
1860 __get_user(env->regs[12], &sc->arm_ip);
1861 __get_user(env->regs[13], &sc->arm_sp);
1862 __get_user(env->regs[14], &sc->arm_lr);
1863 __get_user(env->regs[15], &sc->arm_pc);
1864 #ifdef TARGET_CONFIG_CPU_32
1865 __get_user(cpsr, &sc->arm_cpsr);
1866 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1867 #endif
1868
1869 err |= !valid_user_regs(env);
1870
1871 return err;
1872 }
1873
1874 static long do_sigreturn_v1(CPUARMState *env)
1875 {
1876 abi_ulong frame_addr;
1877 struct sigframe_v1 *frame = NULL;
1878 target_sigset_t set;
1879 sigset_t host_set;
1880 int i;
1881
1882 /*
1883 * Since we stacked the signal on a 64-bit boundary,
1884      * 'sp' should be word aligned here.  If it's
1885 * not, then the user is trying to mess with us.
1886 */
1887 frame_addr = env->regs[13];
1888 trace_user_do_sigreturn(env, frame_addr);
1889 if (frame_addr & 7) {
1890 goto badframe;
1891 }
1892
1893 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1894 goto badframe;
1895 }
1896
1897 __get_user(set.sig[0], &frame->sc.oldmask);
1898 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1899 __get_user(set.sig[i], &frame->extramask[i - 1]);
1900 }
1901
1902 target_to_host_sigset_internal(&host_set, &set);
1903 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1904
1905 if (restore_sigcontext(env, &frame->sc)) {
1906 goto badframe;
1907 }
1908
1909 #if 0
1910 /* Send SIGTRAP if we're single-stepping */
1911 if (ptrace_cancel_bpt(current))
1912 send_sig(SIGTRAP, current, 1);
1913 #endif
1914 unlock_user_struct(frame, frame_addr, 0);
1915 return -TARGET_QEMU_ESIGRETURN;
1916
1917 badframe:
1918 force_sig(TARGET_SIGSEGV /* , current */);
1919 return 0;
1920 }
1921
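/* Each coprocessor record in the v2 ucontext regspace starts with a
 * magic/size header.  On success these helpers return a pointer just past
 * the record they consumed, so the caller can walk to the next record;
 * on a bad header they return 0.
 */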
1922 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1923 {
1924 int i;
1925 abi_ulong magic, sz;
1926 uint32_t fpscr, fpexc;
1927 struct target_vfp_sigframe *vfpframe;
1928 vfpframe = (struct target_vfp_sigframe *)regspace;
1929
1930 __get_user(magic, &vfpframe->magic);
1931 __get_user(sz, &vfpframe->size);
1932 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1933 return 0;
1934 }
1935 for (i = 0; i < 32; i++) {
1936 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1937 }
1938 __get_user(fpscr, &vfpframe->ufp.fpscr);
1939 vfp_set_fpscr(env, fpscr);
1940 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1941     /* Sanitise FPEXC: ensure VFP is enabled (EN, bit 30), FPINST2 is marked
1942      * invalid (FP2V, bit 28) and the exception flag (EX, bit 31) is cleared.
1943      */
1944 fpexc |= (1 << 30);
1945 fpexc &= ~((1 << 31) | (1 << 28));
1946 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1947 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1948 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1949 return (abi_ulong*)(vfpframe + 1);
1950 }
1951
1952 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1953 abi_ulong *regspace)
1954 {
1955 int i;
1956 abi_ulong magic, sz;
1957 struct target_iwmmxt_sigframe *iwmmxtframe;
1958 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1959
1960 __get_user(magic, &iwmmxtframe->magic);
1961 __get_user(sz, &iwmmxtframe->size);
1962 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1963 return 0;
1964 }
1965 for (i = 0; i < 16; i++) {
1966 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1967 }
1968 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1969     __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1970 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1971 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1972 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1973 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1974 return (abi_ulong*)(iwmmxtframe + 1);
1975 }
1976
1977 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
1978 struct target_ucontext_v2 *uc)
1979 {
1980 sigset_t host_set;
1981 abi_ulong *regspace;
1982
1983 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1984 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1985
1986 if (restore_sigcontext(env, &uc->tuc_mcontext))
1987 return 1;
1988
1989 /* Restore coprocessor signal frame */
1990 regspace = uc->tuc_regspace;
1991 if (arm_feature(env, ARM_FEATURE_VFP)) {
1992 regspace = restore_sigframe_v2_vfp(env, regspace);
1993 if (!regspace) {
1994 return 1;
1995 }
1996 }
1997 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1998 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1999 if (!regspace) {
2000 return 1;
2001 }
2002 }
2003
2004 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2005 return 1;
2006
2007 #if 0
2008 /* Send SIGTRAP if we're single-stepping */
2009 if (ptrace_cancel_bpt(current))
2010 send_sig(SIGTRAP, current, 1);
2011 #endif
2012
2013 return 0;
2014 }
2015
2016 static long do_sigreturn_v2(CPUARMState *env)
2017 {
2018 abi_ulong frame_addr;
2019 struct sigframe_v2 *frame = NULL;
2020
2021 /*
2022 * Since we stacked the signal on a 64-bit boundary,
2023      * 'sp' should be word aligned here.  If it's
2024 * not, then the user is trying to mess with us.
2025 */
2026 frame_addr = env->regs[13];
2027 trace_user_do_sigreturn(env, frame_addr);
2028 if (frame_addr & 7) {
2029 goto badframe;
2030 }
2031
2032 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2033 goto badframe;
2034 }
2035
2036 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2037 goto badframe;
2038 }
2039
2040 unlock_user_struct(frame, frame_addr, 0);
2041 return -TARGET_QEMU_ESIGRETURN;
2042
2043 badframe:
2044 unlock_user_struct(frame, frame_addr, 0);
2045 force_sig(TARGET_SIGSEGV /* , current */);
2046 return 0;
2047 }
2048
2049 long do_sigreturn(CPUARMState *env)
2050 {
2051 if (get_osversion() >= 0x020612) {
2052 return do_sigreturn_v2(env);
2053 } else {
2054 return do_sigreturn_v1(env);
2055 }
2056 }
2057
2058 static long do_rt_sigreturn_v1(CPUARMState *env)
2059 {
2060 abi_ulong frame_addr;
2061 struct rt_sigframe_v1 *frame = NULL;
2062 sigset_t host_set;
2063
2064 /*
2065 * Since we stacked the signal on a 64-bit boundary,
2066      * 'sp' should be word aligned here.  If it's
2067 * not, then the user is trying to mess with us.
2068 */
2069 frame_addr = env->regs[13];
2070 trace_user_do_rt_sigreturn(env, frame_addr);
2071 if (frame_addr & 7) {
2072 goto badframe;
2073 }
2074
2075 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2076 goto badframe;
2077 }
2078
2079 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2080 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2081
2082 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2083 goto badframe;
2084 }
2085
2086 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2087 goto badframe;
2088
2089 #if 0
2090 /* Send SIGTRAP if we're single-stepping */
2091 if (ptrace_cancel_bpt(current))
2092 send_sig(SIGTRAP, current, 1);
2093 #endif
2094 unlock_user_struct(frame, frame_addr, 0);
2095 return -TARGET_QEMU_ESIGRETURN;
2096
2097 badframe:
2098 unlock_user_struct(frame, frame_addr, 0);
2099 force_sig(TARGET_SIGSEGV /* , current */);
2100 return 0;
2101 }
2102
2103 static long do_rt_sigreturn_v2(CPUARMState *env)
2104 {
2105 abi_ulong frame_addr;
2106 struct rt_sigframe_v2 *frame = NULL;
2107
2108 /*
2109 * Since we stacked the signal on a 64-bit boundary,
2110      * 'sp' should be word aligned here.  If it's
2111 * not, then the user is trying to mess with us.
2112 */
2113 frame_addr = env->regs[13];
2114 trace_user_do_rt_sigreturn(env, frame_addr);
2115 if (frame_addr & 7) {
2116 goto badframe;
2117 }
2118
2119 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2120 goto badframe;
2121 }
2122
2123 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2124 goto badframe;
2125 }
2126
2127 unlock_user_struct(frame, frame_addr, 0);
2128 return -TARGET_QEMU_ESIGRETURN;
2129
2130 badframe:
2131 unlock_user_struct(frame, frame_addr, 0);
2132 force_sig(TARGET_SIGSEGV /* , current */);
2133 return 0;
2134 }
2135
2136 long do_rt_sigreturn(CPUARMState *env)
2137 {
2138 if (get_osversion() >= 0x020612) {
2139 return do_rt_sigreturn_v2(env);
2140 } else {
2141 return do_rt_sigreturn_v1(env);
2142 }
2143 }
2144
2145 #elif defined(TARGET_SPARC)
2146
2147 #define __SUNOS_MAXWIN 31
2148
2149 /* This is what SunOS does, so shall I. */
2150 struct target_sigcontext {
2151 abi_ulong sigc_onstack; /* state to restore */
2152
2153 abi_ulong sigc_mask; /* sigmask to restore */
2154 abi_ulong sigc_sp; /* stack pointer */
2155 abi_ulong sigc_pc; /* program counter */
2156 abi_ulong sigc_npc; /* next program counter */
2157 abi_ulong sigc_psr; /* for condition codes etc */
2158 abi_ulong sigc_g1; /* User uses these two registers */
2159 abi_ulong sigc_o0; /* within the trampoline code. */
2160
2161     /* Now comes information regarding the user's window set
2162 * at the time of the signal.
2163 */
2164 abi_ulong sigc_oswins; /* outstanding windows */
2165
2166 /* stack ptrs for each regwin buf */
2167 char *sigc_spbuf[__SUNOS_MAXWIN];
2168
2169 /* Windows to restore after signal */
2170 struct {
2171 abi_ulong locals[8];
2172 abi_ulong ins[8];
2173 } sigc_wbuf[__SUNOS_MAXWIN];
2174 };
2175 /* A Sparc stack frame */
2176 struct sparc_stackf {
2177 abi_ulong locals[8];
2178 abi_ulong ins[8];
2179 /* It's simpler to treat fp and callers_pc as elements of ins[]
2180 * since we never need to access them ourselves.
2181 */
2182 char *structptr;
2183 abi_ulong xargs[6];
2184 abi_ulong xxargs[1];
2185 };
2186
2187 typedef struct {
2188 struct {
2189 abi_ulong psr;
2190 abi_ulong pc;
2191 abi_ulong npc;
2192 abi_ulong y;
2193 abi_ulong u_regs[16]; /* globals and ins */
2194 } si_regs;
2195 int si_mask;
2196 } __siginfo_t;
2197
2198 typedef struct {
2199 abi_ulong si_float_regs[32];
2200 unsigned long si_fsr;
2201 unsigned long si_fpqdepth;
2202 struct {
2203 unsigned long *insn_addr;
2204 unsigned long insn;
2205 } si_fpqueue [16];
2206 } qemu_siginfo_fpu_t;
2207
2208
2209 struct target_signal_frame {
2210 struct sparc_stackf ss;
2211 __siginfo_t info;
2212 abi_ulong fpu_save;
2213 abi_ulong insns[2] __attribute__ ((aligned (8)));
2214 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2215 abi_ulong extra_size; /* Should be 0 */
2216 qemu_siginfo_fpu_t fpu_state;
2217 };
2218 struct target_rt_signal_frame {
2219 struct sparc_stackf ss;
2220 siginfo_t info;
2221 abi_ulong regs[20];
2222 sigset_t mask;
2223 abi_ulong fpu_save;
2224 unsigned int insns[2];
2225 stack_t stack;
2226 unsigned int extra_size; /* Should be 0 */
2227 qemu_siginfo_fpu_t fpu_state;
2228 };
2229
2230 #define UREG_O0 16
2231 #define UREG_O6 22
2232 #define UREG_I0 0
2233 #define UREG_I1 1
2234 #define UREG_I2 2
2235 #define UREG_I3 3
2236 #define UREG_I4 4
2237 #define UREG_I5 5
2238 #define UREG_I6 6
2239 #define UREG_I7 7
2240 #define UREG_L0 8
2241 #define UREG_FP UREG_I6
2242 #define UREG_SP UREG_O6
2243
2244 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2245 CPUSPARCState *env,
2246 unsigned long framesize)
2247 {
2248 abi_ulong sp;
2249
2250 sp = env->regwptr[UREG_FP];
2251
2252 /* This is the X/Open sanctioned signal stack switching. */
2253 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2254 if (!on_sig_stack(sp)
2255 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2256 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2257 }
2258 }
2259 return sp - framesize;
2260 }
2261
2262 static int
2263 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2264 {
2265 int err = 0, i;
2266
2267 __put_user(env->psr, &si->si_regs.psr);
2268 __put_user(env->pc, &si->si_regs.pc);
2269 __put_user(env->npc, &si->si_regs.npc);
2270 __put_user(env->y, &si->si_regs.y);
2271 for (i=0; i < 8; i++) {
2272 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2273 }
2274 for (i=0; i < 8; i++) {
2275 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2276 }
2277 __put_user(mask, &si->si_mask);
2278 return err;
2279 }
2280
2281 #if 0
2282 static int
2283 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2284 CPUSPARCState *env, unsigned long mask)
2285 {
2286 int err = 0;
2287
2288 __put_user(mask, &sc->sigc_mask);
2289 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2290 __put_user(env->pc, &sc->sigc_pc);
2291 __put_user(env->npc, &sc->sigc_npc);
2292 __put_user(env->psr, &sc->sigc_psr);
2293 __put_user(env->gregs[1], &sc->sigc_g1);
2294 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2295
2296 return err;
2297 }
2298 #endif
2299 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2300
2301 static void setup_frame(int sig, struct target_sigaction *ka,
2302 target_sigset_t *set, CPUSPARCState *env)
2303 {
2304 abi_ulong sf_addr;
2305 struct target_signal_frame *sf;
2306 int sigframe_size, err, i;
2307
2308 /* 1. Make sure everything is clean */
2309 //synchronize_user_stack();
2310
2311 sigframe_size = NF_ALIGNEDSZ;
2312 sf_addr = get_sigframe(ka, env, sigframe_size);
2313 trace_user_setup_frame(env, sf_addr);
2314
2315 sf = lock_user(VERIFY_WRITE, sf_addr,
2316 sizeof(struct target_signal_frame), 0);
2317 if (!sf) {
2318 goto sigsegv;
2319 }
2320 #if 0
2321 if (invalid_frame_pointer(sf, sigframe_size))
2322 goto sigill_and_return;
2323 #endif
2324 /* 2. Save the current process state */
2325 err = setup___siginfo(&sf->info, env, set->sig[0]);
2326 __put_user(0, &sf->extra_size);
2327
2328 //save_fpu_state(regs, &sf->fpu_state);
2329 //__put_user(&sf->fpu_state, &sf->fpu_save);
2330
2331 __put_user(set->sig[0], &sf->info.si_mask);
2332 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2333 __put_user(set->sig[i + 1], &sf->extramask[i]);
2334 }
2335
2336 for (i = 0; i < 8; i++) {
2337 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2338 }
2339 for (i = 0; i < 8; i++) {
2340 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2341 }
2342 if (err)
2343 goto sigsegv;
2344
2345 /* 3. signal handler back-trampoline and parameters */
2346 env->regwptr[UREG_FP] = sf_addr;
2347 env->regwptr[UREG_I0] = sig;
2348 env->regwptr[UREG_I1] = sf_addr +
2349 offsetof(struct target_signal_frame, info);
2350 env->regwptr[UREG_I2] = sf_addr +
2351 offsetof(struct target_signal_frame, info);
2352
2353 /* 4. signal handler */
2354 env->pc = ka->_sa_handler;
2355 env->npc = (env->pc + 4);
2356 /* 5. return to kernel instructions */
2357 if (ka->sa_restorer) {
2358 env->regwptr[UREG_I7] = ka->sa_restorer;
2359 } else {
2360 uint32_t val32;
2361
2362 env->regwptr[UREG_I7] = sf_addr +
2363 offsetof(struct target_signal_frame, insns) - 2 * 4;
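        /* A SPARC "ret"/"retl" jumps to the return-address register plus 8,
         * so point it 8 bytes (two instructions) before insns[] to make the
         * handler's return land exactly on the trampoline below.
         */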
2364
2365 /* mov __NR_sigreturn, %g1 */
2366 val32 = 0x821020d8;
2367 __put_user(val32, &sf->insns[0]);
2368
2369 /* t 0x10 */
2370 val32 = 0x91d02010;
2371 __put_user(val32, &sf->insns[1]);
2372 if (err)
2373 goto sigsegv;
2374
2375 /* Flush instruction space. */
2376 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2377 // tb_flush(env);
2378 }
2379 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2380 return;
2381 #if 0
2382 sigill_and_return:
2383 force_sig(TARGET_SIGILL);
2384 #endif
2385 sigsegv:
2386 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2387 force_sig(TARGET_SIGSEGV);
2388 }
2389
2390 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2391 target_siginfo_t *info,
2392 target_sigset_t *set, CPUSPARCState *env)
2393 {
2394 fprintf(stderr, "setup_rt_frame: not implemented\n");
2395 }
2396
2397 long do_sigreturn(CPUSPARCState *env)
2398 {
2399 abi_ulong sf_addr;
2400 struct target_signal_frame *sf;
2401 uint32_t up_psr, pc, npc;
2402 target_sigset_t set;
2403 sigset_t host_set;
2404 int err=0, i;
2405
2406 sf_addr = env->regwptr[UREG_FP];
2407 trace_user_do_sigreturn(env, sf_addr);
2408 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2409 goto segv_and_exit;
2410 }
2411
2412 /* 1. Make sure we are not getting garbage from the user */
2413
2414 if (sf_addr & 3)
2415 goto segv_and_exit;
2416
2417 __get_user(pc, &sf->info.si_regs.pc);
2418 __get_user(npc, &sf->info.si_regs.npc);
2419
2420 if ((pc | npc) & 3) {
2421 goto segv_and_exit;
2422 }
2423
2424 /* 2. Restore the state */
2425 __get_user(up_psr, &sf->info.si_regs.psr);
2426
2427 /* User can only change condition codes and FPU enabling in %psr. */
2428 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2429 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2430
2431 env->pc = pc;
2432 env->npc = npc;
2433 __get_user(env->y, &sf->info.si_regs.y);
2434 for (i=0; i < 8; i++) {
2435 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2436 }
2437 for (i=0; i < 8; i++) {
2438 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2439 }
2440
2441 /* FIXME: implement FPU save/restore:
2442 * __get_user(fpu_save, &sf->fpu_save);
2443 * if (fpu_save)
2444 * err |= restore_fpu_state(env, fpu_save);
2445 */
2446
2447     /* This is pretty much atomic; no amount of locking would prevent
2448      * the races which exist anyway.
2449 */
2450 __get_user(set.sig[0], &sf->info.si_mask);
2451 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2452 __get_user(set.sig[i], &sf->extramask[i - 1]);
2453 }
2454
2455 target_to_host_sigset_internal(&host_set, &set);
2456 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2457
2458 if (err) {
2459 goto segv_and_exit;
2460 }
2461 unlock_user_struct(sf, sf_addr, 0);
2462 return -TARGET_QEMU_ESIGRETURN;
2463
2464 segv_and_exit:
2465 unlock_user_struct(sf, sf_addr, 0);
2466 force_sig(TARGET_SIGSEGV);
2467 }
2468
2469 long do_rt_sigreturn(CPUSPARCState *env)
2470 {
2471 trace_user_do_rt_sigreturn(env, 0);
2472 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2473 return -TARGET_ENOSYS;
2474 }
2475
2476 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2477 #define MC_TSTATE 0
2478 #define MC_PC 1
2479 #define MC_NPC 2
2480 #define MC_Y 3
2481 #define MC_G1 4
2482 #define MC_G2 5
2483 #define MC_G3 6
2484 #define MC_G4 7
2485 #define MC_G5 8
2486 #define MC_G6 9
2487 #define MC_G7 10
2488 #define MC_O0 11
2489 #define MC_O1 12
2490 #define MC_O2 13
2491 #define MC_O3 14
2492 #define MC_O4 15
2493 #define MC_O5 16
2494 #define MC_O6 17
2495 #define MC_O7 18
2496 #define MC_NGREG 19
2497
2498 typedef abi_ulong target_mc_greg_t;
2499 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2500
2501 struct target_mc_fq {
2502 abi_ulong *mcfq_addr;
2503 uint32_t mcfq_insn;
2504 };
2505
2506 struct target_mc_fpu {
2507 union {
2508 uint32_t sregs[32];
2509 uint64_t dregs[32];
2510 //uint128_t qregs[16];
2511 } mcfpu_fregs;
2512 abi_ulong mcfpu_fsr;
2513 abi_ulong mcfpu_fprs;
2514 abi_ulong mcfpu_gsr;
2515 struct target_mc_fq *mcfpu_fq;
2516 unsigned char mcfpu_qcnt;
2517 unsigned char mcfpu_qentsz;
2518 unsigned char mcfpu_enab;
2519 };
2520 typedef struct target_mc_fpu target_mc_fpu_t;
2521
2522 typedef struct {
2523 target_mc_gregset_t mc_gregs;
2524 target_mc_greg_t mc_fp;
2525 target_mc_greg_t mc_i7;
2526 target_mc_fpu_t mc_fpregs;
2527 } target_mcontext_t;
2528
2529 struct target_ucontext {
2530 struct target_ucontext *tuc_link;
2531 abi_ulong tuc_flags;
2532 target_sigset_t tuc_sigmask;
2533 target_mcontext_t tuc_mcontext;
2534 };
2535
2536 /* A V9 register window */
2537 struct target_reg_window {
2538 abi_ulong locals[8];
2539 abi_ulong ins[8];
2540 };
2541
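/* The SPARC V9 ABI biases the stack pointer by 2047 bytes, so the register
 * window save area really lives at %sp + TARGET_STACK_BIAS; the accesses
 * below add this bias to regwptr[UREG_I6] before touching the window.
 */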
2542 #define TARGET_STACK_BIAS 2047
2543
2544 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2545 void sparc64_set_context(CPUSPARCState *env)
2546 {
2547 abi_ulong ucp_addr;
2548 struct target_ucontext *ucp;
2549 target_mc_gregset_t *grp;
2550 abi_ulong pc, npc, tstate;
2551 abi_ulong fp, i7, w_addr;
2552 unsigned int i;
2553
2554 ucp_addr = env->regwptr[UREG_I0];
2555 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2556 goto do_sigsegv;
2557 }
2558 grp = &ucp->tuc_mcontext.mc_gregs;
2559 __get_user(pc, &((*grp)[MC_PC]));
2560 __get_user(npc, &((*grp)[MC_NPC]));
2561 if ((pc | npc) & 3) {
2562 goto do_sigsegv;
2563 }
2564 if (env->regwptr[UREG_I1]) {
2565 target_sigset_t target_set;
2566 sigset_t set;
2567
2568 if (TARGET_NSIG_WORDS == 1) {
2569 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2570 } else {
2571 abi_ulong *src, *dst;
2572 src = ucp->tuc_sigmask.sig;
2573 dst = target_set.sig;
2574 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2575 __get_user(*dst, src);
2576 }
2577 }
2578 target_to_host_sigset_internal(&set, &target_set);
2579 do_sigprocmask(SIG_SETMASK, &set, NULL);
2580 }
2581 env->pc = pc;
2582 env->npc = npc;
2583 __get_user(env->y, &((*grp)[MC_Y]));
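    /* TSTATE packs ASI in bits 31:24, CCR in bits 39:32 and CWP in
     * bits 4:0; unpack those fields into the CPU state.
     */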
2584 __get_user(tstate, &((*grp)[MC_TSTATE]));
2585 env->asi = (tstate >> 24) & 0xff;
2586 cpu_put_ccr(env, tstate >> 32);
2587 cpu_put_cwp64(env, tstate & 0x1f);
2588 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2589 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2590 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2591 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2592 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2593 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2594 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2595 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2596 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2597 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2598 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2599 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2600 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2601 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2602 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2603
2604 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2605 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2606
2607 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2608 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2609 abi_ulong) != 0) {
2610 goto do_sigsegv;
2611 }
2612 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2613 abi_ulong) != 0) {
2614 goto do_sigsegv;
2615 }
2616 /* FIXME this does not match how the kernel handles the FPU in
2617 * its sparc64_set_context implementation. In particular the FPU
2618 * is only restored if fenab is non-zero in:
2619 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2620 */
2621 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
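    /* mcfpu_fregs.sregs[] views the FP file as 64 single-precision
     * registers: even indices are the upper 32-bit half of each 64-bit
     * fpr[] entry, odd indices the lower half.
     */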
2622 {
2623 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2624 for (i = 0; i < 64; i++, src++) {
2625 if (i & 1) {
2626 __get_user(env->fpr[i/2].l.lower, src);
2627 } else {
2628 __get_user(env->fpr[i/2].l.upper, src);
2629 }
2630 }
2631 }
2632 __get_user(env->fsr,
2633 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2634 __get_user(env->gsr,
2635 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2636 unlock_user_struct(ucp, ucp_addr, 0);
2637 return;
2638 do_sigsegv:
2639 unlock_user_struct(ucp, ucp_addr, 0);
2640 force_sig(TARGET_SIGSEGV);
2641 }
2642
2643 void sparc64_get_context(CPUSPARCState *env)
2644 {
2645 abi_ulong ucp_addr;
2646 struct target_ucontext *ucp;
2647 target_mc_gregset_t *grp;
2648 target_mcontext_t *mcp;
2649 abi_ulong fp, i7, w_addr;
2650 int err;
2651 unsigned int i;
2652 target_sigset_t target_set;
2653 sigset_t set;
2654
2655 ucp_addr = env->regwptr[UREG_I0];
2656 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2657 goto do_sigsegv;
2658 }
2659
2660 mcp = &ucp->tuc_mcontext;
2661 grp = &mcp->mc_gregs;
2662
2663 /* Skip over the trap instruction, first. */
2664 env->pc = env->npc;
2665 env->npc += 4;
2666
2667 err = 0;
2668
2669 do_sigprocmask(0, NULL, &set);
2670 host_to_target_sigset_internal(&target_set, &set);
2671 if (TARGET_NSIG_WORDS == 1) {
2672 __put_user(target_set.sig[0],
2673 (abi_ulong *)&ucp->tuc_sigmask);
2674 } else {
2675 abi_ulong *src, *dst;
2676 src = target_set.sig;
2677 dst = ucp->tuc_sigmask.sig;
2678 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2679 __put_user(*src, dst);
2680 }
2681 if (err)
2682 goto do_sigsegv;
2683 }
2684
2685 /* XXX: tstate must be saved properly */
2686 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2687 __put_user(env->pc, &((*grp)[MC_PC]));
2688 __put_user(env->npc, &((*grp)[MC_NPC]));
2689 __put_user(env->y, &((*grp)[MC_Y]));
2690 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2691 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2692 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2693 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2694 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2695 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2696 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2697 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2698 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2699 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2700 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2701 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2702 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2703 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2704 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2705
2706 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2707 fp = i7 = 0;
2708 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2709 abi_ulong) != 0) {
2710 goto do_sigsegv;
2711 }
2712 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2713 abi_ulong) != 0) {
2714 goto do_sigsegv;
2715 }
2716 __put_user(fp, &(mcp->mc_fp));
2717 __put_user(i7, &(mcp->mc_i7));
2718
2719 {
2720 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2721 for (i = 0; i < 64; i++, dst++) {
2722 if (i & 1) {
2723 __put_user(env->fpr[i/2].l.lower, dst);
2724 } else {
2725 __put_user(env->fpr[i/2].l.upper, dst);
2726 }
2727 }
2728 }
2729 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2730 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2731 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2732
2733 if (err)
2734 goto do_sigsegv;
2735 unlock_user_struct(ucp, ucp_addr, 1);
2736 return;
2737 do_sigsegv:
2738 unlock_user_struct(ucp, ucp_addr, 1);
2739 force_sig(TARGET_SIGSEGV);
2740 }
2741 #endif
2742 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2743
2744 # if defined(TARGET_ABI_MIPSO32)
2745 struct target_sigcontext {
2746 uint32_t sc_regmask; /* Unused */
2747 uint32_t sc_status;
2748 uint64_t sc_pc;
2749 uint64_t sc_regs[32];
2750 uint64_t sc_fpregs[32];
2751 uint32_t sc_ownedfp; /* Unused */
2752 uint32_t sc_fpc_csr;
2753 uint32_t sc_fpc_eir; /* Unused */
2754 uint32_t sc_used_math;
2755 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2756 uint32_t pad0;
2757 uint64_t sc_mdhi;
2758 uint64_t sc_mdlo;
2759 target_ulong sc_hi1; /* Was sc_cause */
2760 target_ulong sc_lo1; /* Was sc_badvaddr */
2761 target_ulong sc_hi2; /* Was sc_sigset[4] */
2762 target_ulong sc_lo2;
2763 target_ulong sc_hi3;
2764 target_ulong sc_lo3;
2765 };
2766 # else /* N32 || N64 */
2767 struct target_sigcontext {
2768 uint64_t sc_regs[32];
2769 uint64_t sc_fpregs[32];
2770 uint64_t sc_mdhi;
2771 uint64_t sc_hi1;
2772 uint64_t sc_hi2;
2773 uint64_t sc_hi3;
2774 uint64_t sc_mdlo;
2775 uint64_t sc_lo1;
2776 uint64_t sc_lo2;
2777 uint64_t sc_lo3;
2778 uint64_t sc_pc;
2779 uint32_t sc_fpc_csr;
2780 uint32_t sc_used_math;
2781 uint32_t sc_dsp;
2782 uint32_t sc_reserved;
2783 };
2784 # endif /* O32 */
2785
2786 struct sigframe {
2787 uint32_t sf_ass[4]; /* argument save space for o32 */
2788 uint32_t sf_code[2]; /* signal trampoline */
2789 struct target_sigcontext sf_sc;
2790 target_sigset_t sf_mask;
2791 };
2792
2793 struct target_ucontext {
2794 target_ulong tuc_flags;
2795 target_ulong tuc_link;
2796 target_stack_t tuc_stack;
2797 target_ulong pad0;
2798 struct target_sigcontext tuc_mcontext;
2799 target_sigset_t tuc_sigmask;
2800 };
2801
2802 struct target_rt_sigframe {
2803 uint32_t rs_ass[4]; /* argument save space for o32 */
2804 uint32_t rs_code[2]; /* signal trampoline */
2805 struct target_siginfo rs_info;
2806 struct target_ucontext rs_uc;
2807 };
2808
2809 /* Install trampoline to jump back from signal handler */
2810 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2811 {
2812 int err = 0;
2813
2814 /*
2815 * Set up the return code ...
2816 *
2817 * li v0, __NR__foo_sigreturn
2818 * syscall
2819 */
2820
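    /* 0x24020000 is "addiu $v0, $zero, <nr>" (the canonical "li v0, nr")
     * and 0x0000000c is the MIPS "syscall" instruction.
     */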
2821 __put_user(0x24020000 + syscall, tramp + 0);
2822     __put_user(0x0000000c, tramp + 1);
2823 return err;
2824 }
2825
2826 static inline void setup_sigcontext(CPUMIPSState *regs,
2827 struct target_sigcontext *sc)
2828 {
2829 int i;
2830
2831 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2832 regs->hflags &= ~MIPS_HFLAG_BMASK;
2833
2834 __put_user(0, &sc->sc_regs[0]);
2835 for (i = 1; i < 32; ++i) {
2836 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2837 }
2838
2839 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2840 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2841
2842 /* Rather than checking for dsp existence, always copy. The storage
2843 would just be garbage otherwise. */
2844 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2845 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2846 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2847 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2848 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2849 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2850 {
2851 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2852 __put_user(dsp, &sc->sc_dsp);
2853 }
2854
2855 __put_user(1, &sc->sc_used_math);
2856
2857 for (i = 0; i < 32; ++i) {
2858 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2859 }
2860 }
2861
2862 static inline void
2863 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2864 {
2865 int i;
2866
2867 __get_user(regs->CP0_EPC, &sc->sc_pc);
2868
2869 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2870 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2871
2872 for (i = 1; i < 32; ++i) {
2873 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2874 }
2875
2876 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2877 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2878 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2879 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2880 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2881 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2882 {
2883 uint32_t dsp;
2884 __get_user(dsp, &sc->sc_dsp);
2885 cpu_wrdsp(dsp, 0x3ff, regs);
2886 }
2887
2888 for (i = 0; i < 32; ++i) {
2889 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2890 }
2891 }
2892
2893 /*
2894  * Determine which stack to use.
2895 */
2896 static inline abi_ulong
2897 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2898 {
2899 unsigned long sp;
2900
2901 /* Default to using normal stack */
2902 sp = regs->active_tc.gpr[29];
2903
2904 /*
2905      * The FPU emulator may have its own trampoline active just
2906      * above the user stack, 16 bytes before the next lowest
2907      * 16-byte boundary.  Try to avoid trashing it.
2908 */
2909 sp -= 32;
2910
2911 /* This is the X/Open sanctioned signal stack switching. */
2912 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2913 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2914 }
2915
2916 return (sp - frame_size) & ~7;
2917 }
2918
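/* On CPUs with MIPS16 or microMIPS, bit 0 of a jump target selects the
 * compressed ISA mode; mirror that bit into hflags and strip it from the
 * architectural PC.
 */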
2919 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2920 {
2921 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2922 env->hflags &= ~MIPS_HFLAG_M16;
2923 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2924 env->active_tc.PC &= ~(target_ulong) 1;
2925 }
2926 }
2927
2928 # if defined(TARGET_ABI_MIPSO32)
2929 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2930 static void setup_frame(int sig, struct target_sigaction * ka,
2931 target_sigset_t *set, CPUMIPSState *regs)
2932 {
2933 struct sigframe *frame;
2934 abi_ulong frame_addr;
2935 int i;
2936
2937 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2938 trace_user_setup_frame(regs, frame_addr);
2939 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2940 goto give_sigsegv;
2941 }
2942
2943 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2944
2945 setup_sigcontext(regs, &frame->sf_sc);
2946
2947 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2948 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2949 }
2950
2951 /*
2952 * Arguments to signal handler:
2953 *
2954 * a0 = signal number
2955 * a1 = 0 (should be cause)
2956 * a2 = pointer to struct sigcontext
2957 *
2958 * $25 and PC point to the signal handler, $29 points to the
2959 * struct sigframe.
2960 */
2961 regs->active_tc.gpr[ 4] = sig;
2962 regs->active_tc.gpr[ 5] = 0;
2963 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2964 regs->active_tc.gpr[29] = frame_addr;
2965 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2966     /* The original kernel code sets CP0_EPC to the handler,
2967      * since it returns to userland using eret;
2968      * we cannot do that here, so we must set PC directly */
2969 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2970 mips_set_hflags_isa_mode_from_pc(regs);
2971 unlock_user_struct(frame, frame_addr, 1);
2972 return;
2973
2974 give_sigsegv:
2975 force_sig(TARGET_SIGSEGV/*, current*/);
2976 }
2977
2978 long do_sigreturn(CPUMIPSState *regs)
2979 {
2980 struct sigframe *frame;
2981 abi_ulong frame_addr;
2982 sigset_t blocked;
2983 target_sigset_t target_set;
2984 int i;
2985
2986 frame_addr = regs->active_tc.gpr[29];
2987 trace_user_do_sigreturn(regs, frame_addr);
2988 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2989 goto badframe;
2990
2991 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2992 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2993 }
2994
2995 target_to_host_sigset_internal(&blocked, &target_set);
2996 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
2997
2998 restore_sigcontext(regs, &frame->sf_sc);
2999
3000 #if 0
3001 /*
3002 * Don't let your children do this ...
3003 */
3004 __asm__ __volatile__(
3005 "move\t$29, %0\n\t"
3006 "j\tsyscall_exit"
3007 :/* no outputs */
3008 :"r" (&regs));
3009 /* Unreached */
3010 #endif
3011
3012 regs->active_tc.PC = regs->CP0_EPC;
3013 mips_set_hflags_isa_mode_from_pc(regs);
3014     /* I am not sure this is right, but it seems to work;
3015      * maybe a problem with nested signals? */
3016 regs->CP0_EPC = 0;
3017 return -TARGET_QEMU_ESIGRETURN;
3018
3019 badframe:
3020 force_sig(TARGET_SIGSEGV/*, current*/);
3021 return 0;
3022 }
3023 # endif /* O32 */
3024
3025 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3026 target_siginfo_t *info,
3027 target_sigset_t *set, CPUMIPSState *env)
3028 {
3029 struct target_rt_sigframe *frame;
3030 abi_ulong frame_addr;
3031 int i;
3032
3033 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3034 trace_user_setup_rt_frame(env, frame_addr);
3035 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3036 goto give_sigsegv;
3037 }
3038
3039 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3040
3041 tswap_siginfo(&frame->rs_info, info);
3042
3043 __put_user(0, &frame->rs_uc.tuc_flags);
3044 __put_user(0, &frame->rs_uc.tuc_link);
3045 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3046 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3047 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3048 &frame->rs_uc.tuc_stack.ss_flags);
3049
3050 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3051
3052 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3053 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3054 }
3055
3056 /*
3057 * Arguments to signal handler:
3058 *
3059 * a0 = signal number
3060 * a1 = pointer to siginfo_t
3061 * a2 = pointer to struct ucontext
3062 *
3063 * $25 and PC point to the signal handler, $29 points to the
3064 * struct sigframe.
3065 */
3066 env->active_tc.gpr[ 4] = sig;
3067 env->active_tc.gpr[ 5] = frame_addr
3068 + offsetof(struct target_rt_sigframe, rs_info);
3069 env->active_tc.gpr[ 6] = frame_addr
3070 + offsetof(struct target_rt_sigframe, rs_uc);
3071 env->active_tc.gpr[29] = frame_addr;
3072 env->active_tc.gpr[31] = frame_addr
3073 + offsetof(struct target_rt_sigframe, rs_code);
3074     /* The original kernel code sets CP0_EPC to the handler,
3075      * since it returns to userland using eret;
3076      * we cannot do that here, so we must set PC directly */
3077 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3078 mips_set_hflags_isa_mode_from_pc(env);
3079 unlock_user_struct(frame, frame_addr, 1);
3080 return;
3081
3082 give_sigsegv:
3083 unlock_user_struct(frame, frame_addr, 1);
3084 force_sig(TARGET_SIGSEGV/*, current*/);
3085 }
3086
3087 long do_rt_sigreturn(CPUMIPSState *env)
3088 {
3089 struct target_rt_sigframe *frame;
3090 abi_ulong frame_addr;
3091 sigset_t blocked;
3092
3093 frame_addr = env->active_tc.gpr[29];
3094 trace_user_do_rt_sigreturn(env, frame_addr);
3095 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3096 goto badframe;
3097 }
3098
3099 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3100 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3101
3102 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3103
3104 if (do_sigaltstack(frame_addr +
3105 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3106 0, get_sp_from_cpustate(env)) == -EFAULT)
3107 goto badframe;
3108
3109 env->active_tc.PC = env->CP0_EPC;
3110 mips_set_hflags_isa_mode_from_pc(env);
3111     /* I am not sure this is right, but it seems to work;
3112      * maybe a problem with nested signals? */
3113 env->CP0_EPC = 0;
3114 return -TARGET_QEMU_ESIGRETURN;
3115
3116 badframe:
3117 force_sig(TARGET_SIGSEGV/*, current*/);
3118 return 0;
3119 }
3120
3121 #elif defined(TARGET_SH4)
3122
3123 /*
3124 * code and data structures from linux kernel:
3125 * include/asm-sh/sigcontext.h
3126 * arch/sh/kernel/signal.c
3127 */
3128
3129 struct target_sigcontext {
3130 target_ulong oldmask;
3131
3132 /* CPU registers */
3133 target_ulong sc_gregs[16];
3134 target_ulong sc_pc;
3135 target_ulong sc_pr;
3136 target_ulong sc_sr;
3137 target_ulong sc_gbr;
3138 target_ulong sc_mach;
3139 target_ulong sc_macl;
3140
3141 /* FPU registers */
3142 target_ulong sc_fpregs[16];
3143 target_ulong sc_xfpregs[16];
3144 unsigned int sc_fpscr;
3145 unsigned int sc_fpul;
3146 unsigned int sc_ownedfp;
3147 };
3148
3149 struct target_sigframe
3150 {
3151 struct target_sigcontext sc;
3152 target_ulong extramask[TARGET_NSIG_WORDS-1];
3153 uint16_t retcode[3];
3154 };
3155
3156
3157 struct target_ucontext {
3158 target_ulong tuc_flags;
3159 struct target_ucontext *tuc_link;
3160 target_stack_t tuc_stack;
3161 struct target_sigcontext tuc_mcontext;
3162 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3163 };
3164
3165 struct target_rt_sigframe
3166 {
3167 struct target_siginfo info;
3168 struct target_ucontext uc;
3169 uint16_t retcode[3];
3170 };
3171
3172
3173 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3174 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
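/* As used below, MOVW(2) assembles to "mov.w @(disp, PC), r3", which loads
 * the syscall number stored in retcode[2] into r3, and TRAP_NOARG
 * ("trapa #0x10") then enters the kernel.
 */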
3175
3176 static abi_ulong get_sigframe(struct target_sigaction *ka,
3177 unsigned long sp, size_t frame_size)
3178 {
3179 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3180 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3181 }
3182
3183 return (sp - frame_size) & -8ul;
3184 }
3185
3186 static void setup_sigcontext(struct target_sigcontext *sc,
3187 CPUSH4State *regs, unsigned long mask)
3188 {
3189 int i;
3190
3191 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3192 COPY(gregs[0]); COPY(gregs[1]);
3193 COPY(gregs[2]); COPY(gregs[3]);
3194 COPY(gregs[4]); COPY(gregs[5]);
3195 COPY(gregs[6]); COPY(gregs[7]);
3196 COPY(gregs[8]); COPY(gregs[9]);
3197 COPY(gregs[10]); COPY(gregs[11]);
3198 COPY(gregs[12]); COPY(gregs[13]);
3199 COPY(gregs[14]); COPY(gregs[15]);
3200 COPY(gbr); COPY(mach);
3201 COPY(macl); COPY(pr);
3202 COPY(sr); COPY(pc);
3203 #undef COPY
3204
3205 for (i=0; i<16; i++) {
3206 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3207 }
3208 __put_user(regs->fpscr, &sc->sc_fpscr);
3209 __put_user(regs->fpul, &sc->sc_fpul);
3210
3211     /* non-iBCS2 extensions. */
3212 __put_user(mask, &sc->oldmask);
3213 }
3214
3215 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3216 {
3217 int i;
3218
3219 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3220 COPY(gregs[0]); COPY(gregs[1]);
3221 COPY(gregs[2]); COPY(gregs[3]);
3222 COPY(gregs[4]); COPY(gregs[5]);
3223 COPY(gregs[6]); COPY(gregs[7]);
3224 COPY(gregs[8]); COPY(gregs[9]);
3225 COPY(gregs[10]); COPY(gregs[11]);
3226 COPY(gregs[12]); COPY(gregs[13]);
3227 COPY(gregs[14]); COPY(gregs[15]);
3228 COPY(gbr); COPY(mach);
3229 COPY(macl); COPY(pr);
3230 COPY(sr); COPY(pc);
3231 #undef COPY
3232
3233 for (i=0; i<16; i++) {
3234 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3235 }
3236 __get_user(regs->fpscr, &sc->sc_fpscr);
3237 __get_user(regs->fpul, &sc->sc_fpul);
3238
3239 regs->tra = -1; /* disable syscall checks */
3240 }
3241
3242 static void setup_frame(int sig, struct target_sigaction *ka,
3243 target_sigset_t *set, CPUSH4State *regs)
3244 {
3245 struct target_sigframe *frame;
3246 abi_ulong frame_addr;
3247 int i;
3248
3249 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3250 trace_user_setup_frame(regs, frame_addr);
3251 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3252 goto give_sigsegv;
3253 }
3254
3255 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3256
3257 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3258 __put_user(set->sig[i + 1], &frame->extramask[i]);
3259 }
3260
3261 /* Set up to return from userspace. If provided, use a stub
3262 already in userspace. */
3263 if (ka->sa_flags & TARGET_SA_RESTORER) {
3264 regs->pr = (unsigned long) ka->sa_restorer;
3265 } else {
3266 /* Generate return code (system call to sigreturn) */
3267 abi_ulong retcode_addr = frame_addr +
3268 offsetof(struct target_sigframe, retcode);
3269 __put_user(MOVW(2), &frame->retcode[0]);
3270 __put_user(TRAP_NOARG, &frame->retcode[1]);
3271 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3272 regs->pr = (unsigned long) retcode_addr;
3273 }
3274
3275 /* Set up registers for signal handler */
3276 regs->gregs[15] = frame_addr;
3277 regs->gregs[4] = sig; /* Arg for signal handler */
3278 regs->gregs[5] = 0;
3279     regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3280 regs->pc = (unsigned long) ka->_sa_handler;
3281
3282 unlock_user_struct(frame, frame_addr, 1);
3283 return;
3284
3285 give_sigsegv:
3286 unlock_user_struct(frame, frame_addr, 1);
3287 force_sig(TARGET_SIGSEGV);
3288 }
3289
3290 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3291 target_siginfo_t *info,
3292 target_sigset_t *set, CPUSH4State *regs)
3293 {
3294 struct target_rt_sigframe *frame;
3295 abi_ulong frame_addr;
3296 int i;
3297
3298 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3299 trace_user_setup_rt_frame(regs, frame_addr);
3300 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3301 goto give_sigsegv;
3302 }
3303
3304 tswap_siginfo(&frame->info, info);
3305
3306 /* Create the ucontext. */
3307 __put_user(0, &frame->uc.tuc_flags);
3308 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3309 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3310 &frame->uc.tuc_stack.ss_sp);
3311 __put_user(sas_ss_flags(regs->gregs[15]),
3312 &frame->uc.tuc_stack.ss_flags);
3313 __put_user(target_sigaltstack_used.ss_size,
3314 &frame->uc.tuc_stack.ss_size);
3315 setup_sigcontext(&frame->uc.tuc_mcontext,
3316 regs, set->sig[0]);
3317 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3318 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3319 }
3320
3321 /* Set up to return from userspace. If provided, use a stub
3322 already in userspace. */
3323 if (ka->sa_flags & TARGET_SA_RESTORER) {
3324 regs->pr = (unsigned long) ka->sa_restorer;
3325 } else {
3326 /* Generate return code (system call to sigreturn) */
3327 abi_ulong retcode_addr = frame_addr +
3328 offsetof(struct target_rt_sigframe, retcode);
3329 __put_user(MOVW(2), &frame->retcode[0]);
3330 __put_user(TRAP_NOARG, &frame->retcode[1]);
3331 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3332 regs->pr = (unsigned long) retcode_addr;
3333 }
3334
3335 /* Set up registers for signal handler */
3336 regs->gregs[15] = frame_addr;
3337 regs->gregs[4] = sig; /* Arg for signal handler */
3338 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3339 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3340 regs->pc = (unsigned long) ka->_sa_handler;
3341
3342 unlock_user_struct(frame, frame_addr, 1);
3343 return;
3344
3345 give_sigsegv:
3346 unlock_user_struct(frame, frame_addr, 1);
3347 force_sig(TARGET_SIGSEGV);
3348 }
3349
3350 long do_sigreturn(CPUSH4State *regs)
3351 {
3352 struct target_sigframe *frame;
3353 abi_ulong frame_addr;
3354 sigset_t blocked;
3355 target_sigset_t target_set;
3356 int i;
3357 int err = 0;
3358
3359 frame_addr = regs->gregs[15];
3360 trace_user_do_sigreturn(regs, frame_addr);
3361 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3362 goto badframe;
3363 }
3364
3365 __get_user(target_set.sig[0], &frame->sc.oldmask);
3366 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3367 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3368 }
3369
3370 if (err)
3371 goto badframe;
3372
3373 target_to_host_sigset_internal(&blocked, &target_set);
3374 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3375
3376 restore_sigcontext(regs, &frame->sc);
3377
3378 unlock_user_struct(frame, frame_addr, 0);
3379 return -TARGET_QEMU_ESIGRETURN;
3380
3381 badframe:
3382 unlock_user_struct(frame, frame_addr, 0);
3383 force_sig(TARGET_SIGSEGV);
3384 return 0;
3385 }
3386
3387 long do_rt_sigreturn(CPUSH4State *regs)
3388 {
3389 struct target_rt_sigframe *frame;
3390 abi_ulong frame_addr;
3391 sigset_t blocked;
3392
3393 frame_addr = regs->gregs[15];
3394 trace_user_do_rt_sigreturn(regs, frame_addr);
3395 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3396 goto badframe;
3397 }
3398
3399 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3400 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3401
3402 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3403
3404 if (do_sigaltstack(frame_addr +
3405 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3406 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3407 goto badframe;
3408 }
3409
3410 unlock_user_struct(frame, frame_addr, 0);
3411 return -TARGET_QEMU_ESIGRETURN;
3412
3413 badframe:
3414 unlock_user_struct(frame, frame_addr, 0);
3415 force_sig(TARGET_SIGSEGV);
3416 return 0;
3417 }
3418 #elif defined(TARGET_MICROBLAZE)
3419
3420 struct target_sigcontext {
3421 struct target_pt_regs regs; /* needs to be first */
3422 uint32_t oldmask;
3423 };
3424
3425 struct target_stack_t {
3426 abi_ulong ss_sp;
3427 int ss_flags;
3428 unsigned int ss_size;
3429 };
3430
3431 struct target_ucontext {
3432 abi_ulong tuc_flags;
3433 abi_ulong tuc_link;
3434 struct target_stack_t tuc_stack;
3435 struct target_sigcontext tuc_mcontext;
3436 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3437 };
3438
3439 /* Signal frames. */
3440 struct target_signal_frame {
3441 struct target_ucontext uc;
3442 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3443 uint32_t tramp[2];
3444 };
3445
3446 struct rt_signal_frame {
3447 siginfo_t info;
3448 struct ucontext uc;
3449 uint32_t tramp[2];
3450 };
3451
3452 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3453 {
3454 __put_user(env->regs[0], &sc->regs.r0);
3455 __put_user(env->regs[1], &sc->regs.r1);
3456 __put_user(env->regs[2], &sc->regs.r2);
3457 __put_user(env->regs[3], &sc->regs.r3);
3458 __put_user(env->regs[4], &sc->regs.r4);
3459 __put_user(env->regs[5], &sc->regs.r5);
3460 __put_user(env->regs[6], &sc->regs.r6);
3461 __put_user(env->regs[7], &sc->regs.r7);
3462 __put_user(env->regs[8], &sc->regs.r8);
3463 __put_user(env->regs[9], &sc->regs.r9);
3464 __put_user(env->regs[10], &sc->regs.r10);
3465 __put_user(env->regs[11], &sc->regs.r11);
3466 __put_user(env->regs[12], &sc->regs.r12);
3467 __put_user(env->regs[13], &sc->regs.r13);
3468 __put_user(env->regs[14], &sc->regs.r14);
3469 __put_user(env->regs[15], &sc->regs.r15);
3470 __put_user(env->regs[16], &sc->regs.r16);
3471 __put_user(env->regs[17], &sc->regs.r17);
3472 __put_user(env->regs[18], &sc->regs.r18);
3473 __put_user(env->regs[19], &sc->regs.r19);
3474 __put_user(env->regs[20], &sc->regs.r20);
3475 __put_user(env->regs[21], &sc->regs.r21);
3476 __put_user(env->regs[22], &sc->regs.r22);
3477 __put_user(env->regs[23], &sc->regs.r23);
3478 __put_user(env->regs[24], &sc->regs.r24);
3479 __put_user(env->regs[25], &sc->regs.r25);
3480 __put_user(env->regs[26], &sc->regs.r26);
3481 __put_user(env->regs[27], &sc->regs.r27);
3482 __put_user(env->regs[28], &sc->regs.r28);
3483 __put_user(env->regs[29], &sc->regs.r29);
3484 __put_user(env->regs[30], &sc->regs.r30);
3485 __put_user(env->regs[31], &sc->regs.r31);
3486 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3487 }
3488
3489 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3490 {
3491 __get_user(env->regs[0], &sc->regs.r0);
3492 __get_user(env->regs[1], &sc->regs.r1);
3493 __get_user(env->regs[2], &sc->regs.r2);
3494 __get_user(env->regs[3], &sc->regs.r3);
3495 __get_user(env->regs[4], &sc->regs.r4);
3496 __get_user(env->regs[5], &sc->regs.r5);
3497 __get_user(env->regs[6], &sc->regs.r6);
3498 __get_user(env->regs[7], &sc->regs.r7);
3499 __get_user(env->regs[8], &sc->regs.r8);
3500 __get_user(env->regs[9], &sc->regs.r9);
3501 __get_user(env->regs[10], &sc->regs.r10);
3502 __get_user(env->regs[11], &sc->regs.r11);
3503 __get_user(env->regs[12], &sc->regs.r12);
3504 __get_user(env->regs[13], &sc->regs.r13);
3505 __get_user(env->regs[14], &sc->regs.r14);
3506 __get_user(env->regs[15], &sc->regs.r15);
3507 __get_user(env->regs[16], &sc->regs.r16);
3508 __get_user(env->regs[17], &sc->regs.r17);
3509 __get_user(env->regs[18], &sc->regs.r18);
3510 __get_user(env->regs[19], &sc->regs.r19);
3511 __get_user(env->regs[20], &sc->regs.r20);
3512 __get_user(env->regs[21], &sc->regs.r21);
3513 __get_user(env->regs[22], &sc->regs.r22);
3514 __get_user(env->regs[23], &sc->regs.r23);
3515 __get_user(env->regs[24], &sc->regs.r24);
3516 __get_user(env->regs[25], &sc->regs.r25);
3517 __get_user(env->regs[26], &sc->regs.r26);
3518 __get_user(env->regs[27], &sc->regs.r27);
3519 __get_user(env->regs[28], &sc->regs.r28);
3520 __get_user(env->regs[29], &sc->regs.r29);
3521 __get_user(env->regs[30], &sc->regs.r30);
3522 __get_user(env->regs[31], &sc->regs.r31);
3523 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3524 }
3525
3526 static abi_ulong get_sigframe(struct target_sigaction *ka,
3527 CPUMBState *env, int frame_size)
3528 {
3529 abi_ulong sp = env->regs[1];
3530
3531 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3532 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3533 }
3534
3535 return ((sp - frame_size) & -8UL);
3536 }
3537
3538 static void setup_frame(int sig, struct target_sigaction *ka,
3539 target_sigset_t *set, CPUMBState *env)
3540 {
3541 struct target_signal_frame *frame;
3542 abi_ulong frame_addr;
3543 int i;
3544
3545 frame_addr = get_sigframe(ka, env, sizeof *frame);
3546 trace_user_setup_frame(env, frame_addr);
3547 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3548 goto badframe;
3549
3550 /* Save the mask. */
3551 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3552
3553 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3554 __put_user(set->sig[i], &frame->extramask[i - 1]);
3555 }
3556
3557 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3558
3559 /* Set up to return from userspace. If provided, use a stub
3560 already in userspace. */
3561     /* The minus 8 caters for the "rtsd r15, 8" return, which jumps to r15 + 8 */
3562 if (ka->sa_flags & TARGET_SA_RESTORER) {
3563 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3564 } else {
3565 uint32_t t;
3566 /* Note, these encodings are _big endian_! */
3567 /* addi r12, r0, __NR_sigreturn */
3568 t = 0x31800000UL | TARGET_NR_sigreturn;
3569 __put_user(t, frame->tramp + 0);
3570 /* brki r14, 0x8 */
3571 t = 0xb9cc0008UL;
3572 __put_user(t, frame->tramp + 1);
3573
3574         /* The return from the signal handler will jump to the tramp;
3575            subtract 8 because the return is "rtsd r15, 8" */
3576 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3577 - 8;
3578 }
3579
3580 /* Set up registers for signal handler */
3581 env->regs[1] = frame_addr;
3582 /* Signal handler args: */
3583 env->regs[5] = sig; /* Arg 0: signum */
3584 env->regs[6] = 0;
3585 /* arg 1: sigcontext */
3586     env->regs[7] = frame_addr + offsetof(typeof(*frame), uc);
3587
3588 /* Offset of 4 to handle microblaze rtid r14, 0 */
3589 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3590
3591 unlock_user_struct(frame, frame_addr, 1);
3592 return;
3593 badframe:
3594 force_sig(TARGET_SIGSEGV);
3595 }
3596
3597 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3598 target_siginfo_t *info,
3599 target_sigset_t *set, CPUMBState *env)
3600 {
3601 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3602 }
3603
3604 long do_sigreturn(CPUMBState *env)
3605 {
3606 struct target_signal_frame *frame;
3607 abi_ulong frame_addr;
3608 target_sigset_t target_set;
3609 sigset_t set;
3610 int i;
3611
3612 frame_addr = env->regs[R_SP];
3613 trace_user_do_sigreturn(env, frame_addr);
3614 /* Make sure the guest isn't playing games. */
3615 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3616 goto badframe;
3617
3618 /* Restore blocked signals */
3619 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3620 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3621 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3622 }
3623 target_to_host_sigset_internal(&set, &target_set);
3624 do_sigprocmask(SIG_SETMASK, &set, NULL);
3625
3626 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3627     /* We got here through a sigreturn syscall; our path back is via an
3628        rtb insn, so set up r14 for that. */
3629 env->regs[14] = env->sregs[SR_PC];
3630
3631 unlock_user_struct(frame, frame_addr, 0);
3632 return -TARGET_QEMU_ESIGRETURN;
3633 badframe:
3634 force_sig(TARGET_SIGSEGV);
3635 }
3636
3637 long do_rt_sigreturn(CPUMBState *env)
3638 {
3639 trace_user_do_rt_sigreturn(env, 0);
3640 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3641 return -TARGET_ENOSYS;
3642 }
3643
3644 #elif defined(TARGET_CRIS)
3645
3646 struct target_sigcontext {
3647 struct target_pt_regs regs; /* needs to be first */
3648 uint32_t oldmask;
3649 uint32_t usp; /* usp before stacking this gunk on it */
3650 };
3651
3652 /* Signal frames. */
3653 struct target_signal_frame {
3654 struct target_sigcontext sc;
3655 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3656 uint16_t retcode[4]; /* Trampoline code. */
3657 };
3658
3659 struct rt_signal_frame {
3660 siginfo_t *pinfo;
3661 void *puc;
3662 siginfo_t info;
3663 struct ucontext uc;
3664 uint16_t retcode[4]; /* Trampoline code. */
3665 };
3666
3667 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3668 {
3669 __put_user(env->regs[0], &sc->regs.r0);
3670 __put_user(env->regs[1], &sc->regs.r1);
3671 __put_user(env->regs[2], &sc->regs.r2);
3672 __put_user(env->regs[3], &sc->regs.r3);
3673 __put_user(env->regs[4], &sc->regs.r4);
3674 __put_user(env->regs[5], &sc->regs.r5);
3675 __put_user(env->regs[6], &sc->regs.r6);
3676 __put_user(env->regs[7], &sc->regs.r7);
3677 __put_user(env->regs[8], &sc->regs.r8);
3678 __put_user(env->regs[9], &sc->regs.r9);
3679 __put_user(env->regs[10], &sc->regs.r10);
3680 __put_user(env->regs[11], &sc->regs.r11);
3681 __put_user(env->regs[12], &sc->regs.r12);
3682 __put_user(env->regs[13], &sc->regs.r13);
3683 __put_user(env->regs[14], &sc->usp);
3684 __put_user(env->regs[15], &sc->regs.acr);
3685 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3686 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3687 __put_user(env->pc, &sc->regs.erp);
3688 }
3689
3690 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3691 {
3692 __get_user(env->regs[0], &sc->regs.r0);
3693 __get_user(env->regs[1], &sc->regs.r1);
3694 __get_user(env->regs[2], &sc->regs.r2);
3695 __get_user(env->regs[3], &sc->regs.r3);
3696 __get_user(env->regs[4], &sc->regs.r4);
3697 __get_user(env->regs[5], &sc->regs.r5);
3698 __get_user(env->regs[6], &sc->regs.r6);
3699 __get_user(env->regs[7], &sc->regs.r7);
3700 __get_user(env->regs[8], &sc->regs.r8);
3701 __get_user(env->regs[9], &sc->regs.r9);
3702 __get_user(env->regs[10], &sc->regs.r10);
3703 __get_user(env->regs[11], &sc->regs.r11);
3704 __get_user(env->regs[12], &sc->regs.r12);
3705 __get_user(env->regs[13], &sc->regs.r13);
3706 __get_user(env->regs[14], &sc->usp);
3707 __get_user(env->regs[15], &sc->regs.acr);
3708 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3709 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3710 __get_user(env->pc, &sc->regs.erp);
3711 }
3712
3713 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3714 {
3715 abi_ulong sp;
3716 /* Align the stack downwards to 4. */
3717 sp = (env->regs[R_SP] & ~3);
3718 return sp - framesize;
3719 }
3720
3721 static void setup_frame(int sig, struct target_sigaction *ka,
3722 target_sigset_t *set, CPUCRISState *env)
3723 {
3724 struct target_signal_frame *frame;
3725 abi_ulong frame_addr;
3726 int i;
3727
3728 frame_addr = get_sigframe(env, sizeof *frame);
3729 trace_user_setup_frame(env, frame_addr);
3730 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3731 goto badframe;
3732
3733 /*
3734 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3735 * use this trampoline anymore but it sets it up for GDB.
3736 * In QEMU, using the trampoline simplifies things a bit so we use it.
3737 *
3738 * This is movu.w __NR_sigreturn, r9; break 13;
3739 */
3740 __put_user(0x9c5f, frame->retcode+0);
3741 __put_user(TARGET_NR_sigreturn,
3742 frame->retcode + 1);
3743 __put_user(0xe93d, frame->retcode + 2);
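/* Layout of the three halfwords stored above, per the comment: retcode[0]
 * holds the movu.w-into-r9 opcode (0x9c5f), retcode[1] holds the 16-bit
 * syscall number as its immediate, and retcode[2] (0xe93d) is the
 * "break 13" that enters the kernel, so the guest executes
 * "movu.w TARGET_NR_sigreturn,r9; break 13" when the handler returns. */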
3744
3745 /* Save the mask. */
3746 __put_user(set->sig[0], &frame->sc.oldmask);
3747
3748 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3749 __put_user(set->sig[i], &frame->extramask[i - 1]);
3750 }
3751
3752 setup_sigcontext(&frame->sc, env);
3753
3754 /* Move the stack and setup the arguments for the handler. */
3755 env->regs[R_SP] = frame_addr;
3756 env->regs[10] = sig;
3757 env->pc = (unsigned long) ka->_sa_handler;
3758 /* Link SRP so the guest returns through the trampoline. */
3759 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3760
3761 unlock_user_struct(frame, frame_addr, 1);
3762 return;
3763 badframe:
3764 force_sig(TARGET_SIGSEGV);
3765 }
3766
3767 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3768 target_siginfo_t *info,
3769 target_sigset_t *set, CPUCRISState *env)
3770 {
3771 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3772 }
3773
3774 long do_sigreturn(CPUCRISState *env)
3775 {
3776 struct target_signal_frame *frame;
3777 abi_ulong frame_addr;
3778 target_sigset_t target_set;
3779 sigset_t set;
3780 int i;
3781
3782 frame_addr = env->regs[R_SP];
3783 trace_user_do_sigreturn(env, frame_addr);
3784 /* Make sure the guest isn't playing games. */
3785 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3786 goto badframe;
3787 }
3788
3789 /* Restore blocked signals */
3790 __get_user(target_set.sig[0], &frame->sc.oldmask);
3791 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3792 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3793 }
3794 target_to_host_sigset_internal(&set, &target_set);
3795 do_sigprocmask(SIG_SETMASK, &set, NULL);
3796
3797 restore_sigcontext(&frame->sc, env);
3798 unlock_user_struct(frame, frame_addr, 0);
3799 return -TARGET_QEMU_ESIGRETURN;
3800 badframe:
3801 force_sig(TARGET_SIGSEGV);
3802 }
3803
3804 long do_rt_sigreturn(CPUCRISState *env)
3805 {
3806 trace_user_do_rt_sigreturn(env, 0);
3807 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3808 return -TARGET_ENOSYS;
3809 }
3810
3811 #elif defined(TARGET_OPENRISC)
3812
3813 struct target_sigcontext {
3814 struct target_pt_regs regs;
3815 abi_ulong oldmask;
3816 abi_ulong usp;
3817 };
3818
3819 struct target_ucontext {
3820 abi_ulong tuc_flags;
3821 abi_ulong tuc_link;
3822 target_stack_t tuc_stack;
3823 struct target_sigcontext tuc_mcontext;
3824 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3825 };
3826
3827 struct target_rt_sigframe {
3828 abi_ulong pinfo;
3829 uint64_t puc;
3830 struct target_siginfo info;
3831 struct target_sigcontext sc;
3832 struct target_ucontext uc;
3833 unsigned char retcode[16]; /* trampoline code */
3834 };
3835
3836 /* This is the asm-generic/ucontext.h version */
3837 #if 0
3838 static int restore_sigcontext(CPUOpenRISCState *regs,
3839 struct target_sigcontext *sc)
3840 {
3841 unsigned int err = 0;
3842 unsigned long old_usp;
3843
3844 /* Always make any pending restarted system call return -EINTR */
3845 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3846
3847 /* restore the regs from &sc->regs (same as sc, since regs is first)
3848 * (sc is already checked for VERIFY_READ since the sigframe was
3849 * checked in sys_sigreturn previously)
3850 */
3851
3852 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3853 goto badframe;
3854 }
3855
3856 /* make sure the U-flag is set so user-mode cannot fool us */
3857
3858 regs->sr &= ~SR_SM;
3859
3860 /* restore the old USP as it was before we stacked the sc etc.
3861 * (we cannot just pop the sigcontext since we aligned the sp and
3862 * stuff after pushing it)
3863 */
3864
3865 __get_user(old_usp, &sc->usp);
3866 phx_signal("old_usp 0x%lx", old_usp);
3867
3868 __PHX__ REALLY /* ??? */
3869 wrusp(old_usp);
3870 regs->gpr[1] = old_usp;
3871
3872 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3873 * after this completes, but we don't use that mechanism. maybe we can
3874 * use it now ?
3875 */
3876
3877 return err;
3878
3879 badframe:
3880 return 1;
3881 }
3882 #endif
3883
3884 /* Set up a signal frame. */
3885
3886 static void setup_sigcontext(struct target_sigcontext *sc,
3887 CPUOpenRISCState *regs,
3888 unsigned long mask)
3889 {
3890 unsigned long usp = regs->gpr[1];
3891
3892 /* copy the regs. they are first in sc so we can use sc directly */
3893
3894 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3895
3896 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3897 the signal handler. The frametype will be restored to its previous
3898 value in restore_sigcontext. */
3899 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3900
3901 /* then some other stuff */
3902 __put_user(mask, &sc->oldmask);
3903 __put_user(usp, &sc->usp);
3904 }
3905
3906 static inline unsigned long align_sigframe(unsigned long sp)
3907 {
3908 unsigned long i;
3909 i = sp & ~3UL;
3910 return i;
3911 }
3912
3913 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3914 CPUOpenRISCState *regs,
3915 size_t frame_size)
3916 {
3917 unsigned long sp = regs->gpr[1];
3918 int onsigstack = on_sig_stack(sp);
3919
3920 /* redzone */
3921 /* This is the X/Open sanctioned signal stack switching. */
3922 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3923 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3924 }
3925
3926 sp = align_sigframe(sp - frame_size);
3927
3928 /*
3929 * If we are on the alternate signal stack and would overflow it, don't.
3930 * Return an always-bogus address instead so we will die with SIGSEGV.
3931 */
3932
3933 if (onsigstack && !likely(on_sig_stack(sp))) {
3934 return -1L;
3935 }
3936
3937 return sp;
3938 }
3939
3940 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3941 target_siginfo_t *info,
3942 target_sigset_t *set, CPUOpenRISCState *env)
3943 {
3944 int err = 0;
3945 abi_ulong frame_addr;
3946 unsigned long return_ip;
3947 struct target_rt_sigframe *frame;
3948 abi_ulong info_addr, uc_addr;
3949
3950 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3951 trace_user_setup_rt_frame(env, frame_addr);
3952 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3953 goto give_sigsegv;
3954 }
3955
3956 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
3957 __put_user(info_addr, &frame->pinfo);
3958 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
3959 __put_user(uc_addr, &frame->puc);
3960
3961 if (ka->sa_flags & TARGET_SA_SIGINFO) {
3962 tswap_siginfo(&frame->info, info);
3963 }
3964
3965 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
3966 __put_user(0, &frame->uc.tuc_flags);
3967 __put_user(0, &frame->uc.tuc_link);
3968 __put_user(target_sigaltstack_used.ss_sp,
3969 &frame->uc.tuc_stack.ss_sp);
3970 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
3971 __put_user(target_sigaltstack_used.ss_size,
3972 &frame->uc.tuc_stack.ss_size);
3973 setup_sigcontext(&frame->sc, env, set->sig[0]);
3974
3975 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
3976
3977 /* trampoline - the desired return ip is the retcode itself */
3978 return_ip = frame_addr + offsetof(struct target_rt_sigframe, retcode);
3979 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1 */
3980 __put_user(0xa960, (short *)(frame->retcode + 0));
3981 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
3982 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
3983 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
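/* The stores above build the trampoline described in the comment: 0xa960 is
 * the opcode/register half of "l.ori r11,r0,<nr>", the next halfword is the
 * syscall number immediate, and 0x20000001 is "l.sys 1".  The trailing
 * 0x15000000 word appears to be an l.nop used as padding; that reading is
 * an assumption, not something the original comment states. */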
3984
3985 if (err) {
3986 goto give_sigsegv;
3987 }
3988
3989 /* TODO what is the current->exec_domain stuff and invmap ? */
3990
3991 /* Set up registers for signal handler */
3992 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
3993 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
3994 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
3995 env->gpr[4] = info_addr; /* arg 2: (siginfo_t*) */
3996 env->gpr[5] = uc_addr; /* arg 3: ucontext */
3997
3998 /* actually move the usp to reflect the stacked frame */
3999 env->gpr[1] = frame_addr;
4000
4001 return;
4002
4003 give_sigsegv:
4004 unlock_user_struct(frame, frame_addr, 1);
4005 if (sig == TARGET_SIGSEGV) {
4006 ka->_sa_handler = TARGET_SIG_DFL;
4007 }
4008 force_sig(TARGET_SIGSEGV);
4009 }
4010
4011 long do_sigreturn(CPUOpenRISCState *env)
4012 {
4013 trace_user_do_sigreturn(env, 0);
4014 fprintf(stderr, "do_sigreturn: not implemented\n");
4015 return -TARGET_ENOSYS;
4016 }
4017
4018 long do_rt_sigreturn(CPUOpenRISCState *env)
4019 {
4020 trace_user_do_rt_sigreturn(env, 0);
4021 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4022 return -TARGET_ENOSYS;
4023 }
4024 /* TARGET_OPENRISC */
4025
4026 #elif defined(TARGET_S390X)
4027
4028 #define __NUM_GPRS 16
4029 #define __NUM_FPRS 16
4030 #define __NUM_ACRS 16
4031
4032 #define S390_SYSCALL_SIZE 2
4033 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4034
4035 #define _SIGCONTEXT_NSIG 64
4036 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4037 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4038 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4039 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4040 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
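/* 0x0a is the s390 SVC opcode, so ORing a syscall number into
 * S390_SYSCALL_OPCODE yields the two-byte "svc <nr>" stub that
 * setup_frame()/setup_rt_frame() below store into retcode[]. */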
4041
4042 typedef struct {
4043 target_psw_t psw;
4044 target_ulong gprs[__NUM_GPRS];
4045 unsigned int acrs[__NUM_ACRS];
4046 } target_s390_regs_common;
4047
4048 typedef struct {
4049 unsigned int fpc;
4050 double fprs[__NUM_FPRS];
4051 } target_s390_fp_regs;
4052
4053 typedef struct {
4054 target_s390_regs_common regs;
4055 target_s390_fp_regs fpregs;
4056 } target_sigregs;
4057
4058 struct target_sigcontext {
4059 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4060 target_sigregs *sregs;
4061 };
4062
4063 typedef struct {
4064 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4065 struct target_sigcontext sc;
4066 target_sigregs sregs;
4067 int signo;
4068 uint8_t retcode[S390_SYSCALL_SIZE];
4069 } sigframe;
4070
4071 struct target_ucontext {
4072 target_ulong tuc_flags;
4073 struct target_ucontext *tuc_link;
4074 target_stack_t tuc_stack;
4075 target_sigregs tuc_mcontext;
4076 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4077 };
4078
4079 typedef struct {
4080 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4081 uint8_t retcode[S390_SYSCALL_SIZE];
4082 struct target_siginfo info;
4083 struct target_ucontext uc;
4084 } rt_sigframe;
4085
4086 static inline abi_ulong
4087 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4088 {
4089 abi_ulong sp;
4090
4091 /* Default to using normal stack */
4092 sp = env->regs[15];
4093
4094 /* This is the X/Open sanctioned signal stack switching. */
4095 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4096 if (!sas_ss_flags(sp)) {
4097 sp = target_sigaltstack_used.ss_sp +
4098 target_sigaltstack_used.ss_size;
4099 }
4100 }
4101
4102 /* This is the legacy signal stack switching. */
4103 else if (/* FIXME !user_mode(regs) */ 0 &&
4104 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4105 ka->sa_restorer) {
4106 sp = (abi_ulong) ka->sa_restorer;
4107 }
4108
4109 return (sp - frame_size) & -8ul;
4110 }
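/* "& -8ul" rounds the frame address down to an 8-byte boundary.  With
 * illustrative values only: sp = 0x1007 and frame_size = 0x20 give
 * 0x0fe7, which rounds down to 0x0fe0. */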
4111
4112 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4113 {
4114 int i;
4115 //save_access_regs(current->thread.acrs); FIXME
4116
4117 /* Copy a 'clean' PSW mask to the user to avoid leaking
4118 information about whether PER is currently on. */
4119 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4120 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4121 for (i = 0; i < 16; i++) {
4122 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4123 }
4124 for (i = 0; i < 16; i++) {
4125 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4126 }
4127 /*
4128 * We have to store the fp registers to current->thread.fp_regs
4129 * to merge them with the emulated registers.
4130 */
4131 //save_fp_regs(&current->thread.fp_regs); FIXME
4132 for (i = 0; i < 16; i++) {
4133 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4134 }
4135 }
4136
4137 static void setup_frame(int sig, struct target_sigaction *ka,
4138 target_sigset_t *set, CPUS390XState *env)
4139 {
4140 sigframe *frame;
4141 abi_ulong frame_addr;
4142
4143 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4144 trace_user_setup_frame(env, frame_addr);
4145 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4146 goto give_sigsegv;
4147 }
4148
4149 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4150
4151 save_sigregs(env, &frame->sregs);
4152
4153 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4154 (abi_ulong *)&frame->sc.sregs);
4155
4156 /* Set up to return from userspace. If provided, use a stub
4157 already in userspace. */
4158 if (ka->sa_flags & TARGET_SA_RESTORER) {
4159 env->regs[14] = (unsigned long)
4160 ka->sa_restorer | PSW_ADDR_AMODE;
4161 } else {
4162 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4163 | PSW_ADDR_AMODE;
4164 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4165 (uint16_t *)(frame->retcode));
4166 }
4167
4168 /* Set up backchain. */
4169 __put_user(env->regs[15], (abi_ulong *) frame);
4170
4171 /* Set up registers for signal handler */
4172 env->regs[15] = frame_addr;
4173 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4174
4175 env->regs[2] = sig; //map_signal(sig);
4176 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4177
4178 /* We forgot to include these in the sigcontext.
4179 To avoid breaking binary compatibility, they are passed as args. */
4180 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4181 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4182
4183 /* Place signal number on stack to allow backtrace from handler. */
4184 __put_user(env->regs[2], (int *) &frame->signo);
4185 unlock_user_struct(frame, frame_addr, 1);
4186 return;
4187
4188 give_sigsegv:
4189 force_sig(TARGET_SIGSEGV);
4190 }
4191
4192 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4193 target_siginfo_t *info,
4194 target_sigset_t *set, CPUS390XState *env)
4195 {
4196 int i;
4197 rt_sigframe *frame;
4198 abi_ulong frame_addr;
4199
4200 frame_addr = get_sigframe(ka, env, sizeof *frame);
4201 trace_user_setup_rt_frame(env, frame_addr);
4202 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4203 goto give_sigsegv;
4204 }
4205
4206 tswap_siginfo(&frame->info, info);
4207
4208 /* Create the ucontext. */
4209 __put_user(0, &frame->uc.tuc_flags);
4210 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4211 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4212 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4213 &frame->uc.tuc_stack.ss_flags);
4214 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4215 save_sigregs(env, &frame->uc.tuc_mcontext);
4216 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4217 __put_user((abi_ulong)set->sig[i],
4218 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4219 }
4220
4221 /* Set up to return from userspace. If provided, use a stub
4222 already in userspace. */
4223 if (ka->sa_flags & TARGET_SA_RESTORER) {
4224 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4225 } else {
4226 env->regs[14] = (frame_addr + offsetof(typeof(*frame), retcode)) | PSW_ADDR_AMODE;
4227 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4228 (uint16_t *)(frame->retcode));
4229 }
4230
4231 /* Set up backchain. */
4232 __put_user(env->regs[15], (abi_ulong *) frame);
4233
4234 /* Set up registers for signal handler */
4235 env->regs[15] = frame_addr;
4236 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4237
4238 env->regs[2] = sig; //map_signal(sig);
4239 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4240 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4241 return;
4242
4243 give_sigsegv:
4244 force_sig(TARGET_SIGSEGV);
4245 }
4246
4247 static int
4248 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4249 {
4250 int err = 0;
4251 int i;
4252
4253 for (i = 0; i < 16; i++) {
4254 __get_user(env->regs[i], &sc->regs.gprs[i]);
4255 }
4256
4257 __get_user(env->psw.mask, &sc->regs.psw.mask);
4258 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4259 (unsigned long long)env->psw.addr);
4260 __get_user(env->psw.addr, &sc->regs.psw.addr);
4261 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4262
4263 for (i = 0; i < 16; i++) {
4264 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4265 }
4266 for (i = 0; i < 16; i++) {
4267 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4268 }
4269
4270 return err;
4271 }
4272
4273 long do_sigreturn(CPUS390XState *env)
4274 {
4275 sigframe *frame;
4276 abi_ulong frame_addr = env->regs[15];
4277 target_sigset_t target_set;
4278 sigset_t set;
4279
4280 trace_user_do_sigreturn(env, frame_addr);
4281 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4282 goto badframe;
4283 }
4284 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4285
4286 target_to_host_sigset_internal(&set, &target_set);
4287 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
4288
4289 if (restore_sigregs(env, &frame->sregs)) {
4290 goto badframe;
4291 }
4292
4293 unlock_user_struct(frame, frame_addr, 0);
4294 return -TARGET_QEMU_ESIGRETURN;
4295
4296 badframe:
4297 force_sig(TARGET_SIGSEGV);
4298 return 0;
4299 }
4300
4301 long do_rt_sigreturn(CPUS390XState *env)
4302 {
4303 rt_sigframe *frame;
4304 abi_ulong frame_addr = env->regs[15];
4305 sigset_t set;
4306
4307 trace_user_do_rt_sigreturn(env, frame_addr);
4308 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4309 goto badframe;
4310 }
4311 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4312
4313 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
4314
4315 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4316 goto badframe;
4317 }
4318
4319 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4320 get_sp_from_cpustate(env)) == -EFAULT) {
4321 goto badframe;
4322 }
4323 unlock_user_struct(frame, frame_addr, 0);
4324 return -TARGET_QEMU_ESIGRETURN;
4325
4326 badframe:
4327 unlock_user_struct(frame, frame_addr, 0);
4328 force_sig(TARGET_SIGSEGV);
4329 return 0;
4330 }
4331
4332 #elif defined(TARGET_PPC)
4333
4334 /* Size of dummy stack frame allocated when calling signal handler.
4335 See arch/powerpc/include/asm/ptrace.h. */
4336 #if defined(TARGET_PPC64)
4337 #define SIGNAL_FRAMESIZE 128
4338 #else
4339 #define SIGNAL_FRAMESIZE 64
4340 #endif
4341
4342 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4343 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4344 struct target_mcontext {
4345 target_ulong mc_gregs[48];
4346 /* Includes fpscr. */
4347 uint64_t mc_fregs[33];
4348 target_ulong mc_pad[2];
4349 /* We need to handle Altivec and SPE at the same time, which no
4350 kernel needs to do. Fortunately, the kernel defines this bit to
4351 be Altivec-register-large all the time, rather than trying to
4352 twiddle it based on the specific platform. */
4353 union {
4354 /* SPE vector registers. One extra for SPEFSCR. */
4355 uint32_t spe[33];
4356 /* Altivec vector registers. The packing of VSCR and VRSAVE
4357 varies depending on whether we're PPC64 or not: PPC64 splits
4358 them apart; PPC32 stuffs them together. */
4359 #if defined(TARGET_PPC64)
4360 #define QEMU_NVRREG 34
4361 #else
4362 #define QEMU_NVRREG 33
4363 #endif
4364 ppc_avr_t altivec[QEMU_NVRREG];
4365 #undef QEMU_NVRREG
4366 } mc_vregs __attribute__((__aligned__(16)));
4367 };
4368
4369 /* See arch/powerpc/include/asm/sigcontext.h. */
4370 struct target_sigcontext {
4371 target_ulong _unused[4];
4372 int32_t signal;
4373 #if defined(TARGET_PPC64)
4374 int32_t pad0;
4375 #endif
4376 target_ulong handler;
4377 target_ulong oldmask;
4378 target_ulong regs; /* struct pt_regs __user * */
4379 #if defined(TARGET_PPC64)
4380 struct target_mcontext mcontext;
4381 #endif
4382 };
4383
4384 /* Indices for target_mcontext.mc_gregs, below.
4385 See arch/powerpc/include/asm/ptrace.h for details. */
4386 enum {
4387 TARGET_PT_R0 = 0,
4388 TARGET_PT_R1 = 1,
4389 TARGET_PT_R2 = 2,
4390 TARGET_PT_R3 = 3,
4391 TARGET_PT_R4 = 4,
4392 TARGET_PT_R5 = 5,
4393 TARGET_PT_R6 = 6,
4394 TARGET_PT_R7 = 7,
4395 TARGET_PT_R8 = 8,
4396 TARGET_PT_R9 = 9,
4397 TARGET_PT_R10 = 10,
4398 TARGET_PT_R11 = 11,
4399 TARGET_PT_R12 = 12,
4400 TARGET_PT_R13 = 13,
4401 TARGET_PT_R14 = 14,
4402 TARGET_PT_R15 = 15,
4403 TARGET_PT_R16 = 16,
4404 TARGET_PT_R17 = 17,
4405 TARGET_PT_R18 = 18,
4406 TARGET_PT_R19 = 19,
4407 TARGET_PT_R20 = 20,
4408 TARGET_PT_R21 = 21,
4409 TARGET_PT_R22 = 22,
4410 TARGET_PT_R23 = 23,
4411 TARGET_PT_R24 = 24,
4412 TARGET_PT_R25 = 25,
4413 TARGET_PT_R26 = 26,
4414 TARGET_PT_R27 = 27,
4415 TARGET_PT_R28 = 28,
4416 TARGET_PT_R29 = 29,
4417 TARGET_PT_R30 = 30,
4418 TARGET_PT_R31 = 31,
4419 TARGET_PT_NIP = 32,
4420 TARGET_PT_MSR = 33,
4421 TARGET_PT_ORIG_R3 = 34,
4422 TARGET_PT_CTR = 35,
4423 TARGET_PT_LNK = 36,
4424 TARGET_PT_XER = 37,
4425 TARGET_PT_CCR = 38,
4426 /* Yes, there are two registers with #39. One is 64-bit only. */
4427 TARGET_PT_MQ = 39,
4428 TARGET_PT_SOFTE = 39,
4429 TARGET_PT_TRAP = 40,
4430 TARGET_PT_DAR = 41,
4431 TARGET_PT_DSISR = 42,
4432 TARGET_PT_RESULT = 43,
4433 TARGET_PT_REGS_COUNT = 44
4434 };
4435
4436
4437 struct target_ucontext {
4438 target_ulong tuc_flags;
4439 target_ulong tuc_link; /* struct ucontext __user * */
4440 struct target_sigaltstack tuc_stack;
4441 #if !defined(TARGET_PPC64)
4442 int32_t tuc_pad[7];
4443 target_ulong tuc_regs; /* struct mcontext __user *
4444 points to uc_mcontext field */
4445 #endif
4446 target_sigset_t tuc_sigmask;
4447 #if defined(TARGET_PPC64)
4448 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4449 struct target_sigcontext tuc_sigcontext;
4450 #else
4451 int32_t tuc_maskext[30];
4452 int32_t tuc_pad2[3];
4453 struct target_mcontext tuc_mcontext;
4454 #endif
4455 };
4456
4457 /* See arch/powerpc/kernel/signal_32.c. */
4458 struct target_sigframe {
4459 struct target_sigcontext sctx;
4460 struct target_mcontext mctx;
4461 int32_t abigap[56];
4462 };
4463
4464 #if defined(TARGET_PPC64)
4465
4466 #define TARGET_TRAMP_SIZE 6
4467
4468 struct target_rt_sigframe {
4469 /* sys_rt_sigreturn requires the ucontext be the first field */
4470 struct target_ucontext uc;
4471 target_ulong _unused[2];
4472 uint32_t trampoline[TARGET_TRAMP_SIZE];
4473 target_ulong pinfo; /* struct siginfo __user * */
4474 target_ulong puc; /* void __user * */
4475 struct target_siginfo info;
4476 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4477 char abigap[288];
4478 } __attribute__((aligned(16)));
4479
4480 #else
4481
4482 struct target_rt_sigframe {
4483 struct target_siginfo info;
4484 struct target_ucontext uc;
4485 int32_t abigap[56];
4486 };
4487
4488 #endif
4489
4490 #if defined(TARGET_PPC64)
4491
4492 struct target_func_ptr {
4493 target_ulong entry;
4494 target_ulong toc;
4495 };
4496
4497 #endif
4498
4499 /* We use the mc_pad field for the signal return trampoline. */
4500 #define tramp mc_pad
4501
4502 /* See arch/powerpc/kernel/signal.c. */
4503 static target_ulong get_sigframe(struct target_sigaction *ka,
4504 CPUPPCState *env,
4505 int frame_size)
4506 {
4507 target_ulong oldsp, newsp;
4508
4509 oldsp = env->gpr[1];
4510
4511 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4512 (sas_ss_flags(oldsp) == 0)) {
4513 oldsp = (target_sigaltstack_used.ss_sp
4514 + target_sigaltstack_used.ss_size);
4515 }
4516
4517 newsp = (oldsp - frame_size) & ~0xFUL;
4518
4519 return newsp;
4520 }
4521
4522 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4523 {
4524 target_ulong msr = env->msr;
4525 int i;
4526 target_ulong ccr = 0;
4527
4528 /* In general, the kernel attempts to be intelligent about what it
4529 needs to save for Altivec/FP/SPE registers. We don't care that
4530 much, so we just go ahead and save everything. */
4531
4532 /* Save general registers. */
4533 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4534 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4535 }
4536 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4537 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4538 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4539 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4540
4541 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4542 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4543 }
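    /* The loop above packs the eight 4-bit CR fields into one 32-bit image:
       crf[0] (CR0) lands in bits 28..31 and crf[7] (CR7) in bits 0..3,
       which is the layout restore_user_regs() unpacks again. */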
4544 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4545
4546 /* Save Altivec registers if necessary. */
4547 if (env->insns_flags & PPC_ALTIVEC) {
4548 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4549 ppc_avr_t *avr = &env->avr[i];
4550 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4551
4552 __put_user(avr->u64[0], &vreg->u64[0]);
4553 __put_user(avr->u64[1], &vreg->u64[1]);
4554 }
4555 /* Set MSR_VR in the saved MSR value to indicate that
4556 frame->mc_vregs contains valid data. */
4557 msr |= MSR_VR;
4558 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4559 &frame->mc_vregs.altivec[32].u32[3]);
4560 }
4561
4562 /* Save floating point registers. */
4563 if (env->insns_flags & PPC_FLOAT) {
4564 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4565 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4566 }
4567 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4568 }
4569
4570 /* Save SPE registers. The kernel only saves the high half. */
4571 if (env->insns_flags & PPC_SPE) {
4572 #if defined(TARGET_PPC64)
4573 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4574 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4575 }
4576 #else
4577 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4578 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4579 }
4580 #endif
4581 /* Set MSR_SPE in the saved MSR value to indicate that
4582 frame->mc_vregs contains valid data. */
4583 msr |= MSR_SPE;
4584 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4585 }
4586
4587 /* Store MSR. */
4588 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4589 }
4590
4591 static void encode_trampoline(int sigret, uint32_t *tramp)
4592 {
4593 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4594 if (sigret) {
4595 __put_user(0x38000000 | sigret, &tramp[0]);
4596 __put_user(0x44000002, &tramp[1]);
4597 }
4598 }
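/* Decoding of the two words above, per the "li r0,sigret; sc" comment:
 * 0x38000000 is addi with rT = rA = 0, so ORing in sigret produces
 * "li r0,sigret", and 0x44000002 is the "sc" system-call instruction. */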
4599
4600 static void restore_user_regs(CPUPPCState *env,
4601 struct target_mcontext *frame, int sig)
4602 {
4603 target_ulong save_r2 = 0;
4604 target_ulong msr;
4605 target_ulong ccr;
4606
4607 int i;
4608
4609 if (!sig) {
4610 save_r2 = env->gpr[2];
4611 }
4612
4613 /* Restore general registers. */
4614 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4615 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4616 }
4617 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4618 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4619 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4620 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4621 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4622
4623 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4624 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4625 }
4626
4627 if (!sig) {
4628 env->gpr[2] = save_r2;
4629 }
4630 /* Restore MSR. */
4631 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4632
4633 /* If doing signal return, restore the previous little-endian mode. */
4634 if (sig)
4635 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4636
4637 /* Restore Altivec registers if necessary. */
4638 if (env->insns_flags & PPC_ALTIVEC) {
4639 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4640 ppc_avr_t *avr = &env->avr[i];
4641 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4642
4643 __get_user(avr->u64[0], &vreg->u64[0]);
4644 __get_user(avr->u64[1], &vreg->u64[1]);
4645 }
4646 /* Restore VRSAVE, which save_user_regs() stored alongside the
4647 Altivec registers in frame->mc_vregs. */
4648 __get_user(env->spr[SPR_VRSAVE],
4649 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4650 }
4651
4652 /* Restore floating point registers. */
4653 if (env->insns_flags & PPC_FLOAT) {
4654 uint64_t fpscr;
4655 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4656 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4657 }
4658 __get_user(fpscr, &frame->mc_fregs[32]);
4659 env->fpscr = (uint32_t) fpscr;
4660 }
4661
4662 /* Restore SPE registers. The kernel only saved the high half. */
4663 if (env->insns_flags & PPC_SPE) {
4664 #if defined(TARGET_PPC64)
4665 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4666 uint32_t hi;
4667
4668 __get_user(hi, &frame->mc_vregs.spe[i]);
4669 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4670 }
4671 #else
4672 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4673 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4674 }
4675 #endif
4676 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4677 }
4678 }
4679
4680 static void setup_frame(int sig, struct target_sigaction *ka,
4681 target_sigset_t *set, CPUPPCState *env)
4682 {
4683 struct target_sigframe *frame;
4684 struct target_sigcontext *sc;
4685 target_ulong frame_addr, newsp;
4686 int err = 0;
4687 #if defined(TARGET_PPC64)
4688 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4689 #endif
4690
4691 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4692 trace_user_setup_frame(env, frame_addr);
4693 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4694 goto sigsegv;
4695 sc = &frame->sctx;
4696
4697 __put_user(ka->_sa_handler, &sc->handler);
4698 __put_user(set->sig[0], &sc->oldmask);
4699 #if TARGET_ABI_BITS == 64
4700 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4701 #else
4702 __put_user(set->sig[1], &sc->_unused[3]);
4703 #endif
4704 __put_user(h2g(&frame->mctx), &sc->regs);
4705 __put_user(sig, &sc->signal);
4706
4707 /* Save user regs. */
4708 save_user_regs(env, &frame->mctx);
4709
4710 /* Construct the trampoline code on the stack. */
4711 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4712
4713 /* The kernel checks for the presence of a VDSO here. We don't
4714 emulate a vdso, so use a sigreturn system call. */
4715 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4716
4717 /* Turn off all fp exceptions. */
4718 env->fpscr = 0;
4719
4720 /* Create a stack frame for the caller of the handler. */
4721 newsp = frame_addr - SIGNAL_FRAMESIZE;
4722 err |= put_user(env->gpr[1], newsp, target_ulong);
4723
4724 if (err)
4725 goto sigsegv;
4726
4727 /* Set up registers for signal handler. */
4728 env->gpr[1] = newsp;
4729 env->gpr[3] = sig;
4730 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4731
4732 #if defined(TARGET_PPC64)
4733 if (get_ppc64_abi(image) < 2) {
4734 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4735 struct target_func_ptr *handler =
4736 (struct target_func_ptr *)g2h(ka->_sa_handler);
4737 env->nip = tswapl(handler->entry);
4738 env->gpr[2] = tswapl(handler->toc);
4739 } else {
4740 /* ELFv2 PPC64 function pointers are entry points, but R12
4741 * must also be set */
4742 env->nip = tswapl((target_ulong) ka->_sa_handler);
4743 env->gpr[12] = env->nip;
4744 }
4745 #else
4746 env->nip = (target_ulong) ka->_sa_handler;
4747 #endif
4748
4749 /* Signal handlers are entered in big-endian mode. */
4750 env->msr &= ~(1ull << MSR_LE);
4751
4752 unlock_user_struct(frame, frame_addr, 1);
4753 return;
4754
4755 sigsegv:
4756 unlock_user_struct(frame, frame_addr, 1);
4757 force_sig(TARGET_SIGSEGV);
4758 }
4759
4760 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4761 target_siginfo_t *info,
4762 target_sigset_t *set, CPUPPCState *env)
4763 {
4764 struct target_rt_sigframe *rt_sf;
4765 uint32_t *trampptr = 0;
4766 struct target_mcontext *mctx = 0;
4767 target_ulong rt_sf_addr, newsp = 0;
4768 int i, err = 0;
4769 #if defined(TARGET_PPC64)
4770 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4771 #endif
4772
4773 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4774 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4775 goto sigsegv;
4776
4777 tswap_siginfo(&rt_sf->info, info);
4778
4779 __put_user(0, &rt_sf->uc.tuc_flags);
4780 __put_user(0, &rt_sf->uc.tuc_link);
4781 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4782 &rt_sf->uc.tuc_stack.ss_sp);
4783 __put_user(sas_ss_flags(env->gpr[1]),
4784 &rt_sf->uc.tuc_stack.ss_flags);
4785 __put_user(target_sigaltstack_used.ss_size,
4786 &rt_sf->uc.tuc_stack.ss_size);
4787 #if !defined(TARGET_PPC64)
4788 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4789 &rt_sf->uc.tuc_regs);
4790 #endif
4791 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4792 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4793 }
4794
4795 #if defined(TARGET_PPC64)
4796 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4797 trampptr = &rt_sf->trampoline[0];
4798 #else
4799 mctx = &rt_sf->uc.tuc_mcontext;
4800 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4801 #endif
4802
4803 save_user_regs(env, mctx);
4804 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4805
4806 /* The kernel checks for the presence of a VDSO here. We don't
4807 emulate a vdso, so use a sigreturn system call. */
4808 env->lr = (target_ulong) h2g(trampptr);
4809
4810 /* Turn off all fp exceptions. */
4811 env->fpscr = 0;
4812
4813 /* Create a stack frame for the caller of the handler. */
4814 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4815 err |= put_user(env->gpr[1], newsp, target_ulong);
4816
4817 if (err)
4818 goto sigsegv;
4819
4820 /* Set up registers for signal handler. */
4821 env->gpr[1] = newsp;
4822 env->gpr[3] = (target_ulong) sig;
4823 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4824 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4825 env->gpr[6] = (target_ulong) h2g(rt_sf);
4826
4827 #if defined(TARGET_PPC64)
4828 if (get_ppc64_abi(image) < 2) {
4829 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4830 struct target_func_ptr *handler =
4831 (struct target_func_ptr *)g2h(ka->_sa_handler);
4832 env->nip = tswapl(handler->entry);
4833 env->gpr[2] = tswapl(handler->toc);
4834 } else {
4835 /* ELFv2 PPC64 function pointers are entry points, but R12
4836 * must also be set */
4837 env->nip = tswapl((target_ulong) ka->_sa_handler);
4838 env->gpr[12] = env->nip;
4839 }
4840 #else
4841 env->nip = (target_ulong) ka->_sa_handler;
4842 #endif
4843
4844 /* Signal handlers are entered in big-endian mode. */
4845 env->msr &= ~(1ull << MSR_LE);
4846
4847 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4848 return;
4849
4850 sigsegv:
4851 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4852 force_sig(TARGET_SIGSEGV);
4853
4854 }
4855
4856 long do_sigreturn(CPUPPCState *env)
4857 {
4858 struct target_sigcontext *sc = NULL;
4859 struct target_mcontext *sr = NULL;
4860 target_ulong sr_addr = 0, sc_addr;
4861 sigset_t blocked;
4862 target_sigset_t set;
4863
4864 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4865 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4866 goto sigsegv;
4867
4868 #if defined(TARGET_PPC64)
4869 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4870 #else
4871 __get_user(set.sig[0], &sc->oldmask);
4872 __get_user(set.sig[1], &sc->_unused[3]);
4873 #endif
4874 target_to_host_sigset_internal(&blocked, &set);
4875 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
4876
4877 __get_user(sr_addr, &sc->regs);
4878 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4879 goto sigsegv;
4880 restore_user_regs(env, sr, 1);
4881
4882 unlock_user_struct(sr, sr_addr, 1);
4883 unlock_user_struct(sc, sc_addr, 1);
4884 return -TARGET_QEMU_ESIGRETURN;
4885
4886 sigsegv:
4887 unlock_user_struct(sr, sr_addr, 1);
4888 unlock_user_struct(sc, sc_addr, 1);
4889 force_sig(TARGET_SIGSEGV);
4890 return 0;
4891 }
4892
4893 /* See arch/powerpc/kernel/signal_32.c. */
4894 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4895 {
4896 struct target_mcontext *mcp;
4897 target_ulong mcp_addr;
4898 sigset_t blocked;
4899 target_sigset_t set;
4900
4901 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4902 sizeof (set)))
4903 return 1;
4904
4905 #if defined(TARGET_PPC64)
4906 mcp_addr = h2g(ucp) +
4907 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4908 #else
4909 __get_user(mcp_addr, &ucp->tuc_regs);
4910 #endif
4911
4912 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4913 return 1;
4914
4915 target_to_host_sigset_internal(&blocked, &set);
4916 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
4917 restore_user_regs(env, mcp, sig);
4918
4919 unlock_user_struct(mcp, mcp_addr, 1);
4920 return 0;
4921 }
4922
4923 long do_rt_sigreturn(CPUPPCState *env)
4924 {
4925 struct target_rt_sigframe *rt_sf = NULL;
4926 target_ulong rt_sf_addr;
4927
4928 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4929 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4930 goto sigsegv;
4931
4932 if (do_setcontext(&rt_sf->uc, env, 1))
4933 goto sigsegv;
4934
4935 do_sigaltstack(rt_sf_addr
4936 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4937 0, env->gpr[1]);
4938
4939 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4940 return -TARGET_QEMU_ESIGRETURN;
4941
4942 sigsegv:
4943 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4944 force_sig(TARGET_SIGSEGV);
4945 return 0;
4946 }
4947
4948 #elif defined(TARGET_M68K)
4949
4950 struct target_sigcontext {
4951 abi_ulong sc_mask;
4952 abi_ulong sc_usp;
4953 abi_ulong sc_d0;
4954 abi_ulong sc_d1;
4955 abi_ulong sc_a0;
4956 abi_ulong sc_a1;
4957 unsigned short sc_sr;
4958 abi_ulong sc_pc;
4959 };
4960
4961 struct target_sigframe
4962 {
4963 abi_ulong pretcode;
4964 int sig;
4965 int code;
4966 abi_ulong psc;
4967 char retcode[8];
4968 abi_ulong extramask[TARGET_NSIG_WORDS-1];
4969 struct target_sigcontext sc;
4970 };
4971
4972 typedef int target_greg_t;
4973 #define TARGET_NGREG 18
4974 typedef target_greg_t target_gregset_t[TARGET_NGREG];
4975
4976 typedef struct target_fpregset {
4977 int f_fpcntl[3];
4978 int f_fpregs[8*3];
4979 } target_fpregset_t;
4980
4981 struct target_mcontext {
4982 int version;
4983 target_gregset_t gregs;
4984 target_fpregset_t fpregs;
4985 };
4986
4987 #define TARGET_MCONTEXT_VERSION 2
4988
4989 struct target_ucontext {
4990 abi_ulong tuc_flags;
4991 abi_ulong tuc_link;
4992 target_stack_t tuc_stack;
4993 struct target_mcontext tuc_mcontext;
4994 abi_long tuc_filler[80];
4995 target_sigset_t tuc_sigmask;
4996 };
4997
4998 struct target_rt_sigframe
4999 {
5000 abi_ulong pretcode;
5001 int sig;
5002 abi_ulong pinfo;
5003 abi_ulong puc;
5004 char retcode[8];
5005 struct target_siginfo info;
5006 struct target_ucontext uc;
5007 };
5008
5009 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5010 abi_ulong mask)
5011 {
5012 __put_user(mask, &sc->sc_mask);
5013 __put_user(env->aregs[7], &sc->sc_usp);
5014 __put_user(env->dregs[0], &sc->sc_d0);
5015 __put_user(env->dregs[1], &sc->sc_d1);
5016 __put_user(env->aregs[0], &sc->sc_a0);
5017 __put_user(env->aregs[1], &sc->sc_a1);
5018 __put_user(env->sr, &sc->sc_sr);
5019 __put_user(env->pc, &sc->sc_pc);
5020 }
5021
5022 static void
5023 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5024 {
5025 int temp;
5026
5027 __get_user(env->aregs[7], &sc->sc_usp);
5028 __get_user(env->dregs[0], &sc->sc_d0);
5029 __get_user(env->dregs[1], &sc->sc_d1);
5030 __get_user(env->aregs[0], &sc->sc_a0);
5031 __get_user(env->aregs[1], &sc->sc_a1);
5032 __get_user(env->pc, &sc->sc_pc);
5033 __get_user(temp, &sc->sc_sr);
5034 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5035 }
5036
5037 /*
5038 * Determine which stack to use.
5039 */
5040 static inline abi_ulong
5041 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5042 size_t frame_size)
5043 {
5044 unsigned long sp;
5045
5046 sp = regs->aregs[7];
5047
5048 /* This is the X/Open sanctioned signal stack switching. */
5049 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5050 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5051 }
5052
5053 return ((sp - frame_size) & -8UL);
5054 }
5055
5056 static void setup_frame(int sig, struct target_sigaction *ka,
5057 target_sigset_t *set, CPUM68KState *env)
5058 {
5059 struct target_sigframe *frame;
5060 abi_ulong frame_addr;
5061 abi_ulong retcode_addr;
5062 abi_ulong sc_addr;
5063 int i;
5064
5065 frame_addr = get_sigframe(ka, env, sizeof *frame);
5066 trace_user_setup_frame(env, frame_addr);
5067 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5068 goto give_sigsegv;
5069 }
5070
5071 __put_user(sig, &frame->sig);
5072
5073 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5074 __put_user(sc_addr, &frame->psc);
5075
5076 setup_sigcontext(&frame->sc, env, set->sig[0]);
5077
5078 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5079 __put_user(set->sig[i], &frame->extramask[i - 1]);
5080 }
5081
5082 /* Set up to return from userspace. */
5083
5084 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5085 __put_user(retcode_addr, &frame->pretcode);
5086
5087 /* moveq #,d0; trap #0 */
5088
5089 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5090 (uint32_t *)(frame->retcode));
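    /* The single 32-bit store above packs two 16-bit instructions: the high
       halfword 0x70xx is "moveq #xx,d0" and the low halfword 0x4e40 is
       "trap #0"; adding (TARGET_NR_sigreturn << 16) places the syscall
       number in the moveq immediate byte. */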
5091
5092 /* Set up to return from userspace */
5093
5094 env->aregs[7] = frame_addr;
5095 env->pc = ka->_sa_handler;
5096
5097 unlock_user_struct(frame, frame_addr, 1);
5098 return;
5099
5100 give_sigsegv:
5101 force_sig(TARGET_SIGSEGV);
5102 }
5103
5104 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5105 CPUM68KState *env)
5106 {
5107 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5108
5109 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5110 __put_user(env->dregs[0], &gregs[0]);
5111 __put_user(env->dregs[1], &gregs[1]);
5112 __put_user(env->dregs[2], &gregs[2]);
5113 __put_user(env->dregs[3], &gregs[3]);
5114 __put_user(env->dregs[4], &gregs[4]);
5115 __put_user(env->dregs[5], &gregs[5]);
5116 __put_user(env->dregs[6], &gregs[6]);
5117 __put_user(env->dregs[7], &gregs[7]);
5118 __put_user(env->aregs[0], &gregs[8]);
5119 __put_user(env->aregs[1], &gregs[9]);
5120 __put_user(env->aregs[2], &gregs[10]);
5121 __put_user(env->aregs[3], &gregs[11]);
5122 __put_user(env->aregs[4], &gregs[12]);
5123 __put_user(env->aregs[5], &gregs[13]);
5124 __put_user(env->aregs[6], &gregs[14]);
5125 __put_user(env->aregs[7], &gregs[15]);
5126 __put_user(env->pc, &gregs[16]);
5127 __put_user(env->sr, &gregs[17]);
5128
5129 return 0;
5130 }
5131
5132 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5133 struct target_ucontext *uc)
5134 {
5135 int temp;
5136 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5137
5138 __get_user(temp, &uc->tuc_mcontext.version);
5139 if (temp != TARGET_MCONTEXT_VERSION)
5140 goto badframe;
5141
5142 /* restore passed registers */
5143 __get_user(env->dregs[0], &gregs[0]);
5144 __get_user(env->dregs[1], &gregs[1]);
5145 __get_user(env->dregs[2], &gregs[2]);
5146 __get_user(env->dregs[3], &gregs[3]);
5147 __get_user(env->dregs[4], &gregs[4]);
5148 __get_user(env->dregs[5], &gregs[5]);
5149 __get_user(env->dregs[6], &gregs[6]);
5150 __get_user(env->dregs[7], &gregs[7]);
5151 __get_user(env->aregs[0], &gregs[8]);
5152 __get_user(env->aregs[1], &gregs[9]);
5153 __get_user(env->aregs[2], &gregs[10]);
5154 __get_user(env->aregs[3], &gregs[11]);
5155 __get_user(env->aregs[4], &gregs[12]);
5156 __get_user(env->aregs[5], &gregs[13]);
5157 __get_user(env->aregs[6], &gregs[14]);
5158 __get_user(env->aregs[7], &gregs[15]);
5159 __get_user(env->pc, &gregs[16]);
5160 __get_user(temp, &gregs[17]);
5161 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5162
5163 return 0;
5164
5165 badframe:
5166 return 1;
5167 }
5168
5169 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5170 target_siginfo_t *info,
5171 target_sigset_t *set, CPUM68KState *env)
5172 {
5173 struct target_rt_sigframe *frame;
5174 abi_ulong frame_addr;
5175 abi_ulong retcode_addr;
5176 abi_ulong info_addr;
5177 abi_ulong uc_addr;
5178 int err = 0;
5179 int i;
5180
5181 frame_addr = get_sigframe(ka, env, sizeof *frame);
5182 trace_user_setup_rt_frame(env, frame_addr);
5183 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5184 goto give_sigsegv;
5185 }
5186
5187 __put_user(sig, &frame->sig);
5188
5189 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5190 __put_user(info_addr, &frame->pinfo);
5191
5192 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5193 __put_user(uc_addr, &frame->puc);
5194
5195 tswap_siginfo(&frame->info, info);
5196
5197 /* Create the ucontext */
5198
5199 __put_user(0, &frame->uc.tuc_flags);
5200 __put_user(0, &frame->uc.tuc_link);
5201 __put_user(target_sigaltstack_used.ss_sp,
5202 &frame->uc.tuc_stack.ss_sp);
5203 __put_user(sas_ss_flags(env->aregs[7]),
5204 &frame->uc.tuc_stack.ss_flags);
5205 __put_user(target_sigaltstack_used.ss_size,
5206 &frame->uc.tuc_stack.ss_size);
5207 err |= target_rt_setup_ucontext(&frame->uc, env);
5208
5209 if (err)
5210 goto give_sigsegv;
5211
5212 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5213 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5214 }
5215
5216 /* Set up to return from userspace. */
5217
5218 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5219 __put_user(retcode_addr, &frame->pretcode);
5220
5221 /* moveq #,d0; notb d0; trap #0 */
5222
5223 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5224 (uint32_t *)(frame->retcode + 0));
5225 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
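    /* moveq sign-extends its 8-bit immediate, so a syscall number that does
       not fit is stored complemented (TARGET_NR_rt_sigreturn ^ 0xff); the
       following "not.b d0" (0x4600) flips the low byte back to the real
       number before "trap #0" (0x4e40) is executed. */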
5226
5227 if (err)
5228 goto give_sigsegv;
5229
5230 /* Set up to return from userspace */
5231
5232 env->aregs[7] = frame_addr;
5233 env->pc = ka->_sa_handler;
5234
5235 unlock_user_struct(frame, frame_addr, 1);
5236 return;
5237
5238 give_sigsegv:
5239 unlock_user_struct(frame, frame_addr, 1);
5240 force_sig(TARGET_SIGSEGV);
5241 }
5242
5243 long do_sigreturn(CPUM68KState *env)
5244 {
5245 struct target_sigframe *frame;
5246 abi_ulong frame_addr = env->aregs[7] - 4;
5247 target_sigset_t target_set;
5248 sigset_t set;
5249 int i;
5250
5251 trace_user_do_sigreturn(env, frame_addr);
5252 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5253 goto badframe;
5254
5255 /* set blocked signals */
5256
5257 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5258
5259 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5260 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5261 }
5262
5263 target_to_host_sigset_internal(&set, &target_set);
5264 do_sigprocmask(SIG_SETMASK, &set, NULL);
5265
5266 /* restore registers */
5267
5268 restore_sigcontext(env, &frame->sc);
5269
5270 unlock_user_struct(frame, frame_addr, 0);
5271 return -TARGET_QEMU_ESIGRETURN;
5272
5273 badframe:
5274 force_sig(TARGET_SIGSEGV);
5275 return 0;
5276 }
5277
5278 long do_rt_sigreturn(CPUM68KState *env)
5279 {
5280 struct target_rt_sigframe *frame;
5281 abi_ulong frame_addr = env->aregs[7] - 4;
5283 sigset_t set;
5284
5285 trace_user_do_rt_sigreturn(env, frame_addr);
5286 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5287 goto badframe;
5288
5289 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5290 do_sigprocmask(SIG_SETMASK, &set, NULL);
5291
5292 /* restore registers */
5293
5294 if (target_rt_restore_ucontext(env, &frame->uc))
5295 goto badframe;
5296
5297 if (do_sigaltstack(frame_addr +
5298 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5299 0, get_sp_from_cpustate(env)) == -EFAULT)
5300 goto badframe;
5301
5302 unlock_user_struct(frame, frame_addr, 0);
5303 return -TARGET_QEMU_ESIGRETURN;
5304
5305 badframe:
5306 unlock_user_struct(frame, frame_addr, 0);
5307 force_sig(TARGET_SIGSEGV);
5308 return 0;
5309 }
5310
5311 #elif defined(TARGET_ALPHA)
5312
5313 struct target_sigcontext {
5314 abi_long sc_onstack;
5315 abi_long sc_mask;
5316 abi_long sc_pc;
5317 abi_long sc_ps;
5318 abi_long sc_regs[32];
5319 abi_long sc_ownedfp;
5320 abi_long sc_fpregs[32];
5321 abi_ulong sc_fpcr;
5322 abi_ulong sc_fp_control;
5323 abi_ulong sc_reserved1;
5324 abi_ulong sc_reserved2;
5325 abi_ulong sc_ssize;
5326 abi_ulong sc_sbase;
5327 abi_ulong sc_traparg_a0;
5328 abi_ulong sc_traparg_a1;
5329 abi_ulong sc_traparg_a2;
5330 abi_ulong sc_fp_trap_pc;
5331 abi_ulong sc_fp_trigger_sum;
5332 abi_ulong sc_fp_trigger_inst;
5333 };
5334
5335 struct target_ucontext {
5336 abi_ulong tuc_flags;
5337 abi_ulong tuc_link;
5338 abi_ulong tuc_osf_sigmask;
5339 target_stack_t tuc_stack;
5340 struct target_sigcontext tuc_mcontext;
5341 target_sigset_t tuc_sigmask;
5342 };
5343
5344 struct target_sigframe {
5345 struct target_sigcontext sc;
5346 unsigned int retcode[3];
5347 };
5348
5349 struct target_rt_sigframe {
5350 target_siginfo_t info;
5351 struct target_ucontext uc;
5352 unsigned int retcode[3];
5353 };
5354
5355 #define INSN_MOV_R30_R16 0x47fe0410
5356 #define INSN_LDI_R0 0x201f0000
5357 #define INSN_CALLSYS 0x00000083
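/* Reading implied by the macro names: r30 is the Alpha stack pointer and r16
 * is a0, so the default restorer built below is "mov sp,a0; ldi v0,<nr>;
 * callsys", i.e. it passes the frame address (which do_sigreturn() reads back
 * from IR_A0) to the (rt_)sigreturn syscall. */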
5358
5359 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5360 abi_ulong frame_addr, target_sigset_t *set)
5361 {
5362 int i;
5363
5364 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5365 __put_user(set->sig[0], &sc->sc_mask);
5366 __put_user(env->pc, &sc->sc_pc);
5367 __put_user(8, &sc->sc_ps);
5368
5369 for (i = 0; i < 31; ++i) {
5370 __put_user(env->ir[i], &sc->sc_regs[i]);
5371 }
5372 __put_user(0, &sc->sc_regs[31]);
5373
5374 for (i = 0; i < 31; ++i) {
5375 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5376 }
5377 __put_user(0, &sc->sc_fpregs[31]);
5378 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5379
5380 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5381 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5382 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5383 }
5384
5385 static void restore_sigcontext(CPUAlphaState *env,
5386 struct target_sigcontext *sc)
5387 {
5388 uint64_t fpcr;
5389 int i;
5390
5391 __get_user(env->pc, &sc->sc_pc);
5392
5393 for (i = 0; i < 31; ++i) {
5394 __get_user(env->ir[i], &sc->sc_regs[i]);
5395 }
5396 for (i = 0; i < 31; ++i) {
5397 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5398 }
5399
5400 __get_user(fpcr, &sc->sc_fpcr);
5401 cpu_alpha_store_fpcr(env, fpcr);
5402 }
5403
5404 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5405 CPUAlphaState *env,
5406 unsigned long framesize)
5407 {
5408 abi_ulong sp = env->ir[IR_SP];
5409
5410 /* This is the X/Open sanctioned signal stack switching. */
5411 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5412 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5413 }
5414 return (sp - framesize) & -32;
5415 }
5416
5417 static void setup_frame(int sig, struct target_sigaction *ka,
5418 target_sigset_t *set, CPUAlphaState *env)
5419 {
5420 abi_ulong frame_addr, r26;
5421 struct target_sigframe *frame;
5422 int err = 0;
5423
5424 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5425 trace_user_setup_frame(env, frame_addr);
5426 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5427 goto give_sigsegv;
5428 }
5429
5430 setup_sigcontext(&frame->sc, env, frame_addr, set);
5431
5432 if (ka->sa_restorer) {
5433 r26 = ka->sa_restorer;
5434 } else {
5435 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5436 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5437 &frame->retcode[1]);
5438 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5439 /* imb() */
5440 r26 = frame_addr;
5441 }
5442
5443 unlock_user_struct(frame, frame_addr, 1);
5444
5445 if (err) {
5446 give_sigsegv:
5447 if (sig == TARGET_SIGSEGV) {
5448 ka->_sa_handler = TARGET_SIG_DFL;
5449 }
5450 force_sig(TARGET_SIGSEGV);
5451 }
5452
5453 env->ir[IR_RA] = r26;
5454 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5455 env->ir[IR_A0] = sig;
5456 env->ir[IR_A1] = 0;
5457 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5458 env->ir[IR_SP] = frame_addr;
5459 }
5460
5461 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5462 target_siginfo_t *info,
5463 target_sigset_t *set, CPUAlphaState *env)
5464 {
5465 abi_ulong frame_addr, r26;
5466 struct target_rt_sigframe *frame;
5467 int i, err = 0;
5468
5469 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5470 trace_user_setup_rt_frame(env, frame_addr);
5471 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5472 goto give_sigsegv;
5473 }
5474
5475 tswap_siginfo(&frame->info, info);
5476
5477 __put_user(0, &frame->uc.tuc_flags);
5478 __put_user(0, &frame->uc.tuc_link);
5479 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5480 __put_user(target_sigaltstack_used.ss_sp,
5481 &frame->uc.tuc_stack.ss_sp);
5482 __put_user(sas_ss_flags(env->ir[IR_SP]),
5483 &frame->uc.tuc_stack.ss_flags);
5484 __put_user(target_sigaltstack_used.ss_size,
5485 &frame->uc.tuc_stack.ss_size);
5486 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5487 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5488 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5489 }
5490
5491 if (ka->sa_restorer) {
5492 r26 = ka->sa_restorer;
5493 } else {
5494 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5495 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5496 &frame->retcode[1]);
5497 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5498 /* imb(); */
5499 r26 = frame_addr;
5500 }
5501
5502 if (err) {
5503 give_sigsegv:
5504 if (sig == TARGET_SIGSEGV) {
5505 ka->_sa_handler = TARGET_SIG_DFL;
5506 }
5507 force_sig(TARGET_SIGSEGV);
5508 }
5509
5510 env->ir[IR_RA] = r26;
5511 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5512 env->ir[IR_A0] = sig;
5513 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5514 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5515 env->ir[IR_SP] = frame_addr;
5516 }
5517
5518 long do_sigreturn(CPUAlphaState *env)
5519 {
5520 struct target_sigcontext *sc;
5521 abi_ulong sc_addr = env->ir[IR_A0];
5522 target_sigset_t target_set;
5523 sigset_t set;
5524
5525 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5526 goto badframe;
5527 }
5528
5529 target_sigemptyset(&target_set);
5530 __get_user(target_set.sig[0], &sc->sc_mask);
5531
5532 target_to_host_sigset_internal(&set, &target_set);
5533 do_sigprocmask(SIG_SETMASK, &set, NULL);
5534
5535 restore_sigcontext(env, sc);
5536 unlock_user_struct(sc, sc_addr, 0);
5537 return -TARGET_QEMU_ESIGRETURN;
5538
5539 badframe:
5540 force_sig(TARGET_SIGSEGV);
5541 }
5542
5543 long do_rt_sigreturn(CPUAlphaState *env)
5544 {
5545 abi_ulong frame_addr = env->ir[IR_A0];
5546 struct target_rt_sigframe *frame;
5547 sigset_t set;
5548
5549 trace_user_do_rt_sigreturn(env, frame_addr);
5550 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5551 goto badframe;
5552 }
5553 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5554 do_sigprocmask(SIG_SETMASK, &set, NULL);
5555
5556 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5557 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5558 uc.tuc_stack),
5559 0, env->ir[IR_SP]) == -EFAULT) {
5560 goto badframe;
5561 }
5562
5563 unlock_user_struct(frame, frame_addr, 0);
5564 return -TARGET_QEMU_ESIGRETURN;
5565
5566
5567 badframe:
5568 unlock_user_struct(frame, frame_addr, 0);
5569 force_sig(TARGET_SIGSEGV);
5570 }
5571
5572 #elif defined(TARGET_TILEGX)
5573
5574 struct target_sigcontext {
5575 union {
5576 /* General-purpose registers. */
5577 abi_ulong gregs[56];
5578 struct {
5579 abi_ulong __gregs[53];
5580 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5581 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5582 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5583 };
5584 };
5585 abi_ulong pc; /* Program counter. */
5586 abi_ulong ics; /* In Interrupt Critical Section? */
5587 abi_ulong faultnum; /* Fault number. */
5588 abi_ulong pad[5];
5589 };
5590
5591 struct target_ucontext {
5592 abi_ulong tuc_flags;
5593 abi_ulong tuc_link;
5594 target_stack_t tuc_stack;
5595 struct target_sigcontext tuc_mcontext;
5596 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5597 };
5598
5599 struct target_rt_sigframe {
5600 unsigned char save_area[16]; /* caller save area */
5601 struct target_siginfo info;
5602 struct target_ucontext uc;
5603 abi_ulong retcode[2];
5604 };
5605
5606 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5607 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
5608
5609
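/* Save the guest general registers, pc and the signal number (as
 * faultnum) into the sigcontext; ICS is recorded as clear. */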
5610 static void setup_sigcontext(struct target_sigcontext *sc,
5611 CPUArchState *env, int signo)
5612 {
5613 int i;
5614
5615 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5616 __put_user(env->regs[i], &sc->gregs[i]);
5617 }
5618
5619 __put_user(env->pc, &sc->pc);
5620 __put_user(0, &sc->ics);
5621 __put_user(signo, &sc->faultnum);
5622 }
5623
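/* Reload the general registers and pc from the sigcontext; ics and
 * faultnum are not restored. */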
5624 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5625 {
5626 int i;
5627
5628 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5629 __get_user(env->regs[i], &sc->gregs[i]);
5630 }
5631
5632 __get_user(env->pc, &sc->pc);
5633 }
5634
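/* Pick the guest stack address for the signal frame: fail if the frame
 * would run off the alternate stack, switch to the alternate stack for
 * SA_ONSTACK handlers, and keep the frame 16-byte aligned. */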
5635 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5636 size_t frame_size)
5637 {
5638 unsigned long sp = env->regs[TILEGX_R_SP];
5639
5640 if (on_sig_stack(sp) && unlikely(!on_sig_stack(sp - frame_size))) {
5641 return -1UL;
5642 }
5643
5644 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5645 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5646 }
5647
5648 sp -= frame_size;
5649 sp &= -16UL;
5650 return sp;
5651 }
5652
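/* Build the rt signal frame on the guest stack: siginfo, a ucontext
 * holding the saved machine state, and a two-bundle trampoline that
 * loads the rt_sigreturn syscall number into r10 and issues swint1
 * when the guest did not supply its own restorer. */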
5653 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5654 target_siginfo_t *info,
5655 target_sigset_t *set, CPUArchState *env)
5656 {
5657 abi_ulong frame_addr;
5658 struct target_rt_sigframe *frame;
5659 unsigned long restorer;
5660
5661 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5662 trace_user_setup_rt_frame(env, frame_addr);
5663 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5664 goto give_sigsegv;
5665 }
5666
5667 /* Always write at least the signal number for the stack backtracer. */
5668 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5669 /* At sigreturn time, restore the callee-save registers too. */
5670 tswap_siginfo(&frame->info, info);
5671 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can we skip this? */
5672 } else {
5673 __put_user(info->si_signo, &frame->info.si_signo);
5674 }
5675
5676 /* Create the ucontext. */
5677 __put_user(0, &frame->uc.tuc_flags);
5678 __put_user(0, &frame->uc.tuc_link);
5679 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5680 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5681 &frame->uc.tuc_stack.ss_flags);
5682 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5683 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5684
5685 if (ka->sa_flags & TARGET_SA_RESTORER) {
5686 restorer = (unsigned long) ka->sa_restorer;
5687 } else {
5688 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5689 __put_user(INSN_SWINT1, &frame->retcode[1]);
5690 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5691 }
5692 env->pc = (unsigned long) ka->_sa_handler;
/* Store guest addresses, not host pointers, in the guest registers;
 * do_rt_sigreturn below reads sp back as a guest address. */
5693 env->regs[TILEGX_R_SP] = frame_addr;
5694 env->regs[TILEGX_R_LR] = restorer;
5695 env->regs[0] = (unsigned long) sig;
5696 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5697 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5698 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can we skip this? */
5699
5700 unlock_user_struct(frame, frame_addr, 1);
5701 return;
5702
5703 give_sigsegv:
5704 if (sig == TARGET_SIGSEGV) {
5705 ka->_sa_handler = TARGET_SIG_DFL;
5706 }
5707 force_sig(TARGET_SIGSEGV /* , current */);
5708 }
5709
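/* Undo setup_rt_frame: sp points at the rt_sigframe, so restore the
 * signal mask, the machine context and the signal-stack settings from
 * its ucontext. */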
5710 long do_rt_sigreturn(CPUTLGState *env)
5711 {
5712 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5713 struct target_rt_sigframe *frame;
5714 sigset_t set;
5715
5716 trace_user_do_rt_sigreturn(env, frame_addr);
5717 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5718 goto badframe;
5719 }
5720 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5721 do_sigprocmask(SIG_SETMASK, &set, NULL);
5722
5723 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5724 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5725 uc.tuc_stack),
5726 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5727 goto badframe;
5728 }
5729
5730 unlock_user_struct(frame, frame_addr, 0);
5731 return -TARGET_QEMU_ESIGRETURN;
5732
5734 badframe:
5735 unlock_user_struct(frame, frame_addr, 0);
5736 force_sig(TARGET_SIGSEGV);
5737 }
5738
5739 #else
5740
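/* Stub implementations for targets that do not yet implement guest
 * signal frames. */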
5741 static void setup_frame(int sig, struct target_sigaction *ka,
5742 target_sigset_t *set, CPUArchState *env)
5743 {
5744 fprintf(stderr, "setup_frame: not implemented\n");
5745 }
5746
5747 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5748 target_siginfo_t *info,
5749 target_sigset_t *set, CPUArchState *env)
5750 {
5751 fprintf(stderr, "setup_rt_frame: not implemented\n");
5752 }
5753
5754 long do_sigreturn(CPUArchState *env)
5755 {
5756 fprintf(stderr, "do_sigreturn: not implemented\n");
5757 return -TARGET_ENOSYS;
5758 }
5759
5760 long do_rt_sigreturn(CPUArchState *env)
5761 {
5762 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5763 return -TARGET_ENOSYS;
5764 }
5765
5766 #endif
5767
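/* Deliver one queued instance of sig to the guest: dequeue it, give gdb
 * a chance to intercept it, then either apply the default action, ignore
 * it, or block sa_mask (plus the signal itself unless SA_NODEFER is set)
 * and build a signal frame on the guest stack. */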
5768 static void handle_pending_signal(CPUArchState *cpu_env, int sig)
5769 {
5770 CPUState *cpu = ENV_GET_CPU(cpu_env);
5771 abi_ulong handler;
5772 sigset_t set, old_set;
5773 target_sigset_t target_old_set;
5774 struct target_sigaction *sa;
5775 struct sigqueue *q;
5776 TaskState *ts = cpu->opaque;
5777 struct emulated_sigtable *k = &ts->sigtab[sig - 1];
5778
5779 trace_user_handle_signal(cpu_env, sig);
5780 /* dequeue signal */
5781 q = k->first;
5782 k->first = q->next;
5783 if (!k->first)
5784 k->pending = 0;
5785
5786 sig = gdb_handlesig(cpu, sig);
5787 if (!sig) {
5788 sa = NULL;
5789 handler = TARGET_SIG_IGN;
5790 } else {
5791 sa = &sigact_table[sig - 1];
5792 handler = sa->_sa_handler;
5793 }
5794
5795 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
5796 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
5797 * is a forced SIGSEGV (i.e. one the kernel handles via force_sig_info
5798 * because it got a real MMU fault), and treat it as if the default
5799 * handler were installed. */
5800 handler = TARGET_SIG_DFL;
5801 }
5802
5803 if (handler == TARGET_SIG_DFL) {
5804 /* default handler: ignore some signals; the others are job control or fatal */
5805 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5806 kill(getpid(), SIGSTOP);
5807 } else if (sig != TARGET_SIGCHLD &&
5808 sig != TARGET_SIGURG &&
5809 sig != TARGET_SIGWINCH &&
5810 sig != TARGET_SIGCONT) {
5811 force_sig(sig);
5812 }
5813 } else if (handler == TARGET_SIG_IGN) {
5814 /* ignore sig */
5815 } else if (handler == TARGET_SIG_ERR) {
5816 force_sig(sig);
5817 } else {
5818 /* compute the blocked signals during the handler execution */
5819 target_to_host_sigset(&set, &sa->sa_mask);
5820 /* SA_NODEFER indicates that the current signal should not be
5821 blocked during the handler */
5822 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5823 sigaddset(&set, target_to_host_signal(sig));
5824
5825 /* block signals in the handler using Linux */
5826 do_sigprocmask(SIG_BLOCK, &set, &old_set);
5827 /* save the previous blocked signal state to restore it at the
5828 end of the signal execution (see do_sigreturn) */
5829 host_to_target_sigset_internal(&target_old_set, &old_set);
5830
5831 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5832 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5833 {
5834 CPUX86State *env = cpu_env;
5835 if (env->eflags & VM_MASK)
5836 save_v86_state(env);
5837 }
5838 #endif
5839 /* prepare the stack frame of the virtual CPU */
5840 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5841 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5842 /* These targets do not have traditional signals. */
5843 setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
5844 #else
5845 if (sa->sa_flags & TARGET_SA_SIGINFO)
5846 setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
5847 else
5848 setup_frame(sig, sa, &target_old_set, cpu_env);
5849 #endif
5850 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5851 sa->_sa_handler = TARGET_SIG_DFL;
5852 }
5853 }
5854 if (q != &k->info)
5855 free_sigqueue(cpu_env, q);
5856 }
5857
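/* Called from the main cpu loop: deliver at most one pending signal per
 * call, and clear signal_pending only once no signal is left pending. */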
5858 void process_pending_signals(CPUArchState *cpu_env)
5859 {
5860 CPUState *cpu = ENV_GET_CPU(cpu_env);
5861 int sig;
5862 TaskState *ts = cpu->opaque;
5863
5864 if (!ts->signal_pending)
5865 return;
5866
5867 /* FIXME: This is not thread-safe. */
5868 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5869 if (ts->sigtab[sig - 1].pending) {
5870 handle_pending_signal(cpu_env, sig);
5871 return;
5872 }
5873 }
5874 /* no signal left pending: clear the flag and return */
5875 ts->signal_pending = 0;
5876 return;
5877 }