linux-user/signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include <sys/ucontext.h>
21 #include <sys/resource.h>
22
23 #include "qemu.h"
24 #include "qemu-common.h"
25 #include "target_signal.h"
26 #include "trace.h"
27
28 static struct target_sigaltstack target_sigaltstack_used = {
29 .ss_sp = 0,
30 .ss_size = 0,
31 .ss_flags = TARGET_SS_DISABLE,
32 };
33
34 static struct target_sigaction sigact_table[TARGET_NSIG];
35
36 static void host_signal_handler(int host_signum, siginfo_t *info,
37 void *puc);
38
39 static uint8_t host_to_target_signal_table[_NSIG] = {
40 [SIGHUP] = TARGET_SIGHUP,
41 [SIGINT] = TARGET_SIGINT,
42 [SIGQUIT] = TARGET_SIGQUIT,
43 [SIGILL] = TARGET_SIGILL,
44 [SIGTRAP] = TARGET_SIGTRAP,
45 [SIGABRT] = TARGET_SIGABRT,
46 /* [SIGIOT] = TARGET_SIGIOT,*/
47 [SIGBUS] = TARGET_SIGBUS,
48 [SIGFPE] = TARGET_SIGFPE,
49 [SIGKILL] = TARGET_SIGKILL,
50 [SIGUSR1] = TARGET_SIGUSR1,
51 [SIGSEGV] = TARGET_SIGSEGV,
52 [SIGUSR2] = TARGET_SIGUSR2,
53 [SIGPIPE] = TARGET_SIGPIPE,
54 [SIGALRM] = TARGET_SIGALRM,
55 [SIGTERM] = TARGET_SIGTERM,
56 #ifdef SIGSTKFLT
57 [SIGSTKFLT] = TARGET_SIGSTKFLT,
58 #endif
59 [SIGCHLD] = TARGET_SIGCHLD,
60 [SIGCONT] = TARGET_SIGCONT,
61 [SIGSTOP] = TARGET_SIGSTOP,
62 [SIGTSTP] = TARGET_SIGTSTP,
63 [SIGTTIN] = TARGET_SIGTTIN,
64 [SIGTTOU] = TARGET_SIGTTOU,
65 [SIGURG] = TARGET_SIGURG,
66 [SIGXCPU] = TARGET_SIGXCPU,
67 [SIGXFSZ] = TARGET_SIGXFSZ,
68 [SIGVTALRM] = TARGET_SIGVTALRM,
69 [SIGPROF] = TARGET_SIGPROF,
70 [SIGWINCH] = TARGET_SIGWINCH,
71 [SIGIO] = TARGET_SIGIO,
72 [SIGPWR] = TARGET_SIGPWR,
73 [SIGSYS] = TARGET_SIGSYS,
74 /* next signals stay the same */
75 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
76 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
77 To fix this properly we need to do manual signal delivery multiplexed
78 over a single host signal. */
79 [__SIGRTMIN] = __SIGRTMAX,
80 [__SIGRTMAX] = __SIGRTMIN,
81 };
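/* Entries left at zero above are turned into identity mappings by
 * signal_init(), which also builds the reverse table below. */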
82 static uint8_t target_to_host_signal_table[_NSIG];
83
84 static inline int on_sig_stack(unsigned long sp)
85 {
86 return (sp - target_sigaltstack_used.ss_sp
87 < target_sigaltstack_used.ss_size);
88 }
89
90 static inline int sas_ss_flags(unsigned long sp)
91 {
92 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
93 : on_sig_stack(sp) ? SS_ONSTACK : 0);
94 }
95
96 int host_to_target_signal(int sig)
97 {
98 if (sig < 0 || sig >= _NSIG)
99 return sig;
100 return host_to_target_signal_table[sig];
101 }
102
103 int target_to_host_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return target_to_host_signal_table[sig];
108 }
109
110 static inline void target_sigemptyset(target_sigset_t *set)
111 {
112 memset(set, 0, sizeof(*set));
113 }
114
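/* Guest signal sets use the kernel layout: signal N occupies bit
 * (N - 1) % TARGET_NSIG_BPW of word (N - 1) / TARGET_NSIG_BPW. */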
115 static inline void target_sigaddset(target_sigset_t *set, int signum)
116 {
117 signum--;
118 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
119 set->sig[signum / TARGET_NSIG_BPW] |= mask;
120 }
121
122 static inline int target_sigismember(const target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
127 }
128
129 static void host_to_target_sigset_internal(target_sigset_t *d,
130 const sigset_t *s)
131 {
132 int i;
133 target_sigemptyset(d);
134 for (i = 1; i <= TARGET_NSIG; i++) {
135 if (sigismember(s, i)) {
136 target_sigaddset(d, host_to_target_signal(i));
137 }
138 }
139 }
140
141 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
142 {
143 target_sigset_t d1;
144 int i;
145
146 host_to_target_sigset_internal(&d1, s);
147 for(i = 0;i < TARGET_NSIG_WORDS; i++)
148 d->sig[i] = tswapal(d1.sig[i]);
149 }
150
151 static void target_to_host_sigset_internal(sigset_t *d,
152 const target_sigset_t *s)
153 {
154 int i;
155 sigemptyset(d);
156 for (i = 1; i <= TARGET_NSIG; i++) {
157 if (target_sigismember(s, i)) {
158 sigaddset(d, target_to_host_signal(i));
159 }
160 }
161 }
162
163 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
164 {
165 target_sigset_t s1;
166 int i;
167
168 for(i = 0;i < TARGET_NSIG_WORDS; i++)
169 s1.sig[i] = tswapal(s->sig[i]);
170 target_to_host_sigset_internal(d, &s1);
171 }
172
173 void host_to_target_old_sigset(abi_ulong *old_sigset,
174 const sigset_t *sigset)
175 {
176 target_sigset_t d;
177 host_to_target_sigset(&d, sigset);
178 *old_sigset = d.sig[0];
179 }
180
181 void target_to_host_old_sigset(sigset_t *sigset,
182 const abi_ulong *old_sigset)
183 {
184 target_sigset_t d;
185 int i;
186
187 d.sig[0] = *old_sigset;
188 for(i = 1;i < TARGET_NSIG_WORDS; i++)
189 d.sig[i] = 0;
190 target_to_host_sigset(sigset, &d);
191 }
192
193 /* Wrapper for the sigprocmask function.
194  * Emulates sigprocmask in a way that is safe for the guest. Note that set and
195  * oldset are host signal sets, not guest ones. This wraps the host sigprocmask
196  * calls that need to be protected (calls originating from the guest).
197  */
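/* SIGSEGV is never blocked at the host level: the emulator relies on
 * receiving host SIGSEGV to detect guest faults, so only the guest's
 * blocking intent is recorded in ts->sigsegv_blocked and reflected back
 * into oldset.
 *
 * Typical call site (sketch only; variable names are illustrative):
 *
 *     sigset_t host_set, old_host_set;
 *     target_to_host_sigset(&host_set, &guest_set);
 *     ret = do_sigprocmask(SIG_SETMASK, &host_set, &old_host_set);
 */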
198 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
199 {
200 int ret;
201 sigset_t val;
202 sigset_t *temp = NULL;
203 CPUState *cpu = thread_cpu;
204 TaskState *ts = (TaskState *)cpu->opaque;
205 bool segv_was_blocked = ts->sigsegv_blocked;
206
207 if (set) {
208 bool has_sigsegv = sigismember(set, SIGSEGV);
209 val = *set;
210 temp = &val;
211
212 sigdelset(temp, SIGSEGV);
213
214 switch (how) {
215 case SIG_BLOCK:
216 if (has_sigsegv) {
217 ts->sigsegv_blocked = true;
218 }
219 break;
220 case SIG_UNBLOCK:
221 if (has_sigsegv) {
222 ts->sigsegv_blocked = false;
223 }
224 break;
225 case SIG_SETMASK:
226 ts->sigsegv_blocked = has_sigsegv;
227 break;
228 default:
229 g_assert_not_reached();
230 }
231 }
232
233 ret = sigprocmask(how, temp, oldset);
234
235 if (oldset && segv_was_blocked) {
236 sigaddset(oldset, SIGSEGV);
237 }
238
239 return ret;
240 }
241
242 /* siginfo conversion */
243
244 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
245 const siginfo_t *info)
246 {
247 int sig = host_to_target_signal(info->si_signo);
248 tinfo->si_signo = sig;
249 tinfo->si_errno = 0;
250 tinfo->si_code = info->si_code;
251
252 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
253 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
254 /* Should never come here, but who knows. The information for
255 the target is irrelevant. */
256 tinfo->_sifields._sigfault._addr = 0;
257 } else if (sig == TARGET_SIGIO) {
258 tinfo->_sifields._sigpoll._band = info->si_band;
259 tinfo->_sifields._sigpoll._fd = info->si_fd;
260 } else if (sig == TARGET_SIGCHLD) {
261 tinfo->_sifields._sigchld._pid = info->si_pid;
262 tinfo->_sifields._sigchld._uid = info->si_uid;
263 tinfo->_sifields._sigchld._status
264 = host_to_target_waitstatus(info->si_status);
265 tinfo->_sifields._sigchld._utime = info->si_utime;
266 tinfo->_sifields._sigchld._stime = info->si_stime;
267 } else if (sig >= TARGET_SIGRTMIN) {
268 tinfo->_sifields._rt._pid = info->si_pid;
269 tinfo->_sifields._rt._uid = info->si_uid;
270 /* XXX: potential problem if 64 bit */
271 tinfo->_sifields._rt._sigval.sival_ptr
272 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
273 }
274 }
275
276 static void tswap_siginfo(target_siginfo_t *tinfo,
277 const target_siginfo_t *info)
278 {
279 int sig = info->si_signo;
280 tinfo->si_signo = tswap32(sig);
281 tinfo->si_errno = tswap32(info->si_errno);
282 tinfo->si_code = tswap32(info->si_code);
283
284 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
285 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
286 tinfo->_sifields._sigfault._addr
287 = tswapal(info->_sifields._sigfault._addr);
288 } else if (sig == TARGET_SIGIO) {
289 tinfo->_sifields._sigpoll._band
290 = tswap32(info->_sifields._sigpoll._band);
291 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
292 } else if (sig == TARGET_SIGCHLD) {
293 tinfo->_sifields._sigchld._pid
294 = tswap32(info->_sifields._sigchld._pid);
295 tinfo->_sifields._sigchld._uid
296 = tswap32(info->_sifields._sigchld._uid);
297 tinfo->_sifields._sigchld._status
298 = tswap32(info->_sifields._sigchld._status);
299 tinfo->_sifields._sigchld._utime
300 = tswapal(info->_sifields._sigchld._utime);
301 tinfo->_sifields._sigchld._stime
302 = tswapal(info->_sifields._sigchld._stime);
303 } else if (sig >= TARGET_SIGRTMIN) {
304 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
305 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
306 tinfo->_sifields._rt._sigval.sival_ptr
307 = tswapal(info->_sifields._rt._sigval.sival_ptr);
308 }
309 }
310
311
312 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
313 {
314 host_to_target_siginfo_noswap(tinfo, info);
315 tswap_siginfo(tinfo, tinfo);
316 }
317
318 /* XXX: we assume that only POSIX RT signals are used. */
319 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
320 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
321 {
322 info->si_signo = tswap32(tinfo->si_signo);
323 info->si_errno = tswap32(tinfo->si_errno);
324 info->si_code = tswap32(tinfo->si_code);
325 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
326 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
327 info->si_value.sival_ptr =
328 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
329 }
330
331 static int fatal_signal (int sig)
332 {
333 switch (sig) {
334 case TARGET_SIGCHLD:
335 case TARGET_SIGURG:
336 case TARGET_SIGWINCH:
337 /* Ignored by default. */
338 return 0;
339 case TARGET_SIGCONT:
340 case TARGET_SIGSTOP:
341 case TARGET_SIGTSTP:
342 case TARGET_SIGTTIN:
343 case TARGET_SIGTTOU:
344 /* Job control signals. */
345 return 0;
346 default:
347 return 1;
348 }
349 }
350
351 /* returns 1 if the given signal should dump core when not handled */
352 static int core_dump_signal(int sig)
353 {
354 switch (sig) {
355 case TARGET_SIGABRT:
356 case TARGET_SIGFPE:
357 case TARGET_SIGILL:
358 case TARGET_SIGQUIT:
359 case TARGET_SIGSEGV:
360 case TARGET_SIGTRAP:
361 case TARGET_SIGBUS:
362 return (1);
363 default:
364 return (0);
365 }
366 }
367
368 void signal_init(void)
369 {
370 struct sigaction act;
371 struct sigaction oact;
372 int i, j;
373 int host_sig;
374
375 /* generate signal conversion tables */
376 for(i = 1; i < _NSIG; i++) {
377 if (host_to_target_signal_table[i] == 0)
378 host_to_target_signal_table[i] = i;
379 }
380 for(i = 1; i < _NSIG; i++) {
381 j = host_to_target_signal_table[i];
382 target_to_host_signal_table[j] = i;
383 }
384
385 /* set all host signal handlers. ALL signals are blocked during
386 the handlers to serialize them. */
387 memset(sigact_table, 0, sizeof(sigact_table));
388
389 sigfillset(&act.sa_mask);
390 act.sa_flags = SA_SIGINFO;
391 act.sa_sigaction = host_signal_handler;
392 for(i = 1; i <= TARGET_NSIG; i++) {
393 host_sig = target_to_host_signal(i);
394 sigaction(host_sig, NULL, &oact);
395 if (oact.sa_sigaction == (void *)SIG_IGN) {
396 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
397 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
398 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
399 }
400 /* If there's already a handler installed then something has
401 gone horribly wrong, so don't even try to handle that case. */
402 /* Install some handlers for our own use. We need at least
403 SIGSEGV and SIGBUS to detect exceptions. We cannot just
404 trap all signals because that affects syscall interrupt
405 behavior. But do trap all default-fatal signals. */
406 if (fatal_signal (i))
407 sigaction(host_sig, &act, NULL);
408 }
409 }
410
411 /* signal queue handling */
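/* RT signals beyond the first pending instance are stored in a small
 * per-task pool of struct sigqueue entries, managed as a singly linked
 * free list rooted at ts->first_free. */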
412
413 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
414 {
415 CPUState *cpu = ENV_GET_CPU(env);
416 TaskState *ts = cpu->opaque;
417 struct sigqueue *q = ts->first_free;
418 if (!q)
419 return NULL;
420 ts->first_free = q->next;
421 return q;
422 }
423
424 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
425 {
426 CPUState *cpu = ENV_GET_CPU(env);
427 TaskState *ts = cpu->opaque;
428
429 q->next = ts->first_free;
430 ts->first_free = q;
431 }
432
433 /* abort execution with signal */
434 static void QEMU_NORETURN force_sig(int target_sig)
435 {
436 CPUState *cpu = thread_cpu;
437 CPUArchState *env = cpu->env_ptr;
438 TaskState *ts = (TaskState *)cpu->opaque;
439 int host_sig, core_dumped = 0;
440 struct sigaction act;
441
442 host_sig = target_to_host_signal(target_sig);
443 trace_user_force_sig(env, target_sig, host_sig);
444 gdb_signalled(env, target_sig);
445
446 /* dump core if supported by target binary format */
447 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
448 stop_all_tasks();
449 core_dumped =
450 ((*ts->bprm->core_dump)(target_sig, env) == 0);
451 }
452 if (core_dumped) {
453 /* we already dumped the core of the target process; we don't want
454 * a coredump of qemu itself */
455 struct rlimit nodump;
456 getrlimit(RLIMIT_CORE, &nodump);
457 nodump.rlim_cur=0;
458 setrlimit(RLIMIT_CORE, &nodump);
459 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
460 target_sig, strsignal(host_sig), "core dumped" );
461 }
462
463 /* The proper exit code for dying from an uncaught signal is
464 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
465 * a negative value. To get the proper exit code we need to
466 * actually die from an uncaught signal. So we install the default
467 * signal handler, send ourselves the signal and wait for it to
468 * arrive. */
469 sigfillset(&act.sa_mask);
470 act.sa_handler = SIG_DFL;
471 act.sa_flags = 0;
472 sigaction(host_sig, &act, NULL);
473
474 /* For some reason raise(host_sig) doesn't send the signal when
475 * statically linked on x86-64. */
476 kill(getpid(), host_sig);
477
478 /* Make sure the signal isn't masked (just reuse the mask inside
479 of act) */
480 sigdelset(&act.sa_mask, host_sig);
481 sigsuspend(&act.sa_mask);
482
483 /* unreachable */
484 abort();
485 }
486
487 /* queue a signal so that it will be sent to the virtual CPU as soon
488 as possible */
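/* Returns 1 if the signal was queued, 0 if it was ignored, handled here,
 * or already pending (non-RT case), and -EAGAIN if no free sigqueue entry
 * is available. */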
489 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
490 {
491 CPUState *cpu = ENV_GET_CPU(env);
492 TaskState *ts = cpu->opaque;
493 struct emulated_sigtable *k;
494 struct sigqueue *q, **pq;
495 abi_ulong handler;
496 int queue;
497
498 trace_user_queue_signal(env, sig);
499 k = &ts->sigtab[sig - 1];
500 queue = gdb_queuesig ();
501 handler = sigact_table[sig - 1]._sa_handler;
502
503 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
504 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
505 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
506 * because it got a real MMU fault). A blocked SIGSEGV in that
507 * situation is treated as if using the default handler. This is
508 * not correct if some other process has randomly sent us a SIGSEGV
509 * via kill(), but that is not easy to distinguish at this point,
510 * so we assume it doesn't happen.
511 */
512 handler = TARGET_SIG_DFL;
513 }
514
515 if (!queue && handler == TARGET_SIG_DFL) {
516 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
517 kill(getpid(),SIGSTOP);
518 return 0;
519 } else
520 /* default handler: some signals are ignored, the others are fatal */
521 if (sig != TARGET_SIGCHLD &&
522 sig != TARGET_SIGURG &&
523 sig != TARGET_SIGWINCH &&
524 sig != TARGET_SIGCONT) {
525 force_sig(sig);
526 } else {
527 return 0; /* indicate ignored */
528 }
529 } else if (!queue && handler == TARGET_SIG_IGN) {
530 /* ignore signal */
531 return 0;
532 } else if (!queue && handler == TARGET_SIG_ERR) {
533 force_sig(sig);
534 } else {
535 pq = &k->first;
536 if (sig < TARGET_SIGRTMIN) {
537 /* for a non-real-time signal, we queue exactly one instance */
538 if (!k->pending)
539 q = &k->info;
540 else
541 return 0;
542 } else {
543 if (!k->pending) {
544 /* first signal */
545 q = &k->info;
546 } else {
547 q = alloc_sigqueue(env);
548 if (!q)
549 return -EAGAIN;
550 while (*pq != NULL)
551 pq = &(*pq)->next;
552 }
553 }
554 *pq = q;
555 q->info = *info;
556 q->next = NULL;
557 k->pending = 1;
558 /* signal that a new signal is pending */
559 ts->signal_pending = 1;
560 return 1; /* indicates that the signal was queued */
561 }
562 }
563
564 static void host_signal_handler(int host_signum, siginfo_t *info,
565 void *puc)
566 {
567 CPUArchState *env = thread_cpu->env_ptr;
568 int sig;
569 target_siginfo_t tinfo;
570
571 /* the CPU emulator uses some host signals to detect exceptions;
572 we forward those signals to it first */
573 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
574 && info->si_code > 0) {
575 if (cpu_signal_handler(host_signum, info, puc))
576 return;
577 }
578
579 /* get target signal number */
580 sig = host_to_target_signal(host_signum);
581 if (sig < 1 || sig > TARGET_NSIG)
582 return;
583 trace_user_host_signal(env, host_signum, sig);
584 host_to_target_siginfo_noswap(&tinfo, info);
585 if (queue_signal(env, sig, &tinfo) == 1) {
586 /* interrupt the virtual CPU as soon as possible */
587 cpu_exit(thread_cpu);
588 }
589 }
590
591 /* do_sigaltstack() returns target values and errnos. */
592 /* compare linux/kernel/signal.c:do_sigaltstack() */
593 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
594 {
595 int ret;
596 struct target_sigaltstack oss;
597
598 /* XXX: test errors */
599 if(uoss_addr)
600 {
601 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
602 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
603 __put_user(sas_ss_flags(sp), &oss.ss_flags);
604 }
605
606 if(uss_addr)
607 {
608 struct target_sigaltstack *uss;
609 struct target_sigaltstack ss;
610 size_t minstacksize = TARGET_MINSIGSTKSZ;
611
612 #if defined(TARGET_PPC64)
613 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
614 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
615 if (get_ppc64_abi(image) > 1) {
616 minstacksize = 4096;
617 }
618 #endif
619
620 ret = -TARGET_EFAULT;
621 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
622 goto out;
623 }
624 __get_user(ss.ss_sp, &uss->ss_sp);
625 __get_user(ss.ss_size, &uss->ss_size);
626 __get_user(ss.ss_flags, &uss->ss_flags);
627 unlock_user_struct(uss, uss_addr, 0);
628
629 ret = -TARGET_EPERM;
630 if (on_sig_stack(sp))
631 goto out;
632
633 ret = -TARGET_EINVAL;
634 if (ss.ss_flags != TARGET_SS_DISABLE
635 && ss.ss_flags != TARGET_SS_ONSTACK
636 && ss.ss_flags != 0)
637 goto out;
638
639 if (ss.ss_flags == TARGET_SS_DISABLE) {
640 ss.ss_size = 0;
641 ss.ss_sp = 0;
642 } else {
643 ret = -TARGET_ENOMEM;
644 if (ss.ss_size < minstacksize) {
645 goto out;
646 }
647 }
648
649 target_sigaltstack_used.ss_sp = ss.ss_sp;
650 target_sigaltstack_used.ss_size = ss.ss_size;
651 }
652
653 if (uoss_addr) {
654 ret = -TARGET_EFAULT;
655 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
656 goto out;
657 }
658
659 ret = 0;
660 out:
661 return ret;
662 }
663
664 /* do_sigaction() returns host values and errnos */
665 int do_sigaction(int sig, const struct target_sigaction *act,
666 struct target_sigaction *oact)
667 {
668 struct target_sigaction *k;
669 struct sigaction act1;
670 int host_sig;
671 int ret = 0;
672
673 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
674 return -EINVAL;
675 k = &sigact_table[sig - 1];
676 if (oact) {
677 __put_user(k->_sa_handler, &oact->_sa_handler);
678 __put_user(k->sa_flags, &oact->sa_flags);
679 #if !defined(TARGET_MIPS)
680 __put_user(k->sa_restorer, &oact->sa_restorer);
681 #endif
682 /* Not swapped. */
683 oact->sa_mask = k->sa_mask;
684 }
685 if (act) {
686 /* FIXME: This is not threadsafe. */
687 __get_user(k->_sa_handler, &act->_sa_handler);
688 __get_user(k->sa_flags, &act->sa_flags);
689 #if !defined(TARGET_MIPS)
690 __get_user(k->sa_restorer, &act->sa_restorer);
691 #endif
692 /* To be swapped in target_to_host_sigset. */
693 k->sa_mask = act->sa_mask;
694
695 /* we update the host linux signal state */
696 host_sig = target_to_host_signal(sig);
697 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
698 sigfillset(&act1.sa_mask);
699 act1.sa_flags = SA_SIGINFO;
700 if (k->sa_flags & TARGET_SA_RESTART)
701 act1.sa_flags |= SA_RESTART;
702 /* NOTE: it is important to update the host kernel signal
703 ignore state to avoid getting unexpectedly interrupted
704 syscalls */
705 if (k->_sa_handler == TARGET_SIG_IGN) {
706 act1.sa_sigaction = (void *)SIG_IGN;
707 } else if (k->_sa_handler == TARGET_SIG_DFL) {
708 if (fatal_signal (sig))
709 act1.sa_sigaction = host_signal_handler;
710 else
711 act1.sa_sigaction = (void *)SIG_DFL;
712 } else {
713 act1.sa_sigaction = host_signal_handler;
714 }
715 ret = sigaction(host_sig, &act1, NULL);
716 }
717 }
718 return ret;
719 }
720
721 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
722
723 /* from the Linux kernel */
724
725 struct target_fpreg {
726 uint16_t significand[4];
727 uint16_t exponent;
728 };
729
730 struct target_fpxreg {
731 uint16_t significand[4];
732 uint16_t exponent;
733 uint16_t padding[3];
734 };
735
736 struct target_xmmreg {
737 abi_ulong element[4];
738 };
739
740 struct target_fpstate {
741 /* Regular FPU environment */
742 abi_ulong cw;
743 abi_ulong sw;
744 abi_ulong tag;
745 abi_ulong ipoff;
746 abi_ulong cssel;
747 abi_ulong dataoff;
748 abi_ulong datasel;
749 struct target_fpreg _st[8];
750 uint16_t status;
751 uint16_t magic; /* 0xffff = regular FPU data only */
752
753 /* FXSR FPU environment */
754 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
755 abi_ulong mxcsr;
756 abi_ulong reserved;
757 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
758 struct target_xmmreg _xmm[8];
759 abi_ulong padding[56];
760 };
761
762 #define X86_FXSR_MAGIC 0x0000
763
764 struct target_sigcontext {
765 uint16_t gs, __gsh;
766 uint16_t fs, __fsh;
767 uint16_t es, __esh;
768 uint16_t ds, __dsh;
769 abi_ulong edi;
770 abi_ulong esi;
771 abi_ulong ebp;
772 abi_ulong esp;
773 abi_ulong ebx;
774 abi_ulong edx;
775 abi_ulong ecx;
776 abi_ulong eax;
777 abi_ulong trapno;
778 abi_ulong err;
779 abi_ulong eip;
780 uint16_t cs, __csh;
781 abi_ulong eflags;
782 abi_ulong esp_at_signal;
783 uint16_t ss, __ssh;
784 abi_ulong fpstate; /* pointer */
785 abi_ulong oldmask;
786 abi_ulong cr2;
787 };
788
789 struct target_ucontext {
790 abi_ulong tuc_flags;
791 abi_ulong tuc_link;
792 target_stack_t tuc_stack;
793 struct target_sigcontext tuc_mcontext;
794 target_sigset_t tuc_sigmask; /* mask last for extensibility */
795 };
796
797 struct sigframe
798 {
799 abi_ulong pretcode;
800 int sig;
801 struct target_sigcontext sc;
802 struct target_fpstate fpstate;
803 abi_ulong extramask[TARGET_NSIG_WORDS-1];
804 char retcode[8];
805 };
806
807 struct rt_sigframe
808 {
809 abi_ulong pretcode;
810 int sig;
811 abi_ulong pinfo;
812 abi_ulong puc;
813 struct target_siginfo info;
814 struct target_ucontext uc;
815 struct target_fpstate fpstate;
816 char retcode[8];
817 };
818
819 /*
820 * Set up a signal frame.
821 */
822
823 /* XXX: save x87 state */
824 static void setup_sigcontext(struct target_sigcontext *sc,
825 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
826 abi_ulong fpstate_addr)
827 {
828 CPUState *cs = CPU(x86_env_get_cpu(env));
829 uint16_t magic;
830
831 /* already locked in setup_frame() */
832 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
833 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
834 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
835 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
836 __put_user(env->regs[R_EDI], &sc->edi);
837 __put_user(env->regs[R_ESI], &sc->esi);
838 __put_user(env->regs[R_EBP], &sc->ebp);
839 __put_user(env->regs[R_ESP], &sc->esp);
840 __put_user(env->regs[R_EBX], &sc->ebx);
841 __put_user(env->regs[R_EDX], &sc->edx);
842 __put_user(env->regs[R_ECX], &sc->ecx);
843 __put_user(env->regs[R_EAX], &sc->eax);
844 __put_user(cs->exception_index, &sc->trapno);
845 __put_user(env->error_code, &sc->err);
846 __put_user(env->eip, &sc->eip);
847 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
848 __put_user(env->eflags, &sc->eflags);
849 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
850 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
851
852 cpu_x86_fsave(env, fpstate_addr, 1);
853 fpstate->status = fpstate->sw;
854 magic = 0xffff;
855 __put_user(magic, &fpstate->magic);
856 __put_user(fpstate_addr, &sc->fpstate);
857
858 /* non-iBCS2 extensions.. */
859 __put_user(mask, &sc->oldmask);
860 __put_user(env->cr[2], &sc->cr2);
861 }
862
863 /*
864 * Determine which stack to use.
865 */
866
867 static inline abi_ulong
868 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
869 {
870 unsigned long esp;
871
872 /* Default to using normal stack */
873 esp = env->regs[R_ESP];
874 /* This is the X/Open sanctioned signal stack switching. */
875 if (ka->sa_flags & TARGET_SA_ONSTACK) {
876 if (sas_ss_flags(esp) == 0) {
877 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
878 }
879 } else {
880
881 /* This is the legacy signal stack switching. */
882 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
883 !(ka->sa_flags & TARGET_SA_RESTORER) &&
884 ka->sa_restorer) {
885 esp = (unsigned long) ka->sa_restorer;
886 }
887 }
888 return (esp - frame_size) & -8ul;
889 }
890
891 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
892 static void setup_frame(int sig, struct target_sigaction *ka,
893 target_sigset_t *set, CPUX86State *env)
894 {
895 abi_ulong frame_addr;
896 struct sigframe *frame;
897 int i;
898
899 frame_addr = get_sigframe(ka, env, sizeof(*frame));
900 trace_user_setup_frame(env, frame_addr);
901
902 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
903 goto give_sigsegv;
904
905 __put_user(sig, &frame->sig);
906
907 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
908 frame_addr + offsetof(struct sigframe, fpstate));
909
910 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
911 __put_user(set->sig[i], &frame->extramask[i - 1]);
912 }
913
914 /* Set up to return from userspace. If provided, use a stub
915 already in userspace. */
916 if (ka->sa_flags & TARGET_SA_RESTORER) {
917 __put_user(ka->sa_restorer, &frame->pretcode);
918 } else {
919 uint16_t val16;
920 abi_ulong retcode_addr;
921 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
922 __put_user(retcode_addr, &frame->pretcode);
923 /* This is popl %eax ; movl $TARGET_NR_sigreturn,%eax ; int $0x80 */
924 val16 = 0xb858;
925 __put_user(val16, (uint16_t *)(frame->retcode+0));
926 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
927 val16 = 0x80cd;
928 __put_user(val16, (uint16_t *)(frame->retcode+6));
929 }
930
931
932 /* Set up registers for signal handler */
933 env->regs[R_ESP] = frame_addr;
934 env->eip = ka->_sa_handler;
935
936 cpu_x86_load_seg(env, R_DS, __USER_DS);
937 cpu_x86_load_seg(env, R_ES, __USER_DS);
938 cpu_x86_load_seg(env, R_SS, __USER_DS);
939 cpu_x86_load_seg(env, R_CS, __USER_CS);
940 env->eflags &= ~TF_MASK;
941
942 unlock_user_struct(frame, frame_addr, 1);
943
944 return;
945
946 give_sigsegv:
947 if (sig == TARGET_SIGSEGV) {
948 ka->_sa_handler = TARGET_SIG_DFL;
949 }
950 force_sig(TARGET_SIGSEGV /* , current */);
951 }
952
953 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
954 static void setup_rt_frame(int sig, struct target_sigaction *ka,
955 target_siginfo_t *info,
956 target_sigset_t *set, CPUX86State *env)
957 {
958 abi_ulong frame_addr, addr;
959 struct rt_sigframe *frame;
960 int i;
961
962 frame_addr = get_sigframe(ka, env, sizeof(*frame));
963 trace_user_setup_rt_frame(env, frame_addr);
964
965 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
966 goto give_sigsegv;
967
968 __put_user(sig, &frame->sig);
969 addr = frame_addr + offsetof(struct rt_sigframe, info);
970 __put_user(addr, &frame->pinfo);
971 addr = frame_addr + offsetof(struct rt_sigframe, uc);
972 __put_user(addr, &frame->puc);
973 tswap_siginfo(&frame->info, info);
974
975 /* Create the ucontext. */
976 __put_user(0, &frame->uc.tuc_flags);
977 __put_user(0, &frame->uc.tuc_link);
978 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
979 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
980 &frame->uc.tuc_stack.ss_flags);
981 __put_user(target_sigaltstack_used.ss_size,
982 &frame->uc.tuc_stack.ss_size);
983 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
984 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
985
986 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
987 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
988 }
989
990 /* Set up to return from userspace. If provided, use a stub
991 already in userspace. */
992 if (ka->sa_flags & TARGET_SA_RESTORER) {
993 __put_user(ka->sa_restorer, &frame->pretcode);
994 } else {
995 uint16_t val16;
996 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
997 __put_user(addr, &frame->pretcode);
998 /* This is movl $TARGET_NR_rt_sigreturn,%eax ; int $0x80 */
999 __put_user(0xb8, (char *)(frame->retcode+0));
1000 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1001 val16 = 0x80cd;
1002 __put_user(val16, (uint16_t *)(frame->retcode+5));
1003 }
1004
1005 /* Set up registers for signal handler */
1006 env->regs[R_ESP] = frame_addr;
1007 env->eip = ka->_sa_handler;
1008
1009 cpu_x86_load_seg(env, R_DS, __USER_DS);
1010 cpu_x86_load_seg(env, R_ES, __USER_DS);
1011 cpu_x86_load_seg(env, R_SS, __USER_DS);
1012 cpu_x86_load_seg(env, R_CS, __USER_CS);
1013 env->eflags &= ~TF_MASK;
1014
1015 unlock_user_struct(frame, frame_addr, 1);
1016
1017 return;
1018
1019 give_sigsegv:
1020 if (sig == TARGET_SIGSEGV) {
1021 ka->_sa_handler = TARGET_SIG_DFL;
1022 }
1023 force_sig(TARGET_SIGSEGV /* , current */);
1024 }
1025
1026 static int
1027 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1028 {
1029 unsigned int err = 0;
1030 abi_ulong fpstate_addr;
1031 unsigned int tmpflags;
1032
1033 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1034 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1035 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1036 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1037
1038 env->regs[R_EDI] = tswapl(sc->edi);
1039 env->regs[R_ESI] = tswapl(sc->esi);
1040 env->regs[R_EBP] = tswapl(sc->ebp);
1041 env->regs[R_ESP] = tswapl(sc->esp);
1042 env->regs[R_EBX] = tswapl(sc->ebx);
1043 env->regs[R_EDX] = tswapl(sc->edx);
1044 env->regs[R_ECX] = tswapl(sc->ecx);
1045 env->regs[R_EAX] = tswapl(sc->eax);
1046 env->eip = tswapl(sc->eip);
1047
1048 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1049 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1050
1051 tmpflags = tswapl(sc->eflags);
1052 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1053 // regs->orig_eax = -1; /* disable syscall checks */
1054
1055 fpstate_addr = tswapl(sc->fpstate);
1056 if (fpstate_addr != 0) {
1057 if (!access_ok(VERIFY_READ, fpstate_addr,
1058 sizeof(struct target_fpstate)))
1059 goto badframe;
1060 cpu_x86_frstor(env, fpstate_addr, 1);
1061 }
1062
1063 return err;
1064 badframe:
1065 return 1;
1066 }
1067
1068 long do_sigreturn(CPUX86State *env)
1069 {
1070 struct sigframe *frame;
1071 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1072 target_sigset_t target_set;
1073 sigset_t set;
1074 int i;
1075
1076 trace_user_do_sigreturn(env, frame_addr);
1077 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1078 goto badframe;
1079 /* set blocked signals */
1080 __get_user(target_set.sig[0], &frame->sc.oldmask);
1081 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1082 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1083 }
1084
1085 target_to_host_sigset_internal(&set, &target_set);
1086 do_sigprocmask(SIG_SETMASK, &set, NULL);
1087
1088 /* restore registers */
1089 if (restore_sigcontext(env, &frame->sc))
1090 goto badframe;
1091 unlock_user_struct(frame, frame_addr, 0);
1092 return -TARGET_QEMU_ESIGRETURN;
1093
1094 badframe:
1095 unlock_user_struct(frame, frame_addr, 0);
1096 force_sig(TARGET_SIGSEGV);
1097 return 0;
1098 }
1099
1100 long do_rt_sigreturn(CPUX86State *env)
1101 {
1102 abi_ulong frame_addr;
1103 struct rt_sigframe *frame;
1104 sigset_t set;
1105
1106 frame_addr = env->regs[R_ESP] - 4;
1107 trace_user_do_rt_sigreturn(env, frame_addr);
1108 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1109 goto badframe;
1110 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1111 do_sigprocmask(SIG_SETMASK, &set, NULL);
1112
1113 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1114 goto badframe;
1115 }
1116
1117 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1118 get_sp_from_cpustate(env)) == -EFAULT) {
1119 goto badframe;
1120 }
1121
1122 unlock_user_struct(frame, frame_addr, 0);
1123 return -TARGET_QEMU_ESIGRETURN;
1124
1125 badframe:
1126 unlock_user_struct(frame, frame_addr, 0);
1127 force_sig(TARGET_SIGSEGV);
1128 return 0;
1129 }
1130
1131 #elif defined(TARGET_AARCH64)
1132
1133 struct target_sigcontext {
1134 uint64_t fault_address;
1135 /* AArch64 registers */
1136 uint64_t regs[31];
1137 uint64_t sp;
1138 uint64_t pc;
1139 uint64_t pstate;
1140 /* 4K reserved for FP/SIMD state and future expansion */
1141 char __reserved[4096] __attribute__((__aligned__(16)));
1142 };
1143
1144 struct target_ucontext {
1145 abi_ulong tuc_flags;
1146 abi_ulong tuc_link;
1147 target_stack_t tuc_stack;
1148 target_sigset_t tuc_sigmask;
1149 /* glibc uses a 1024-bit sigset_t */
1150 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1151 /* last for future expansion */
1152 struct target_sigcontext tuc_mcontext;
1153 };
1154
1155 /*
1156 * Header to be used at the beginning of structures extending the user
1157 * context. Such structures must be placed after the rt_sigframe on the stack
1158 * and be 16-byte aligned. The last structure must be a dummy one with the
1159 * magic and size set to 0.
1160 */
1161 struct target_aarch64_ctx {
1162 uint32_t magic;
1163 uint32_t size;
1164 };
1165
1166 #define TARGET_FPSIMD_MAGIC 0x46508001
1167
1168 struct target_fpsimd_context {
1169 struct target_aarch64_ctx head;
1170 uint32_t fpsr;
1171 uint32_t fpcr;
1172 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1173 };
1174
1175 /*
1176 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1177 * user space as it will change with the addition of new context. User space
1178 * should check the magic/size information.
1179 */
1180 struct target_aux_context {
1181 struct target_fpsimd_context fpsimd;
1182 /* additional context to be added before "end" */
1183 struct target_aarch64_ctx end;
1184 };
1185
1186 struct target_rt_sigframe {
1187 struct target_siginfo info;
1188 struct target_ucontext uc;
1189 uint64_t fp;
1190 uint64_t lr;
1191 uint32_t tramp[2];
1192 };
1193
1194 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1195 CPUARMState *env, target_sigset_t *set)
1196 {
1197 int i;
1198 struct target_aux_context *aux =
1199 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1200
1201 /* set up the stack frame for unwinding */
1202 __put_user(env->xregs[29], &sf->fp);
1203 __put_user(env->xregs[30], &sf->lr);
1204
1205 for (i = 0; i < 31; i++) {
1206 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1207 }
1208 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1209 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1210 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1211
1212 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1213
1214 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1215 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1216 }
1217
1218 for (i = 0; i < 32; i++) {
1219 #ifdef TARGET_WORDS_BIGENDIAN
1220 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1221 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1222 #else
1223 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1224 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1225 #endif
1226 }
1227 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1228 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1229 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1230 __put_user(sizeof(struct target_fpsimd_context),
1231 &aux->fpsimd.head.size);
1232
1233 /* set the "end" magic */
1234 __put_user(0, &aux->end.magic);
1235 __put_user(0, &aux->end.size);
1236
1237 return 0;
1238 }
1239
1240 static int target_restore_sigframe(CPUARMState *env,
1241 struct target_rt_sigframe *sf)
1242 {
1243 sigset_t set;
1244 int i;
1245 struct target_aux_context *aux =
1246 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1247 uint32_t magic, size, fpsr, fpcr;
1248 uint64_t pstate;
1249
1250 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1251 do_sigprocmask(SIG_SETMASK, &set, NULL);
1252
1253 for (i = 0; i < 31; i++) {
1254 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1255 }
1256
1257 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1258 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1259 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1260 pstate_write(env, pstate);
1261
1262 __get_user(magic, &aux->fpsimd.head.magic);
1263 __get_user(size, &aux->fpsimd.head.size);
1264
1265 if (magic != TARGET_FPSIMD_MAGIC
1266 || size != sizeof(struct target_fpsimd_context)) {
1267 return 1;
1268 }
1269
1270 for (i = 0; i < 32; i++) {
1271 #ifdef TARGET_WORDS_BIGENDIAN
1272 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1273 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1274 #else
1275 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1276 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1277 #endif
1278 }
1279 __get_user(fpsr, &aux->fpsimd.fpsr);
1280 vfp_set_fpsr(env, fpsr);
1281 __get_user(fpcr, &aux->fpsimd.fpcr);
1282 vfp_set_fpcr(env, fpcr);
1283
1284 return 0;
1285 }
1286
1287 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1288 {
1289 abi_ulong sp;
1290
1291 sp = env->xregs[31];
1292
1293 /*
1294 * This is the X/Open sanctioned signal stack switching.
1295 */
1296 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1297 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1298 }
1299
1300 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1301
1302 return sp;
1303 }
1304
1305 static void target_setup_frame(int usig, struct target_sigaction *ka,
1306 target_siginfo_t *info, target_sigset_t *set,
1307 CPUARMState *env)
1308 {
1309 struct target_rt_sigframe *frame;
1310 abi_ulong frame_addr, return_addr;
1311
1312 frame_addr = get_sigframe(ka, env);
1313 trace_user_setup_frame(env, frame_addr);
1314 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1315 goto give_sigsegv;
1316 }
1317
1318 __put_user(0, &frame->uc.tuc_flags);
1319 __put_user(0, &frame->uc.tuc_link);
1320
1321 __put_user(target_sigaltstack_used.ss_sp,
1322 &frame->uc.tuc_stack.ss_sp);
1323 __put_user(sas_ss_flags(env->xregs[31]),
1324 &frame->uc.tuc_stack.ss_flags);
1325 __put_user(target_sigaltstack_used.ss_size,
1326 &frame->uc.tuc_stack.ss_size);
1327 target_setup_sigframe(frame, env, set);
1328 if (ka->sa_flags & TARGET_SA_RESTORER) {
1329 return_addr = ka->sa_restorer;
1330 } else {
1331 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1332 __put_user(0xd2801168, &frame->tramp[0]);
1333 __put_user(0xd4000001, &frame->tramp[1]);
1334 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1335 }
1336 env->xregs[0] = usig;
1337 env->xregs[31] = frame_addr;
1338 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1339 env->pc = ka->_sa_handler;
1340 env->xregs[30] = return_addr;
1341 if (info) {
1342 tswap_siginfo(&frame->info, info);
1343 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1344 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1345 }
1346
1347 unlock_user_struct(frame, frame_addr, 1);
1348 return;
1349
1350 give_sigsegv:
1351 unlock_user_struct(frame, frame_addr, 1);
1352 force_sig(TARGET_SIGSEGV);
1353 }
1354
1355 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1356 target_siginfo_t *info, target_sigset_t *set,
1357 CPUARMState *env)
1358 {
1359 target_setup_frame(sig, ka, info, set, env);
1360 }
1361
1362 static void setup_frame(int sig, struct target_sigaction *ka,
1363 target_sigset_t *set, CPUARMState *env)
1364 {
1365 target_setup_frame(sig, ka, 0, set, env);
1366 }
1367
1368 long do_rt_sigreturn(CPUARMState *env)
1369 {
1370 struct target_rt_sigframe *frame = NULL;
1371 abi_ulong frame_addr = env->xregs[31];
1372
1373 trace_user_do_rt_sigreturn(env, frame_addr);
1374 if (frame_addr & 15) {
1375 goto badframe;
1376 }
1377
1378 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1379 goto badframe;
1380 }
1381
1382 if (target_restore_sigframe(env, frame)) {
1383 goto badframe;
1384 }
1385
1386 if (do_sigaltstack(frame_addr +
1387 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1388 0, get_sp_from_cpustate(env)) == -EFAULT) {
1389 goto badframe;
1390 }
1391
1392 unlock_user_struct(frame, frame_addr, 0);
1393 return -TARGET_QEMU_ESIGRETURN;
1394
1395 badframe:
1396 unlock_user_struct(frame, frame_addr, 0);
1397 force_sig(TARGET_SIGSEGV);
1398 return 0;
1399 }
1400
1401 long do_sigreturn(CPUARMState *env)
1402 {
1403 return do_rt_sigreturn(env);
1404 }
1405
1406 #elif defined(TARGET_ARM)
1407
1408 struct target_sigcontext {
1409 abi_ulong trap_no;
1410 abi_ulong error_code;
1411 abi_ulong oldmask;
1412 abi_ulong arm_r0;
1413 abi_ulong arm_r1;
1414 abi_ulong arm_r2;
1415 abi_ulong arm_r3;
1416 abi_ulong arm_r4;
1417 abi_ulong arm_r5;
1418 abi_ulong arm_r6;
1419 abi_ulong arm_r7;
1420 abi_ulong arm_r8;
1421 abi_ulong arm_r9;
1422 abi_ulong arm_r10;
1423 abi_ulong arm_fp;
1424 abi_ulong arm_ip;
1425 abi_ulong arm_sp;
1426 abi_ulong arm_lr;
1427 abi_ulong arm_pc;
1428 abi_ulong arm_cpsr;
1429 abi_ulong fault_address;
1430 };
1431
1432 struct target_ucontext_v1 {
1433 abi_ulong tuc_flags;
1434 abi_ulong tuc_link;
1435 target_stack_t tuc_stack;
1436 struct target_sigcontext tuc_mcontext;
1437 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1438 };
1439
1440 struct target_ucontext_v2 {
1441 abi_ulong tuc_flags;
1442 abi_ulong tuc_link;
1443 target_stack_t tuc_stack;
1444 struct target_sigcontext tuc_mcontext;
1445 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1446 char __unused[128 - sizeof(target_sigset_t)];
1447 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1448 };
1449
1450 struct target_user_vfp {
1451 uint64_t fpregs[32];
1452 abi_ulong fpscr;
1453 };
1454
1455 struct target_user_vfp_exc {
1456 abi_ulong fpexc;
1457 abi_ulong fpinst;
1458 abi_ulong fpinst2;
1459 };
1460
1461 struct target_vfp_sigframe {
1462 abi_ulong magic;
1463 abi_ulong size;
1464 struct target_user_vfp ufp;
1465 struct target_user_vfp_exc ufp_exc;
1466 } __attribute__((__aligned__(8)));
1467
1468 struct target_iwmmxt_sigframe {
1469 abi_ulong magic;
1470 abi_ulong size;
1471 uint64_t regs[16];
1472 /* Note that not all the coprocessor control registers are stored here */
1473 uint32_t wcssf;
1474 uint32_t wcasf;
1475 uint32_t wcgr0;
1476 uint32_t wcgr1;
1477 uint32_t wcgr2;
1478 uint32_t wcgr3;
1479 } __attribute__((__aligned__(8)));
1480
1481 #define TARGET_VFP_MAGIC 0x56465001
1482 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1483
1484 struct sigframe_v1
1485 {
1486 struct target_sigcontext sc;
1487 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1488 abi_ulong retcode;
1489 };
1490
1491 struct sigframe_v2
1492 {
1493 struct target_ucontext_v2 uc;
1494 abi_ulong retcode;
1495 };
1496
1497 struct rt_sigframe_v1
1498 {
1499 abi_ulong pinfo;
1500 abi_ulong puc;
1501 struct target_siginfo info;
1502 struct target_ucontext_v1 uc;
1503 abi_ulong retcode;
1504 };
1505
1506 struct rt_sigframe_v2
1507 {
1508 struct target_siginfo info;
1509 struct target_ucontext_v2 uc;
1510 abi_ulong retcode;
1511 };
1512
1513 #define TARGET_CONFIG_CPU_32 1
1514
1515 /*
1516 * For ARM syscalls, we encode the syscall number into the instruction.
1517 */
1518 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1519 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1520
1521 /*
1522 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1523 * need two 16-bit instructions.
1524 */
1525 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1526 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1527
1528 static const abi_ulong retcodes[4] = {
1529 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1530 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1531 };
1532
1533
1534 static inline int valid_user_regs(CPUARMState *regs)
1535 {
1536 return 1;
1537 }
1538
1539 static void
1540 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1541 CPUARMState *env, abi_ulong mask)
1542 {
1543 __put_user(env->regs[0], &sc->arm_r0);
1544 __put_user(env->regs[1], &sc->arm_r1);
1545 __put_user(env->regs[2], &sc->arm_r2);
1546 __put_user(env->regs[3], &sc->arm_r3);
1547 __put_user(env->regs[4], &sc->arm_r4);
1548 __put_user(env->regs[5], &sc->arm_r5);
1549 __put_user(env->regs[6], &sc->arm_r6);
1550 __put_user(env->regs[7], &sc->arm_r7);
1551 __put_user(env->regs[8], &sc->arm_r8);
1552 __put_user(env->regs[9], &sc->arm_r9);
1553 __put_user(env->regs[10], &sc->arm_r10);
1554 __put_user(env->regs[11], &sc->arm_fp);
1555 __put_user(env->regs[12], &sc->arm_ip);
1556 __put_user(env->regs[13], &sc->arm_sp);
1557 __put_user(env->regs[14], &sc->arm_lr);
1558 __put_user(env->regs[15], &sc->arm_pc);
1559 #ifdef TARGET_CONFIG_CPU_32
1560 __put_user(cpsr_read(env), &sc->arm_cpsr);
1561 #endif
1562
1563 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1564 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1565 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1566 __put_user(mask, &sc->oldmask);
1567 }
1568
1569 static inline abi_ulong
1570 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1571 {
1572 unsigned long sp = regs->regs[13];
1573
1574 /*
1575 * This is the X/Open sanctioned signal stack switching.
1576 */
1577 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1578 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1579 }
1580 /*
1581 * ATPCS B01 mandates 8-byte alignment
1582 */
1583 return (sp - framesize) & ~7;
1584 }
1585
1586 static void
1587 setup_return(CPUARMState *env, struct target_sigaction *ka,
1588 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1589 {
1590 abi_ulong handler = ka->_sa_handler;
1591 abi_ulong retcode;
1592 int thumb = handler & 1;
1593 uint32_t cpsr = cpsr_read(env);
1594
1595 cpsr &= ~CPSR_IT;
1596 if (thumb) {
1597 cpsr |= CPSR_T;
1598 } else {
1599 cpsr &= ~CPSR_T;
1600 }
1601
1602 if (ka->sa_flags & TARGET_SA_RESTORER) {
1603 retcode = ka->sa_restorer;
1604 } else {
1605 unsigned int idx = thumb;
1606
1607 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1608 idx += 2;
1609 }
1610
1611 __put_user(retcodes[idx], rc);
1612
1613 retcode = rc_addr + thumb;
1614 }
1615
1616 env->regs[0] = usig;
1617 env->regs[13] = frame_addr;
1618 env->regs[14] = retcode;
1619 env->regs[15] = handler & (thumb ? ~1 : ~3);
1620 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1621 }
1622
1623 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1624 {
1625 int i;
1626 struct target_vfp_sigframe *vfpframe;
1627 vfpframe = (struct target_vfp_sigframe *)regspace;
1628 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1629 __put_user(sizeof(*vfpframe), &vfpframe->size);
1630 for (i = 0; i < 32; i++) {
1631 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1632 }
1633 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1634 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1635 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1636 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1637 return (abi_ulong*)(vfpframe+1);
1638 }
1639
1640 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1641 CPUARMState *env)
1642 {
1643 int i;
1644 struct target_iwmmxt_sigframe *iwmmxtframe;
1645 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1646 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1647 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1648 for (i = 0; i < 16; i++) {
1649 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1650 }
1651 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1652 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1653 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1654 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1655 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1656 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1657 return (abi_ulong*)(iwmmxtframe+1);
1658 }
1659
1660 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1661 target_sigset_t *set, CPUARMState *env)
1662 {
1663 struct target_sigaltstack stack;
1664 int i;
1665 abi_ulong *regspace;
1666
1667 /* Clear all the bits of the ucontext we don't use. */
1668 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1669
1670 memset(&stack, 0, sizeof(stack));
1671 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1672 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1673 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1674 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1675
1676 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1677 /* Save coprocessor signal frame. */
1678 regspace = uc->tuc_regspace;
1679 if (arm_feature(env, ARM_FEATURE_VFP)) {
1680 regspace = setup_sigframe_v2_vfp(regspace, env);
1681 }
1682 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1683 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1684 }
1685
1686 /* Write terminating magic word */
1687 __put_user(0, regspace);
1688
1689 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1690 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1691 }
1692 }
1693
1694 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1695 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1696 target_sigset_t *set, CPUARMState *regs)
1697 {
1698 struct sigframe_v1 *frame;
1699 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1700 int i;
1701
1702 trace_user_setup_frame(regs, frame_addr);
1703 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1704 return;
1705 }
1706
1707 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1708
1709 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1710 __put_user(set->sig[i], &frame->extramask[i - 1]);
1711 }
1712
1713 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1714 frame_addr + offsetof(struct sigframe_v1, retcode));
1715
1716 unlock_user_struct(frame, frame_addr, 1);
1717 }
1718
1719 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1720 target_sigset_t *set, CPUARMState *regs)
1721 {
1722 struct sigframe_v2 *frame;
1723 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1724
1725 trace_user_setup_frame(regs, frame_addr);
1726 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1727 return;
1728 }
1729
1730 setup_sigframe_v2(&frame->uc, set, regs);
1731
1732 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1733 frame_addr + offsetof(struct sigframe_v2, retcode));
1734
1735 unlock_user_struct(frame, frame_addr, 1);
1736 }
1737
1738 static void setup_frame(int usig, struct target_sigaction *ka,
1739 target_sigset_t *set, CPUARMState *regs)
1740 {
1741 if (get_osversion() >= 0x020612) {
1742 setup_frame_v2(usig, ka, set, regs);
1743 } else {
1744 setup_frame_v1(usig, ka, set, regs);
1745 }
1746 }
1747
1748 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1749 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1750 target_siginfo_t *info,
1751 target_sigset_t *set, CPUARMState *env)
1752 {
1753 struct rt_sigframe_v1 *frame;
1754 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1755 struct target_sigaltstack stack;
1756 int i;
1757 abi_ulong info_addr, uc_addr;
1758
1759 trace_user_setup_rt_frame(env, frame_addr);
1760 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1761 return /* 1 */;
1762 }
1763
1764 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1765 __put_user(info_addr, &frame->pinfo);
1766 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1767 __put_user(uc_addr, &frame->puc);
1768 tswap_siginfo(&frame->info, info);
1769
1770 /* Clear all the bits of the ucontext we don't use. */
1771 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1772
1773 memset(&stack, 0, sizeof(stack));
1774 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1775 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1776 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1777 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1778
1779 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1780 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1781 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1782 }
1783
1784 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1785 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1786
1787 env->regs[1] = info_addr;
1788 env->regs[2] = uc_addr;
1789
1790 unlock_user_struct(frame, frame_addr, 1);
1791 }
1792
1793 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1794 target_siginfo_t *info,
1795 target_sigset_t *set, CPUARMState *env)
1796 {
1797 struct rt_sigframe_v2 *frame;
1798 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1799 abi_ulong info_addr, uc_addr;
1800
1801 trace_user_setup_rt_frame(env, frame_addr);
1802 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1803 return /* 1 */;
1804 }
1805
1806 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1807 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1808 tswap_siginfo(&frame->info, info);
1809
1810 setup_sigframe_v2(&frame->uc, set, env);
1811
1812 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1813 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1814
1815 env->regs[1] = info_addr;
1816 env->regs[2] = uc_addr;
1817
1818 unlock_user_struct(frame, frame_addr, 1);
1819 }
1820
1821 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1822 target_siginfo_t *info,
1823 target_sigset_t *set, CPUARMState *env)
1824 {
1825 if (get_osversion() >= 0x020612) {
1826 setup_rt_frame_v2(usig, ka, info, set, env);
1827 } else {
1828 setup_rt_frame_v1(usig, ka, info, set, env);
1829 }
1830 }
1831
1832 static int
1833 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1834 {
1835 int err = 0;
1836 uint32_t cpsr;
1837
1838 __get_user(env->regs[0], &sc->arm_r0);
1839 __get_user(env->regs[1], &sc->arm_r1);
1840 __get_user(env->regs[2], &sc->arm_r2);
1841 __get_user(env->regs[3], &sc->arm_r3);
1842 __get_user(env->regs[4], &sc->arm_r4);
1843 __get_user(env->regs[5], &sc->arm_r5);
1844 __get_user(env->regs[6], &sc->arm_r6);
1845 __get_user(env->regs[7], &sc->arm_r7);
1846 __get_user(env->regs[8], &sc->arm_r8);
1847 __get_user(env->regs[9], &sc->arm_r9);
1848 __get_user(env->regs[10], &sc->arm_r10);
1849 __get_user(env->regs[11], &sc->arm_fp);
1850 __get_user(env->regs[12], &sc->arm_ip);
1851 __get_user(env->regs[13], &sc->arm_sp);
1852 __get_user(env->regs[14], &sc->arm_lr);
1853 __get_user(env->regs[15], &sc->arm_pc);
1854 #ifdef TARGET_CONFIG_CPU_32
1855 __get_user(cpsr, &sc->arm_cpsr);
1856 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1857 #endif
1858
1859 err |= !valid_user_regs(env);
1860
1861 return err;
1862 }
1863
1864 static long do_sigreturn_v1(CPUARMState *env)
1865 {
1866 abi_ulong frame_addr;
1867 struct sigframe_v1 *frame = NULL;
1868 target_sigset_t set;
1869 sigset_t host_set;
1870 int i;
1871
1872 /*
1873 * Since we stacked the signal frame on a 64-bit boundary,
1874 * 'sp' should be 8-byte aligned here. If it's not,
1875 * the user is trying to mess with us.
1876 */
1877 frame_addr = env->regs[13];
1878 trace_user_do_sigreturn(env, frame_addr);
1879 if (frame_addr & 7) {
1880 goto badframe;
1881 }
1882
1883 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1884 goto badframe;
1885 }
1886
1887 __get_user(set.sig[0], &frame->sc.oldmask);
1888 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1889 __get_user(set.sig[i], &frame->extramask[i - 1]);
1890 }
1891
1892 target_to_host_sigset_internal(&host_set, &set);
1893 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1894
1895 if (restore_sigcontext(env, &frame->sc)) {
1896 goto badframe;
1897 }
1898
1899 #if 0
1900 /* Send SIGTRAP if we're single-stepping */
1901 if (ptrace_cancel_bpt(current))
1902 send_sig(SIGTRAP, current, 1);
1903 #endif
1904 unlock_user_struct(frame, frame_addr, 0);
1905 return -TARGET_QEMU_ESIGRETURN;
1906
1907 badframe:
1908 force_sig(TARGET_SIGSEGV /* , current */);
1909 return 0;
1910 }
1911
1912 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1913 {
1914 int i;
1915 abi_ulong magic, sz;
1916 uint32_t fpscr, fpexc;
1917 struct target_vfp_sigframe *vfpframe;
1918 vfpframe = (struct target_vfp_sigframe *)regspace;
1919
1920 __get_user(magic, &vfpframe->magic);
1921 __get_user(sz, &vfpframe->size);
1922 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1923 return 0;
1924 }
1925 for (i = 0; i < 32; i++) {
1926 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1927 }
1928 __get_user(fpscr, &vfpframe->ufp.fpscr);
1929 vfp_set_fpscr(env, fpscr);
1930 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1931 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1932 * and the exception flag is cleared
1933 */
1934 fpexc |= (1 << 30);
1935 fpexc &= ~((1 << 31) | (1 << 28));
1936 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1937 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1938 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1939 return (abi_ulong*)(vfpframe + 1);
1940 }
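/* Editorial note on the sanitisation above: assuming the usual VFP
 * FPEXC bit layout (EX is bit 31, EN is bit 30, FP2V is bit 28), the
 * two mask operations set EN so the VFP unit is enabled on return and
 * clear EX and FP2V so no exception is pending and FPINST2 is treated
 * as invalid, exactly as the comment describes. */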
1941
1942 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1943 abi_ulong *regspace)
1944 {
1945 int i;
1946 abi_ulong magic, sz;
1947 struct target_iwmmxt_sigframe *iwmmxtframe;
1948 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1949
1950 __get_user(magic, &iwmmxtframe->magic);
1951 __get_user(sz, &iwmmxtframe->size);
1952 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1953 return 0;
1954 }
1955 for (i = 0; i < 16; i++) {
1956 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1957 }
1958 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1959 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1960 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1961 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1962 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1963 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1964 return (abi_ulong*)(iwmmxtframe + 1);
1965 }
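/* Editorial note: the two helpers above walk the tuc_regspace area of
 * the v2 ucontext, which holds a sequence of coprocessor blocks each
 * tagged with a (magic, size) header: a VFP block when the CPU has
 * VFP, then an iWMMXt block when present. Each helper validates its
 * tag and returns a pointer just past its block, or NULL so the caller
 * can reject the frame. */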
1966
1967 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
1968 struct target_ucontext_v2 *uc)
1969 {
1970 sigset_t host_set;
1971 abi_ulong *regspace;
1972
1973 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1974 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1975
1976 if (restore_sigcontext(env, &uc->tuc_mcontext))
1977 return 1;
1978
1979 /* Restore coprocessor signal frame */
1980 regspace = uc->tuc_regspace;
1981 if (arm_feature(env, ARM_FEATURE_VFP)) {
1982 regspace = restore_sigframe_v2_vfp(env, regspace);
1983 if (!regspace) {
1984 return 1;
1985 }
1986 }
1987 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1988 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1989 if (!regspace) {
1990 return 1;
1991 }
1992 }
1993
1994 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
1995 return 1;
1996
1997 #if 0
1998 /* Send SIGTRAP if we're single-stepping */
1999 if (ptrace_cancel_bpt(current))
2000 send_sig(SIGTRAP, current, 1);
2001 #endif
2002
2003 return 0;
2004 }
2005
2006 static long do_sigreturn_v2(CPUARMState *env)
2007 {
2008 abi_ulong frame_addr;
2009 struct sigframe_v2 *frame = NULL;
2010
2011 /*
2012 * Since we stacked the signal frame on a 64-bit boundary,
2013 * 'sp' should be 8-byte aligned here. If it's not,
2014 * the user is trying to mess with us.
2015 */
2016 frame_addr = env->regs[13];
2017 trace_user_do_sigreturn(env, frame_addr);
2018 if (frame_addr & 7) {
2019 goto badframe;
2020 }
2021
2022 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2023 goto badframe;
2024 }
2025
2026 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2027 goto badframe;
2028 }
2029
2030 unlock_user_struct(frame, frame_addr, 0);
2031 return -TARGET_QEMU_ESIGRETURN;
2032
2033 badframe:
2034 unlock_user_struct(frame, frame_addr, 0);
2035 force_sig(TARGET_SIGSEGV /* , current */);
2036 return 0;
2037 }
2038
2039 long do_sigreturn(CPUARMState *env)
2040 {
2041 if (get_osversion() >= 0x020612) {
2042 return do_sigreturn_v2(env);
2043 } else {
2044 return do_sigreturn_v1(env);
2045 }
2046 }
2047
2048 static long do_rt_sigreturn_v1(CPUARMState *env)
2049 {
2050 abi_ulong frame_addr;
2051 struct rt_sigframe_v1 *frame = NULL;
2052 sigset_t host_set;
2053
2054 /*
2055 * Since we stacked the signal frame on a 64-bit boundary,
2056 * 'sp' should be 8-byte aligned here. If it's not,
2057 * the user is trying to mess with us.
2058 */
2059 frame_addr = env->regs[13];
2060 trace_user_do_rt_sigreturn(env, frame_addr);
2061 if (frame_addr & 7) {
2062 goto badframe;
2063 }
2064
2065 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2066 goto badframe;
2067 }
2068
2069 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2070 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2071
2072 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2073 goto badframe;
2074 }
2075
2076 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2077 goto badframe;
2078
2079 #if 0
2080 /* Send SIGTRAP if we're single-stepping */
2081 if (ptrace_cancel_bpt(current))
2082 send_sig(SIGTRAP, current, 1);
2083 #endif
2084 unlock_user_struct(frame, frame_addr, 0);
2085 return -TARGET_QEMU_ESIGRETURN;
2086
2087 badframe:
2088 unlock_user_struct(frame, frame_addr, 0);
2089 force_sig(TARGET_SIGSEGV /* , current */);
2090 return 0;
2091 }
2092
2093 static long do_rt_sigreturn_v2(CPUARMState *env)
2094 {
2095 abi_ulong frame_addr;
2096 struct rt_sigframe_v2 *frame = NULL;
2097
2098 /*
2099 * Since we stacked the signal frame on a 64-bit boundary,
2100 * 'sp' should be 8-byte aligned here. If it's not,
2101 * the user is trying to mess with us.
2102 */
2103 frame_addr = env->regs[13];
2104 trace_user_do_rt_sigreturn(env, frame_addr);
2105 if (frame_addr & 7) {
2106 goto badframe;
2107 }
2108
2109 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2110 goto badframe;
2111 }
2112
2113 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2114 goto badframe;
2115 }
2116
2117 unlock_user_struct(frame, frame_addr, 0);
2118 return -TARGET_QEMU_ESIGRETURN;
2119
2120 badframe:
2121 unlock_user_struct(frame, frame_addr, 0);
2122 force_sig(TARGET_SIGSEGV /* , current */);
2123 return 0;
2124 }
2125
2126 long do_rt_sigreturn(CPUARMState *env)
2127 {
2128 if (get_osversion() >= 0x020612) {
2129 return do_rt_sigreturn_v2(env);
2130 } else {
2131 return do_rt_sigreturn_v1(env);
2132 }
2133 }
2134
2135 #elif defined(TARGET_SPARC)
2136
2137 #define __SUNOS_MAXWIN 31
2138
2139 /* This is what SunOS does, so shall I. */
2140 struct target_sigcontext {
2141 abi_ulong sigc_onstack; /* state to restore */
2142
2143 abi_ulong sigc_mask; /* sigmask to restore */
2144 abi_ulong sigc_sp; /* stack pointer */
2145 abi_ulong sigc_pc; /* program counter */
2146 abi_ulong sigc_npc; /* next program counter */
2147 abi_ulong sigc_psr; /* for condition codes etc */
2148 abi_ulong sigc_g1; /* User uses these two registers */
2149 abi_ulong sigc_o0; /* within the trampoline code. */
2150
2151 /* Now comes information regarding the user's window set
2152 * at the time of the signal.
2153 */
2154 abi_ulong sigc_oswins; /* outstanding windows */
2155
2156 /* stack ptrs for each regwin buf */
2157 char *sigc_spbuf[__SUNOS_MAXWIN];
2158
2159 /* Windows to restore after signal */
2160 struct {
2161 abi_ulong locals[8];
2162 abi_ulong ins[8];
2163 } sigc_wbuf[__SUNOS_MAXWIN];
2164 };
2165 /* A Sparc stack frame */
2166 struct sparc_stackf {
2167 abi_ulong locals[8];
2168 abi_ulong ins[8];
2169 /* It's simpler to treat fp and callers_pc as elements of ins[]
2170 * since we never need to access them ourselves.
2171 */
2172 char *structptr;
2173 abi_ulong xargs[6];
2174 abi_ulong xxargs[1];
2175 };
2176
2177 typedef struct {
2178 struct {
2179 abi_ulong psr;
2180 abi_ulong pc;
2181 abi_ulong npc;
2182 abi_ulong y;
2183 abi_ulong u_regs[16]; /* globals and ins */
2184 } si_regs;
2185 int si_mask;
2186 } __siginfo_t;
2187
2188 typedef struct {
2189 abi_ulong si_float_regs[32];
2190 unsigned long si_fsr;
2191 unsigned long si_fpqdepth;
2192 struct {
2193 unsigned long *insn_addr;
2194 unsigned long insn;
2195 } si_fpqueue [16];
2196 } qemu_siginfo_fpu_t;
2197
2198
2199 struct target_signal_frame {
2200 struct sparc_stackf ss;
2201 __siginfo_t info;
2202 abi_ulong fpu_save;
2203 abi_ulong insns[2] __attribute__ ((aligned (8)));
2204 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2205 abi_ulong extra_size; /* Should be 0 */
2206 qemu_siginfo_fpu_t fpu_state;
2207 };
2208 struct target_rt_signal_frame {
2209 struct sparc_stackf ss;
2210 siginfo_t info;
2211 abi_ulong regs[20];
2212 sigset_t mask;
2213 abi_ulong fpu_save;
2214 unsigned int insns[2];
2215 stack_t stack;
2216 unsigned int extra_size; /* Should be 0 */
2217 qemu_siginfo_fpu_t fpu_state;
2218 };
2219
2220 #define UREG_O0 16
2221 #define UREG_O6 22
2222 #define UREG_I0 0
2223 #define UREG_I1 1
2224 #define UREG_I2 2
2225 #define UREG_I3 3
2226 #define UREG_I4 4
2227 #define UREG_I5 5
2228 #define UREG_I6 6
2229 #define UREG_I7 7
2230 #define UREG_L0 8
2231 #define UREG_FP UREG_I6
2232 #define UREG_SP UREG_O6
2233
2234 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2235 CPUSPARCState *env,
2236 unsigned long framesize)
2237 {
2238 abi_ulong sp;
2239
2240 sp = env->regwptr[UREG_FP];
2241
2242 /* This is the X/Open sanctioned signal stack switching. */
2243 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2244 if (!on_sig_stack(sp)
2245 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2246 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2247 }
2248 }
2249 return sp - framesize;
2250 }
2251
2252 static int
2253 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2254 {
2255 int err = 0, i;
2256
2257 __put_user(env->psr, &si->si_regs.psr);
2258 __put_user(env->pc, &si->si_regs.pc);
2259 __put_user(env->npc, &si->si_regs.npc);
2260 __put_user(env->y, &si->si_regs.y);
2261 for (i=0; i < 8; i++) {
2262 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2263 }
2264 for (i=0; i < 8; i++) {
2265 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2266 }
2267 __put_user(mask, &si->si_mask);
2268 return err;
2269 }
2270
2271 #if 0
2272 static int
2273 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2274 CPUSPARCState *env, unsigned long mask)
2275 {
2276 int err = 0;
2277
2278 __put_user(mask, &sc->sigc_mask);
2279 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2280 __put_user(env->pc, &sc->sigc_pc);
2281 __put_user(env->npc, &sc->sigc_npc);
2282 __put_user(env->psr, &sc->sigc_psr);
2283 __put_user(env->gregs[1], &sc->sigc_g1);
2284 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2285
2286 return err;
2287 }
2288 #endif
2289 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2290
2291 static void setup_frame(int sig, struct target_sigaction *ka,
2292 target_sigset_t *set, CPUSPARCState *env)
2293 {
2294 abi_ulong sf_addr;
2295 struct target_signal_frame *sf;
2296 int sigframe_size, err, i;
2297
2298 /* 1. Make sure everything is clean */
2299 //synchronize_user_stack();
2300
2301 sigframe_size = NF_ALIGNEDSZ;
2302 sf_addr = get_sigframe(ka, env, sigframe_size);
2303 trace_user_setup_frame(env, sf_addr);
2304
2305 sf = lock_user(VERIFY_WRITE, sf_addr,
2306 sizeof(struct target_signal_frame), 0);
2307 if (!sf) {
2308 goto sigsegv;
2309 }
2310 #if 0
2311 if (invalid_frame_pointer(sf, sigframe_size))
2312 goto sigill_and_return;
2313 #endif
2314 /* 2. Save the current process state */
2315 err = setup___siginfo(&sf->info, env, set->sig[0]);
2316 __put_user(0, &sf->extra_size);
2317
2318 //save_fpu_state(regs, &sf->fpu_state);
2319 //__put_user(&sf->fpu_state, &sf->fpu_save);
2320
2321 __put_user(set->sig[0], &sf->info.si_mask);
2322 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2323 __put_user(set->sig[i + 1], &sf->extramask[i]);
2324 }
2325
2326 for (i = 0; i < 8; i++) {
2327 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2328 }
2329 for (i = 0; i < 8; i++) {
2330 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2331 }
2332 if (err)
2333 goto sigsegv;
2334
2335 /* 3. signal handler back-trampoline and parameters */
2336 env->regwptr[UREG_FP] = sf_addr;
2337 env->regwptr[UREG_I0] = sig;
2338 env->regwptr[UREG_I1] = sf_addr +
2339 offsetof(struct target_signal_frame, info);
2340 env->regwptr[UREG_I2] = sf_addr +
2341 offsetof(struct target_signal_frame, info);
2342
2343 /* 4. signal handler */
2344 env->pc = ka->_sa_handler;
2345 env->npc = (env->pc + 4);
2346 /* 5. return to kernel instructions */
2347 if (ka->sa_restorer) {
2348 env->regwptr[UREG_I7] = ka->sa_restorer;
2349 } else {
2350 uint32_t val32;
2351
2352 env->regwptr[UREG_I7] = sf_addr +
2353 offsetof(struct target_signal_frame, insns) - 2 * 4;
2354
2355 /* mov __NR_sigreturn, %g1 */
2356 val32 = 0x821020d8;
2357 __put_user(val32, &sf->insns[0]);
2358
2359 /* t 0x10 */
2360 val32 = 0x91d02010;
2361 __put_user(val32, &sf->insns[1]);
2362 if (err)
2363 goto sigsegv;
2364
2365 /* Flush instruction space. */
2366 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2367 // tb_flush(env);
2368 }
2369 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2370 return;
2371 #if 0
2372 sigill_and_return:
2373 force_sig(TARGET_SIGILL);
2374 #endif
2375 sigsegv:
2376 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2377 force_sig(TARGET_SIGSEGV);
2378 }
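/* Editorial note on the trampoline words written above, assuming the
 * standard SPARC32 instruction formats: 0x821020d8 is the format-3
 * "or %g0, 0xd8, %g1" (i.e. "mov 0xd8, %g1", loading the sigreturn
 * syscall number into %g1) and 0x91d02010 is "ta 0x10", the Linux
 * software trap used for system calls. A minimal sketch of how the
 * two words decompose, kept out of the build:
 */
#if 0
static uint32_t sparc_mov_imm_to_g1(uint32_t imm13)
{
    /* op=2 | rd=%g1 | op3=or | rs1=%g0 | i=1 | simm13 */
    return 0x82102000u | (imm13 & 0x1fffu);
}

static uint32_t sparc_trap_always(uint32_t sw_trap)
{
    /* op=2 | cond=always | op3=Ticc | i=1 | trap number */
    return 0x91d02000u | (sw_trap & 0x7fu);
}
#endif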
2379
2380 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2381 target_siginfo_t *info,
2382 target_sigset_t *set, CPUSPARCState *env)
2383 {
2384 fprintf(stderr, "setup_rt_frame: not implemented\n");
2385 }
2386
2387 long do_sigreturn(CPUSPARCState *env)
2388 {
2389 abi_ulong sf_addr;
2390 struct target_signal_frame *sf;
2391 uint32_t up_psr, pc, npc;
2392 target_sigset_t set;
2393 sigset_t host_set;
2394 int err=0, i;
2395
2396 sf_addr = env->regwptr[UREG_FP];
2397 trace_user_do_sigreturn(env, sf_addr);
2398 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2399 goto segv_and_exit;
2400 }
2401
2402 /* 1. Make sure we are not getting garbage from the user */
2403
2404 if (sf_addr & 3)
2405 goto segv_and_exit;
2406
2407 __get_user(pc, &sf->info.si_regs.pc);
2408 __get_user(npc, &sf->info.si_regs.npc);
2409
2410 if ((pc | npc) & 3) {
2411 goto segv_and_exit;
2412 }
2413
2414 /* 2. Restore the state */
2415 __get_user(up_psr, &sf->info.si_regs.psr);
2416
2417 /* User can only change condition codes and FPU enabling in %psr. */
2418 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2419 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2420
2421 env->pc = pc;
2422 env->npc = npc;
2423 __get_user(env->y, &sf->info.si_regs.y);
2424 for (i=0; i < 8; i++) {
2425 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2426 }
2427 for (i=0; i < 8; i++) {
2428 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2429 }
2430
2431 /* FIXME: implement FPU save/restore:
2432 * __get_user(fpu_save, &sf->fpu_save);
2433 * if (fpu_save)
2434 * err |= restore_fpu_state(env, fpu_save);
2435 */
2436
2437 /* This is pretty much atomic; no amount of locking would prevent
2438 * the races that exist anyway.
2439 */
2440 __get_user(set.sig[0], &sf->info.si_mask);
2441 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2442 __get_user(set.sig[i], &sf->extramask[i - 1]);
2443 }
2444
2445 target_to_host_sigset_internal(&host_set, &set);
2446 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2447
2448 if (err) {
2449 goto segv_and_exit;
2450 }
2451 unlock_user_struct(sf, sf_addr, 0);
2452 return -TARGET_QEMU_ESIGRETURN;
2453
2454 segv_and_exit:
2455 unlock_user_struct(sf, sf_addr, 0);
2456 force_sig(TARGET_SIGSEGV);
2457 }
2458
2459 long do_rt_sigreturn(CPUSPARCState *env)
2460 {
2461 trace_user_do_rt_sigreturn(env, 0);
2462 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2463 return -TARGET_ENOSYS;
2464 }
2465
2466 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2467 #define MC_TSTATE 0
2468 #define MC_PC 1
2469 #define MC_NPC 2
2470 #define MC_Y 3
2471 #define MC_G1 4
2472 #define MC_G2 5
2473 #define MC_G3 6
2474 #define MC_G4 7
2475 #define MC_G5 8
2476 #define MC_G6 9
2477 #define MC_G7 10
2478 #define MC_O0 11
2479 #define MC_O1 12
2480 #define MC_O2 13
2481 #define MC_O3 14
2482 #define MC_O4 15
2483 #define MC_O5 16
2484 #define MC_O6 17
2485 #define MC_O7 18
2486 #define MC_NGREG 19
2487
2488 typedef abi_ulong target_mc_greg_t;
2489 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2490
2491 struct target_mc_fq {
2492 abi_ulong *mcfq_addr;
2493 uint32_t mcfq_insn;
2494 };
2495
2496 struct target_mc_fpu {
2497 union {
2498 uint32_t sregs[32];
2499 uint64_t dregs[32];
2500 //uint128_t qregs[16];
2501 } mcfpu_fregs;
2502 abi_ulong mcfpu_fsr;
2503 abi_ulong mcfpu_fprs;
2504 abi_ulong mcfpu_gsr;
2505 struct target_mc_fq *mcfpu_fq;
2506 unsigned char mcfpu_qcnt;
2507 unsigned char mcfpu_qentsz;
2508 unsigned char mcfpu_enab;
2509 };
2510 typedef struct target_mc_fpu target_mc_fpu_t;
2511
2512 typedef struct {
2513 target_mc_gregset_t mc_gregs;
2514 target_mc_greg_t mc_fp;
2515 target_mc_greg_t mc_i7;
2516 target_mc_fpu_t mc_fpregs;
2517 } target_mcontext_t;
2518
2519 struct target_ucontext {
2520 struct target_ucontext *tuc_link;
2521 abi_ulong tuc_flags;
2522 target_sigset_t tuc_sigmask;
2523 target_mcontext_t tuc_mcontext;
2524 };
2525
2526 /* A V9 register window */
2527 struct target_reg_window {
2528 abi_ulong locals[8];
2529 abi_ulong ins[8];
2530 };
2531
2532 #define TARGET_STACK_BIAS 2047
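/* Editorial note: on SPARC V9 the ABI keeps %sp and %fp biased by
 * -2047, so the real address of a register window save area is the
 * register value plus this bias. That is why the code below adds
 * TARGET_STACK_BIAS to regwptr[UREG_I6] before touching ins[6]/ins[7]
 * of the guest's register window. */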
2533
2534 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2535 void sparc64_set_context(CPUSPARCState *env)
2536 {
2537 abi_ulong ucp_addr;
2538 struct target_ucontext *ucp;
2539 target_mc_gregset_t *grp;
2540 abi_ulong pc, npc, tstate;
2541 abi_ulong fp, i7, w_addr;
2542 unsigned int i;
2543
2544 ucp_addr = env->regwptr[UREG_I0];
2545 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2546 goto do_sigsegv;
2547 }
2548 grp = &ucp->tuc_mcontext.mc_gregs;
2549 __get_user(pc, &((*grp)[MC_PC]));
2550 __get_user(npc, &((*grp)[MC_NPC]));
2551 if ((pc | npc) & 3) {
2552 goto do_sigsegv;
2553 }
2554 if (env->regwptr[UREG_I1]) {
2555 target_sigset_t target_set;
2556 sigset_t set;
2557
2558 if (TARGET_NSIG_WORDS == 1) {
2559 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2560 } else {
2561 abi_ulong *src, *dst;
2562 src = ucp->tuc_sigmask.sig;
2563 dst = target_set.sig;
2564 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2565 __get_user(*dst, src);
2566 }
2567 }
2568 target_to_host_sigset_internal(&set, &target_set);
2569 do_sigprocmask(SIG_SETMASK, &set, NULL);
2570 }
2571 env->pc = pc;
2572 env->npc = npc;
2573 __get_user(env->y, &((*grp)[MC_Y]));
2574 __get_user(tstate, &((*grp)[MC_TSTATE]));
2575 env->asi = (tstate >> 24) & 0xff;
2576 cpu_put_ccr(env, tstate >> 32);
2577 cpu_put_cwp64(env, tstate & 0x1f);
2578 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2579 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2580 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2581 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2582 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2583 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2584 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2585 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2586 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2587 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2588 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2589 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2590 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2591 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2592 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2593
2594 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2595 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2596
2597 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2598 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2599 abi_ulong) != 0) {
2600 goto do_sigsegv;
2601 }
2602 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2603 abi_ulong) != 0) {
2604 goto do_sigsegv;
2605 }
2606 /* FIXME this does not match how the kernel handles the FPU in
2607 * its sparc64_set_context implementation. In particular the FPU
2608 * is only restored if fenab is non-zero in:
2609 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2610 */
2611 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2612 {
2613 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2614 for (i = 0; i < 64; i++, src++) {
2615 if (i & 1) {
2616 __get_user(env->fpr[i/2].l.lower, src);
2617 } else {
2618 __get_user(env->fpr[i/2].l.upper, src);
2619 }
2620 }
2621 }
2622 __get_user(env->fsr,
2623 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2624 __get_user(env->gsr,
2625 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2626 unlock_user_struct(ucp, ucp_addr, 0);
2627 return;
2628 do_sigsegv:
2629 unlock_user_struct(ucp, ucp_addr, 0);
2630 force_sig(TARGET_SIGSEGV);
2631 }
2632
2633 void sparc64_get_context(CPUSPARCState *env)
2634 {
2635 abi_ulong ucp_addr;
2636 struct target_ucontext *ucp;
2637 target_mc_gregset_t *grp;
2638 target_mcontext_t *mcp;
2639 abi_ulong fp, i7, w_addr;
2640 int err;
2641 unsigned int i;
2642 target_sigset_t target_set;
2643 sigset_t set;
2644
2645 ucp_addr = env->regwptr[UREG_I0];
2646 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2647 goto do_sigsegv;
2648 }
2649
2650 mcp = &ucp->tuc_mcontext;
2651 grp = &mcp->mc_gregs;
2652
2653 /* Skip over the trap instruction, first. */
2654 env->pc = env->npc;
2655 env->npc += 4;
2656
2657 err = 0;
2658
2659 do_sigprocmask(0, NULL, &set);
2660 host_to_target_sigset_internal(&target_set, &set);
2661 if (TARGET_NSIG_WORDS == 1) {
2662 __put_user(target_set.sig[0],
2663 (abi_ulong *)&ucp->tuc_sigmask);
2664 } else {
2665 abi_ulong *src, *dst;
2666 src = target_set.sig;
2667 dst = ucp->tuc_sigmask.sig;
2668 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2669 __put_user(*src, dst);
2670 }
2671 if (err)
2672 goto do_sigsegv;
2673 }
2674
2675 /* XXX: tstate must be saved properly */
2676 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2677 __put_user(env->pc, &((*grp)[MC_PC]));
2678 __put_user(env->npc, &((*grp)[MC_NPC]));
2679 __put_user(env->y, &((*grp)[MC_Y]));
2680 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2681 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2682 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2683 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2684 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2685 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2686 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2687 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2688 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2689 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2690 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2691 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2692 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2693 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2694 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2695
2696 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2697 fp = i7 = 0;
2698 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2699 abi_ulong) != 0) {
2700 goto do_sigsegv;
2701 }
2702 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2703 abi_ulong) != 0) {
2704 goto do_sigsegv;
2705 }
2706 __put_user(fp, &(mcp->mc_fp));
2707 __put_user(i7, &(mcp->mc_i7));
2708
2709 {
2710 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2711 for (i = 0; i < 64; i++, dst++) {
2712 if (i & 1) {
2713 __put_user(env->fpr[i/2].l.lower, dst);
2714 } else {
2715 __put_user(env->fpr[i/2].l.upper, dst);
2716 }
2717 }
2718 }
2719 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2720 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2721 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2722
2723 if (err)
2724 goto do_sigsegv;
2725 unlock_user_struct(ucp, ucp_addr, 1);
2726 return;
2727 do_sigsegv:
2728 unlock_user_struct(ucp, ucp_addr, 1);
2729 force_sig(TARGET_SIGSEGV);
2730 }
2731 #endif
2732 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2733
2734 # if defined(TARGET_ABI_MIPSO32)
2735 struct target_sigcontext {
2736 uint32_t sc_regmask; /* Unused */
2737 uint32_t sc_status;
2738 uint64_t sc_pc;
2739 uint64_t sc_regs[32];
2740 uint64_t sc_fpregs[32];
2741 uint32_t sc_ownedfp; /* Unused */
2742 uint32_t sc_fpc_csr;
2743 uint32_t sc_fpc_eir; /* Unused */
2744 uint32_t sc_used_math;
2745 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2746 uint32_t pad0;
2747 uint64_t sc_mdhi;
2748 uint64_t sc_mdlo;
2749 target_ulong sc_hi1; /* Was sc_cause */
2750 target_ulong sc_lo1; /* Was sc_badvaddr */
2751 target_ulong sc_hi2; /* Was sc_sigset[4] */
2752 target_ulong sc_lo2;
2753 target_ulong sc_hi3;
2754 target_ulong sc_lo3;
2755 };
2756 # else /* N32 || N64 */
2757 struct target_sigcontext {
2758 uint64_t sc_regs[32];
2759 uint64_t sc_fpregs[32];
2760 uint64_t sc_mdhi;
2761 uint64_t sc_hi1;
2762 uint64_t sc_hi2;
2763 uint64_t sc_hi3;
2764 uint64_t sc_mdlo;
2765 uint64_t sc_lo1;
2766 uint64_t sc_lo2;
2767 uint64_t sc_lo3;
2768 uint64_t sc_pc;
2769 uint32_t sc_fpc_csr;
2770 uint32_t sc_used_math;
2771 uint32_t sc_dsp;
2772 uint32_t sc_reserved;
2773 };
2774 # endif /* O32 */
2775
2776 struct sigframe {
2777 uint32_t sf_ass[4]; /* argument save space for o32 */
2778 uint32_t sf_code[2]; /* signal trampoline */
2779 struct target_sigcontext sf_sc;
2780 target_sigset_t sf_mask;
2781 };
2782
2783 struct target_ucontext {
2784 target_ulong tuc_flags;
2785 target_ulong tuc_link;
2786 target_stack_t tuc_stack;
2787 target_ulong pad0;
2788 struct target_sigcontext tuc_mcontext;
2789 target_sigset_t tuc_sigmask;
2790 };
2791
2792 struct target_rt_sigframe {
2793 uint32_t rs_ass[4]; /* argument save space for o32 */
2794 uint32_t rs_code[2]; /* signal trampoline */
2795 struct target_siginfo rs_info;
2796 struct target_ucontext rs_uc;
2797 };
2798
2799 /* Install trampoline to jump back from signal handler */
2800 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2801 {
2802 int err = 0;
2803
2804 /*
2805 * Set up the return code ...
2806 *
2807 * li v0, __NR__foo_sigreturn
2808 * syscall
2809 */
2810
2811 __put_user(0x24020000 + syscall, tramp + 0);
2812 __put_user(0x0000000c , tramp + 1);
2813 return err;
2814 }
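/* Editorial note on the two words stored above, assuming the standard
 * MIPS32 encodings: 0x24020000 | n is "addiu $v0, $zero, n" (the
 * expansion of "li v0, n") and 0x0000000c is "syscall", so the
 * trampoline loads the sigreturn number into $v0 and traps into the
 * kernel. A minimal sketch of the word construction, kept out of the
 * build:
 */
#if 0
static uint32_t mips_li_v0(uint16_t imm)
{
    return 0x24020000u | imm;   /* addiu $v0, $zero, imm */
}
#define MIPS_SYSCALL_INSN 0x0000000cu
#endif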
2815
2816 static inline void setup_sigcontext(CPUMIPSState *regs,
2817 struct target_sigcontext *sc)
2818 {
2819 int i;
2820
2821 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2822 regs->hflags &= ~MIPS_HFLAG_BMASK;
2823
2824 __put_user(0, &sc->sc_regs[0]);
2825 for (i = 1; i < 32; ++i) {
2826 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2827 }
2828
2829 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2830 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2831
2832 /* Rather than checking for dsp existence, always copy. The storage
2833 would just be garbage otherwise. */
2834 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2835 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2836 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2837 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2838 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2839 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2840 {
2841 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2842 __put_user(dsp, &sc->sc_dsp);
2843 }
2844
2845 __put_user(1, &sc->sc_used_math);
2846
2847 for (i = 0; i < 32; ++i) {
2848 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2849 }
2850 }
2851
2852 static inline void
2853 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2854 {
2855 int i;
2856
2857 __get_user(regs->CP0_EPC, &sc->sc_pc);
2858
2859 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2860 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2861
2862 for (i = 1; i < 32; ++i) {
2863 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2864 }
2865
2866 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2867 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2868 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2869 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2870 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2871 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2872 {
2873 uint32_t dsp;
2874 __get_user(dsp, &sc->sc_dsp);
2875 cpu_wrdsp(dsp, 0x3ff, regs);
2876 }
2877
2878 for (i = 0; i < 32; ++i) {
2879 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2880 }
2881 }
2882
2883 /*
2884 * Determine which stack to use.
2885 */
2886 static inline abi_ulong
2887 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2888 {
2889 unsigned long sp;
2890
2891 /* Default to using normal stack */
2892 sp = regs->active_tc.gpr[29];
2893
2894 /*
2895 * FPU emulator may have its own trampoline active just
2896 * above the user stack, 16 bytes before the next lowest
2897 * 16-byte boundary. Try to avoid trashing it.
2898 */
2899 sp -= 32;
2900
2901 /* This is the X/Open sanctioned signal stack switching. */
2902 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2903 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2904 }
2905
2906 return (sp - frame_size) & ~7;
2907 }
2908
2909 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2910 {
2911 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2912 env->hflags &= ~MIPS_HFLAG_M16;
2913 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2914 env->active_tc.PC &= ~(target_ulong) 1;
2915 }
2916 }
2917
2918 # if defined(TARGET_ABI_MIPSO32)
2919 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2920 static void setup_frame(int sig, struct target_sigaction * ka,
2921 target_sigset_t *set, CPUMIPSState *regs)
2922 {
2923 struct sigframe *frame;
2924 abi_ulong frame_addr;
2925 int i;
2926
2927 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2928 trace_user_setup_frame(regs, frame_addr);
2929 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2930 goto give_sigsegv;
2931 }
2932
2933 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2934
2935 setup_sigcontext(regs, &frame->sf_sc);
2936
2937 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2938 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2939 }
2940
2941 /*
2942 * Arguments to signal handler:
2943 *
2944 * a0 = signal number
2945 * a1 = 0 (should be cause)
2946 * a2 = pointer to struct sigcontext
2947 *
2948 * $25 and PC point to the signal handler, $29 points to the
2949 * struct sigframe.
2950 */
2951 regs->active_tc.gpr[ 4] = sig;
2952 regs->active_tc.gpr[ 5] = 0;
2953 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2954 regs->active_tc.gpr[29] = frame_addr;
2955 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2956 /* The original kernel code sets CP0_EPC to the handler,
2957 * since it returns to userland using eret. We cannot do
2958 * this here, so we must set PC directly. */
2959 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2960 mips_set_hflags_isa_mode_from_pc(regs);
2961 unlock_user_struct(frame, frame_addr, 1);
2962 return;
2963
2964 give_sigsegv:
2965 force_sig(TARGET_SIGSEGV/*, current*/);
2966 }
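/* Editorial note: from the guest's point of view the frame built by
 * setup_frame() above matches a classic non-SA_SIGINFO o32 handler,
 * while setup_rt_frame() below passes a pointer to siginfo in a1 and
 * to the ucontext in a2 instead. A hypothetical guest-side handler
 * for the non-rt case (sketch only; "struct sigcontext" here is the
 * guest C library's type, not a QEMU one):
 */
#if 0
void guest_handler(int sig, int unused_cause, struct sigcontext *sc)
{
    /* a0 = signal number, a1 = 0, a2 = &frame->sf_sc as set up above */
}
#endif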
2967
2968 long do_sigreturn(CPUMIPSState *regs)
2969 {
2970 struct sigframe *frame;
2971 abi_ulong frame_addr;
2972 sigset_t blocked;
2973 target_sigset_t target_set;
2974 int i;
2975
2976 frame_addr = regs->active_tc.gpr[29];
2977 trace_user_do_sigreturn(regs, frame_addr);
2978 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2979 goto badframe;
2980
2981 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2982 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2983 }
2984
2985 target_to_host_sigset_internal(&blocked, &target_set);
2986 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
2987
2988 restore_sigcontext(regs, &frame->sf_sc);
2989
2990 #if 0
2991 /*
2992 * Don't let your children do this ...
2993 */
2994 __asm__ __volatile__(
2995 "move\t$29, %0\n\t"
2996 "j\tsyscall_exit"
2997 :/* no outputs */
2998 :"r" (&regs));
2999 /* Unreached */
3000 #endif
3001
3002 regs->active_tc.PC = regs->CP0_EPC;
3003 mips_set_hflags_isa_mode_from_pc(regs);
3004 /* I am not sure this is right, but it seems to work;
3005 * maybe a problem with nested signals? */
3006 regs->CP0_EPC = 0;
3007 return -TARGET_QEMU_ESIGRETURN;
3008
3009 badframe:
3010 force_sig(TARGET_SIGSEGV/*, current*/);
3011 return 0;
3012 }
3013 # endif /* O32 */
3014
3015 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3016 target_siginfo_t *info,
3017 target_sigset_t *set, CPUMIPSState *env)
3018 {
3019 struct target_rt_sigframe *frame;
3020 abi_ulong frame_addr;
3021 int i;
3022
3023 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3024 trace_user_setup_rt_frame(env, frame_addr);
3025 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3026 goto give_sigsegv;
3027 }
3028
3029 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3030
3031 tswap_siginfo(&frame->rs_info, info);
3032
3033 __put_user(0, &frame->rs_uc.tuc_flags);
3034 __put_user(0, &frame->rs_uc.tuc_link);
3035 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3036 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3037 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3038 &frame->rs_uc.tuc_stack.ss_flags);
3039
3040 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3041
3042 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3043 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3044 }
3045
3046 /*
3047 * Arguments to signal handler:
3048 *
3049 * a0 = signal number
3050 * a1 = pointer to siginfo_t
3051 * a2 = pointer to struct ucontext
3052 *
3053 * $25 and PC point to the signal handler, $29 points to the
3054 * struct sigframe.
3055 */
3056 env->active_tc.gpr[ 4] = sig;
3057 env->active_tc.gpr[ 5] = frame_addr
3058 + offsetof(struct target_rt_sigframe, rs_info);
3059 env->active_tc.gpr[ 6] = frame_addr
3060 + offsetof(struct target_rt_sigframe, rs_uc);
3061 env->active_tc.gpr[29] = frame_addr;
3062 env->active_tc.gpr[31] = frame_addr
3063 + offsetof(struct target_rt_sigframe, rs_code);
3064 /* The original kernel code sets CP0_EPC to the handler,
3065 * since it returns to userland using eret. We cannot do
3066 * this here, so we must set PC directly. */
3067 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3068 mips_set_hflags_isa_mode_from_pc(env);
3069 unlock_user_struct(frame, frame_addr, 1);
3070 return;
3071
3072 give_sigsegv:
3073 unlock_user_struct(frame, frame_addr, 1);
3074 force_sig(TARGET_SIGSEGV/*, current*/);
3075 }
3076
3077 long do_rt_sigreturn(CPUMIPSState *env)
3078 {
3079 struct target_rt_sigframe *frame;
3080 abi_ulong frame_addr;
3081 sigset_t blocked;
3082
3083 frame_addr = env->active_tc.gpr[29];
3084 trace_user_do_rt_sigreturn(env, frame_addr);
3085 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3086 goto badframe;
3087 }
3088
3089 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3090 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3091
3092 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3093
3094 if (do_sigaltstack(frame_addr +
3095 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3096 0, get_sp_from_cpustate(env)) == -EFAULT)
3097 goto badframe;
3098
3099 env->active_tc.PC = env->CP0_EPC;
3100 mips_set_hflags_isa_mode_from_pc(env);
3101 /* I am not sure this is right, but it seems to work;
3102 * maybe a problem with nested signals? */
3103 env->CP0_EPC = 0;
3104 return -TARGET_QEMU_ESIGRETURN;
3105
3106 badframe:
3107 force_sig(TARGET_SIGSEGV/*, current*/);
3108 return 0;
3109 }
3110
3111 #elif defined(TARGET_SH4)
3112
3113 /*
3114 * code and data structures from linux kernel:
3115 * include/asm-sh/sigcontext.h
3116 * arch/sh/kernel/signal.c
3117 */
3118
3119 struct target_sigcontext {
3120 target_ulong oldmask;
3121
3122 /* CPU registers */
3123 target_ulong sc_gregs[16];
3124 target_ulong sc_pc;
3125 target_ulong sc_pr;
3126 target_ulong sc_sr;
3127 target_ulong sc_gbr;
3128 target_ulong sc_mach;
3129 target_ulong sc_macl;
3130
3131 /* FPU registers */
3132 target_ulong sc_fpregs[16];
3133 target_ulong sc_xfpregs[16];
3134 unsigned int sc_fpscr;
3135 unsigned int sc_fpul;
3136 unsigned int sc_ownedfp;
3137 };
3138
3139 struct target_sigframe
3140 {
3141 struct target_sigcontext sc;
3142 target_ulong extramask[TARGET_NSIG_WORDS-1];
3143 uint16_t retcode[3];
3144 };
3145
3146
3147 struct target_ucontext {
3148 target_ulong tuc_flags;
3149 struct target_ucontext *tuc_link;
3150 target_stack_t tuc_stack;
3151 struct target_sigcontext tuc_mcontext;
3152 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3153 };
3154
3155 struct target_rt_sigframe
3156 {
3157 struct target_siginfo info;
3158 struct target_ucontext uc;
3159 uint16_t retcode[3];
3160 };
3161
3162
3163 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3164 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
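/* Editorial note: MOVW(n) builds the SH "mov.w @(disp,PC),R3" encoding
 * (0x93dd), whose effective address is PC + 4 + 2*disp; with MOVW(2)
 * the displacement is 0, so it loads the 16-bit word placed two slots
 * later in retcode[] (the sigreturn syscall number) into R3.
 * TRAP_NOARG (0xc310) is "trapa #0x10", the Linux/SH syscall trap with
 * the number taken from R3, as used by the trampolines below. */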
3165
3166 static abi_ulong get_sigframe(struct target_sigaction *ka,
3167 unsigned long sp, size_t frame_size)
3168 {
3169 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3170 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3171 }
3172
3173 return (sp - frame_size) & -8ul;
3174 }
3175
3176 static void setup_sigcontext(struct target_sigcontext *sc,
3177 CPUSH4State *regs, unsigned long mask)
3178 {
3179 int i;
3180
3181 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3182 COPY(gregs[0]); COPY(gregs[1]);
3183 COPY(gregs[2]); COPY(gregs[3]);
3184 COPY(gregs[4]); COPY(gregs[5]);
3185 COPY(gregs[6]); COPY(gregs[7]);
3186 COPY(gregs[8]); COPY(gregs[9]);
3187 COPY(gregs[10]); COPY(gregs[11]);
3188 COPY(gregs[12]); COPY(gregs[13]);
3189 COPY(gregs[14]); COPY(gregs[15]);
3190 COPY(gbr); COPY(mach);
3191 COPY(macl); COPY(pr);
3192 COPY(sr); COPY(pc);
3193 #undef COPY
3194
3195 for (i=0; i<16; i++) {
3196 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3197 }
3198 __put_user(regs->fpscr, &sc->sc_fpscr);
3199 __put_user(regs->fpul, &sc->sc_fpul);
3200
3201 /* non-iBCS2 extensions.. */
3202 __put_user(mask, &sc->oldmask);
3203 }
3204
3205 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc,
3206 target_ulong *r0_p)
3207 {
3208 int i;
3209
3210 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3211 COPY(gregs[1]);
3212 COPY(gregs[2]); COPY(gregs[3]);
3213 COPY(gregs[4]); COPY(gregs[5]);
3214 COPY(gregs[6]); COPY(gregs[7]);
3215 COPY(gregs[8]); COPY(gregs[9]);
3216 COPY(gregs[10]); COPY(gregs[11]);
3217 COPY(gregs[12]); COPY(gregs[13]);
3218 COPY(gregs[14]); COPY(gregs[15]);
3219 COPY(gbr); COPY(mach);
3220 COPY(macl); COPY(pr);
3221 COPY(sr); COPY(pc);
3222 #undef COPY
3223
3224 for (i=0; i<16; i++) {
3225 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3226 }
3227 __get_user(regs->fpscr, &sc->sc_fpscr);
3228 __get_user(regs->fpul, &sc->sc_fpul);
3229
3230 regs->tra = -1; /* disable syscall checks */
3231 __get_user(*r0_p, &sc->sc_gregs[0]);
3232 }
3233
3234 static void setup_frame(int sig, struct target_sigaction *ka,
3235 target_sigset_t *set, CPUSH4State *regs)
3236 {
3237 struct target_sigframe *frame;
3238 abi_ulong frame_addr;
3239 int i;
3240
3241 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3242 trace_user_setup_frame(regs, frame_addr);
3243 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3244 goto give_sigsegv;
3245 }
3246
3247 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3248
3249 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3250 __put_user(set->sig[i + 1], &frame->extramask[i]);
3251 }
3252
3253 /* Set up to return from userspace. If provided, use a stub
3254 already in userspace. */
3255 if (ka->sa_flags & TARGET_SA_RESTORER) {
3256 regs->pr = (unsigned long) ka->sa_restorer;
3257 } else {
3258 /* Generate return code (system call to sigreturn) */
3259 abi_ulong retcode_addr = frame_addr +
3260 offsetof(struct target_sigframe, retcode);
3261 __put_user(MOVW(2), &frame->retcode[0]);
3262 __put_user(TRAP_NOARG, &frame->retcode[1]);
3263 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3264 regs->pr = (unsigned long) retcode_addr;
3265 }
3266
3267 /* Set up registers for signal handler */
3268 regs->gregs[15] = frame_addr;
3269 regs->gregs[4] = sig; /* Arg for signal handler */
3270 regs->gregs[5] = 0;
3271 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3272 regs->pc = (unsigned long) ka->_sa_handler;
3273
3274 unlock_user_struct(frame, frame_addr, 1);
3275 return;
3276
3277 give_sigsegv:
3278 unlock_user_struct(frame, frame_addr, 1);
3279 force_sig(TARGET_SIGSEGV);
3280 }
3281
3282 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3283 target_siginfo_t *info,
3284 target_sigset_t *set, CPUSH4State *regs)
3285 {
3286 struct target_rt_sigframe *frame;
3287 abi_ulong frame_addr;
3288 int i;
3289
3290 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3291 trace_user_setup_rt_frame(regs, frame_addr);
3292 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3293 goto give_sigsegv;
3294 }
3295
3296 tswap_siginfo(&frame->info, info);
3297
3298 /* Create the ucontext. */
3299 __put_user(0, &frame->uc.tuc_flags);
3300 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3301 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3302 &frame->uc.tuc_stack.ss_sp);
3303 __put_user(sas_ss_flags(regs->gregs[15]),
3304 &frame->uc.tuc_stack.ss_flags);
3305 __put_user(target_sigaltstack_used.ss_size,
3306 &frame->uc.tuc_stack.ss_size);
3307 setup_sigcontext(&frame->uc.tuc_mcontext,
3308 regs, set->sig[0]);
3309 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3310 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3311 }
3312
3313 /* Set up to return from userspace. If provided, use a stub
3314 already in userspace. */
3315 if (ka->sa_flags & TARGET_SA_RESTORER) {
3316 regs->pr = (unsigned long) ka->sa_restorer;
3317 } else {
3318 /* Generate return code (system call to sigreturn) */
3319 abi_ulong retcode_addr = frame_addr +
3320 offsetof(struct target_rt_sigframe, retcode);
3321 __put_user(MOVW(2), &frame->retcode[0]);
3322 __put_user(TRAP_NOARG, &frame->retcode[1]);
3323 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3324 regs->pr = (unsigned long) retcode_addr;
3325 }
3326
3327 /* Set up registers for signal handler */
3328 regs->gregs[15] = frame_addr;
3329 regs->gregs[4] = sig; /* Arg for signal handler */
3330 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3331 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3332 regs->pc = (unsigned long) ka->_sa_handler;
3333
3334 unlock_user_struct(frame, frame_addr, 1);
3335 return;
3336
3337 give_sigsegv:
3338 unlock_user_struct(frame, frame_addr, 1);
3339 force_sig(TARGET_SIGSEGV);
3340 }
3341
3342 long do_sigreturn(CPUSH4State *regs)
3343 {
3344 struct target_sigframe *frame;
3345 abi_ulong frame_addr;
3346 sigset_t blocked;
3347 target_sigset_t target_set;
3348 target_ulong r0;
3349 int i;
3350 int err = 0;
3351
3352 frame_addr = regs->gregs[15];
3353 trace_user_do_sigreturn(regs, frame_addr);
3354 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3355 goto badframe;
3356 }
3357
3358 __get_user(target_set.sig[0], &frame->sc.oldmask);
3359 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3360 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3361 }
3362
3363 if (err)
3364 goto badframe;
3365
3366 target_to_host_sigset_internal(&blocked, &target_set);
3367 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3368
3369 restore_sigcontext(regs, &frame->sc, &r0);
3370
3371 unlock_user_struct(frame, frame_addr, 0);
3372 return r0;
3373
3374 badframe:
3375 unlock_user_struct(frame, frame_addr, 0);
3376 force_sig(TARGET_SIGSEGV);
3377 return 0;
3378 }
3379
3380 long do_rt_sigreturn(CPUSH4State *regs)
3381 {
3382 struct target_rt_sigframe *frame;
3383 abi_ulong frame_addr;
3384 sigset_t blocked;
3385 target_ulong r0;
3386
3387 frame_addr = regs->gregs[15];
3388 trace_user_do_rt_sigreturn(regs, frame_addr);
3389 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3390 goto badframe;
3391 }
3392
3393 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3394 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3395
3396 restore_sigcontext(regs, &frame->uc.tuc_mcontext, &r0);
3397
3398 if (do_sigaltstack(frame_addr +
3399 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3400 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3401 goto badframe;
3402 }
3403
3404 unlock_user_struct(frame, frame_addr, 0);
3405 return r0;
3406
3407 badframe:
3408 unlock_user_struct(frame, frame_addr, 0);
3409 force_sig(TARGET_SIGSEGV);
3410 return 0;
3411 }
3412 #elif defined(TARGET_MICROBLAZE)
3413
3414 struct target_sigcontext {
3415 struct target_pt_regs regs; /* needs to be first */
3416 uint32_t oldmask;
3417 };
3418
3419 struct target_stack_t {
3420 abi_ulong ss_sp;
3421 int ss_flags;
3422 unsigned int ss_size;
3423 };
3424
3425 struct target_ucontext {
3426 abi_ulong tuc_flags;
3427 abi_ulong tuc_link;
3428 struct target_stack_t tuc_stack;
3429 struct target_sigcontext tuc_mcontext;
3430 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3431 };
3432
3433 /* Signal frames. */
3434 struct target_signal_frame {
3435 struct target_ucontext uc;
3436 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3437 uint32_t tramp[2];
3438 };
3439
3440 struct rt_signal_frame {
3441 siginfo_t info;
3442 struct ucontext uc;
3443 uint32_t tramp[2];
3444 };
3445
3446 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3447 {
3448 __put_user(env->regs[0], &sc->regs.r0);
3449 __put_user(env->regs[1], &sc->regs.r1);
3450 __put_user(env->regs[2], &sc->regs.r2);
3451 __put_user(env->regs[3], &sc->regs.r3);
3452 __put_user(env->regs[4], &sc->regs.r4);
3453 __put_user(env->regs[5], &sc->regs.r5);
3454 __put_user(env->regs[6], &sc->regs.r6);
3455 __put_user(env->regs[7], &sc->regs.r7);
3456 __put_user(env->regs[8], &sc->regs.r8);
3457 __put_user(env->regs[9], &sc->regs.r9);
3458 __put_user(env->regs[10], &sc->regs.r10);
3459 __put_user(env->regs[11], &sc->regs.r11);
3460 __put_user(env->regs[12], &sc->regs.r12);
3461 __put_user(env->regs[13], &sc->regs.r13);
3462 __put_user(env->regs[14], &sc->regs.r14);
3463 __put_user(env->regs[15], &sc->regs.r15);
3464 __put_user(env->regs[16], &sc->regs.r16);
3465 __put_user(env->regs[17], &sc->regs.r17);
3466 __put_user(env->regs[18], &sc->regs.r18);
3467 __put_user(env->regs[19], &sc->regs.r19);
3468 __put_user(env->regs[20], &sc->regs.r20);
3469 __put_user(env->regs[21], &sc->regs.r21);
3470 __put_user(env->regs[22], &sc->regs.r22);
3471 __put_user(env->regs[23], &sc->regs.r23);
3472 __put_user(env->regs[24], &sc->regs.r24);
3473 __put_user(env->regs[25], &sc->regs.r25);
3474 __put_user(env->regs[26], &sc->regs.r26);
3475 __put_user(env->regs[27], &sc->regs.r27);
3476 __put_user(env->regs[28], &sc->regs.r28);
3477 __put_user(env->regs[29], &sc->regs.r29);
3478 __put_user(env->regs[30], &sc->regs.r30);
3479 __put_user(env->regs[31], &sc->regs.r31);
3480 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3481 }
3482
3483 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3484 {
3485 __get_user(env->regs[0], &sc->regs.r0);
3486 __get_user(env->regs[1], &sc->regs.r1);
3487 __get_user(env->regs[2], &sc->regs.r2);
3488 __get_user(env->regs[3], &sc->regs.r3);
3489 __get_user(env->regs[4], &sc->regs.r4);
3490 __get_user(env->regs[5], &sc->regs.r5);
3491 __get_user(env->regs[6], &sc->regs.r6);
3492 __get_user(env->regs[7], &sc->regs.r7);
3493 __get_user(env->regs[8], &sc->regs.r8);
3494 __get_user(env->regs[9], &sc->regs.r9);
3495 __get_user(env->regs[10], &sc->regs.r10);
3496 __get_user(env->regs[11], &sc->regs.r11);
3497 __get_user(env->regs[12], &sc->regs.r12);
3498 __get_user(env->regs[13], &sc->regs.r13);
3499 __get_user(env->regs[14], &sc->regs.r14);
3500 __get_user(env->regs[15], &sc->regs.r15);
3501 __get_user(env->regs[16], &sc->regs.r16);
3502 __get_user(env->regs[17], &sc->regs.r17);
3503 __get_user(env->regs[18], &sc->regs.r18);
3504 __get_user(env->regs[19], &sc->regs.r19);
3505 __get_user(env->regs[20], &sc->regs.r20);
3506 __get_user(env->regs[21], &sc->regs.r21);
3507 __get_user(env->regs[22], &sc->regs.r22);
3508 __get_user(env->regs[23], &sc->regs.r23);
3509 __get_user(env->regs[24], &sc->regs.r24);
3510 __get_user(env->regs[25], &sc->regs.r25);
3511 __get_user(env->regs[26], &sc->regs.r26);
3512 __get_user(env->regs[27], &sc->regs.r27);
3513 __get_user(env->regs[28], &sc->regs.r28);
3514 __get_user(env->regs[29], &sc->regs.r29);
3515 __get_user(env->regs[30], &sc->regs.r30);
3516 __get_user(env->regs[31], &sc->regs.r31);
3517 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3518 }
3519
3520 static abi_ulong get_sigframe(struct target_sigaction *ka,
3521 CPUMBState *env, int frame_size)
3522 {
3523 abi_ulong sp = env->regs[1];
3524
3525 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3526 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3527 }
3528
3529 return ((sp - frame_size) & -8UL);
3530 }
3531
3532 static void setup_frame(int sig, struct target_sigaction *ka,
3533 target_sigset_t *set, CPUMBState *env)
3534 {
3535 struct target_signal_frame *frame;
3536 abi_ulong frame_addr;
3537 int i;
3538
3539 frame_addr = get_sigframe(ka, env, sizeof *frame);
3540 trace_user_setup_frame(env, frame_addr);
3541 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3542 goto badframe;
3543
3544 /* Save the mask. */
3545 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3546
3547 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3548 __put_user(set->sig[i], &frame->extramask[i - 1]);
3549 }
3550
3551 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3552
3553 /* Set up to return from userspace. If provided, use a stub
3554 already in userspace. */
3555 /* The minus-8 offset caters for the "rtsd r15, 8" return offset */
3556 if (ka->sa_flags & TARGET_SA_RESTORER) {
3557 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3558 } else {
3559 uint32_t t;
3560 /* Note, these encodings are _big endian_! */
3561 /* addi r12, r0, __NR_sigreturn */
3562 t = 0x31800000UL | TARGET_NR_sigreturn;
3563 __put_user(t, frame->tramp + 0);
3564 /* brki r14, 0x8 */
3565 t = 0xb9cc0008UL;
3566 __put_user(t, frame->tramp + 1);
3567
3568 /* Returning from the sighandler will jump to the tramp.
3569 The -8 offset is because the return is "rtsd r15, 8". */
3570 env->regs[15] = frame_addr + offsetof(typeof(*frame), tramp) - 8;
3571 }
3572
3573 /* Set up registers for signal handler */
3574 env->regs[1] = frame_addr;
3575 /* Signal handler args: */
3576 env->regs[5] = sig; /* Arg 0: signum */
3577 env->regs[6] = 0;
3578 /* arg 1: sigcontext */
3579 env->regs[7] = frame_addr + offsetof(typeof(*frame), uc);
3580
3581 /* Offset of 4 to handle microblaze rtid r14, 0 */
3582 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3583
3584 unlock_user_struct(frame, frame_addr, 1);
3585 return;
3586 badframe:
3587 force_sig(TARGET_SIGSEGV);
3588 }
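/* Editorial note on the trampoline synthesised in setup_frame() above,
 * assuming the usual MicroBlaze Linux conventions: the syscall number
 * travels in r12, so 0x31800000 | NR is the immediate-add into r12,
 * and "brki r14, 0x8" (0xb9cc0008) branches to the kernel's syscall
 * entry vector at address 0x8 with the return address in r14. The
 * handler's "rtsd r15, 8" return lands 8 bytes past r15, which is why
 * r15 is loaded with the trampoline (or restorer) address minus 8. */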
3589
3590 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3591 target_siginfo_t *info,
3592 target_sigset_t *set, CPUMBState *env)
3593 {
3594 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3595 }
3596
3597 long do_sigreturn(CPUMBState *env)
3598 {
3599 struct target_signal_frame *frame;
3600 abi_ulong frame_addr;
3601 target_sigset_t target_set;
3602 sigset_t set;
3603 int i;
3604
3605 frame_addr = env->regs[R_SP];
3606 trace_user_do_sigreturn(env, frame_addr);
3607 /* Make sure the guest isn't playing games. */
3608 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3609 goto badframe;
3610
3611 /* Restore blocked signals */
3612 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3613 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3614 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3615 }
3616 target_to_host_sigset_internal(&set, &target_set);
3617 do_sigprocmask(SIG_SETMASK, &set, NULL);
3618
3619 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3620 /* We got here through a sigreturn syscall, our path back is via an
3621 rtb insn, so set up r14 for that. */
3622 env->regs[14] = env->sregs[SR_PC];
3623
3624 unlock_user_struct(frame, frame_addr, 0);
3625 return env->regs[10];
3626 badframe:
3627 force_sig(TARGET_SIGSEGV);
3628 }
3629
3630 long do_rt_sigreturn(CPUMBState *env)
3631 {
3632 trace_user_do_rt_sigreturn(env, 0);
3633 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3634 return -TARGET_ENOSYS;
3635 }
3636
3637 #elif defined(TARGET_CRIS)
3638
3639 struct target_sigcontext {
3640 struct target_pt_regs regs; /* needs to be first */
3641 uint32_t oldmask;
3642 uint32_t usp; /* usp before stacking this gunk on it */
3643 };
3644
3645 /* Signal frames. */
3646 struct target_signal_frame {
3647 struct target_sigcontext sc;
3648 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3649 uint16_t retcode[4]; /* Trampoline code. */
3650 };
3651
3652 struct rt_signal_frame {
3653 siginfo_t *pinfo;
3654 void *puc;
3655 siginfo_t info;
3656 struct ucontext uc;
3657 uint16_t retcode[4]; /* Trampoline code. */
3658 };
3659
3660 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3661 {
3662 __put_user(env->regs[0], &sc->regs.r0);
3663 __put_user(env->regs[1], &sc->regs.r1);
3664 __put_user(env->regs[2], &sc->regs.r2);
3665 __put_user(env->regs[3], &sc->regs.r3);
3666 __put_user(env->regs[4], &sc->regs.r4);
3667 __put_user(env->regs[5], &sc->regs.r5);
3668 __put_user(env->regs[6], &sc->regs.r6);
3669 __put_user(env->regs[7], &sc->regs.r7);
3670 __put_user(env->regs[8], &sc->regs.r8);
3671 __put_user(env->regs[9], &sc->regs.r9);
3672 __put_user(env->regs[10], &sc->regs.r10);
3673 __put_user(env->regs[11], &sc->regs.r11);
3674 __put_user(env->regs[12], &sc->regs.r12);
3675 __put_user(env->regs[13], &sc->regs.r13);
3676 __put_user(env->regs[14], &sc->usp);
3677 __put_user(env->regs[15], &sc->regs.acr);
3678 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3679 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3680 __put_user(env->pc, &sc->regs.erp);
3681 }
3682
3683 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3684 {
3685 __get_user(env->regs[0], &sc->regs.r0);
3686 __get_user(env->regs[1], &sc->regs.r1);
3687 __get_user(env->regs[2], &sc->regs.r2);
3688 __get_user(env->regs[3], &sc->regs.r3);
3689 __get_user(env->regs[4], &sc->regs.r4);
3690 __get_user(env->regs[5], &sc->regs.r5);
3691 __get_user(env->regs[6], &sc->regs.r6);
3692 __get_user(env->regs[7], &sc->regs.r7);
3693 __get_user(env->regs[8], &sc->regs.r8);
3694 __get_user(env->regs[9], &sc->regs.r9);
3695 __get_user(env->regs[10], &sc->regs.r10);
3696 __get_user(env->regs[11], &sc->regs.r11);
3697 __get_user(env->regs[12], &sc->regs.r12);
3698 __get_user(env->regs[13], &sc->regs.r13);
3699 __get_user(env->regs[14], &sc->usp);
3700 __get_user(env->regs[15], &sc->regs.acr);
3701 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3702 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3703 __get_user(env->pc, &sc->regs.erp);
3704 }
3705
3706 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3707 {
3708 abi_ulong sp;
3709 /* Align the stack downwards to 4. */
3710 sp = (env->regs[R_SP] & ~3);
3711 return sp - framesize;
3712 }
3713
3714 static void setup_frame(int sig, struct target_sigaction *ka,
3715 target_sigset_t *set, CPUCRISState *env)
3716 {
3717 struct target_signal_frame *frame;
3718 abi_ulong frame_addr;
3719 int i;
3720
3721 frame_addr = get_sigframe(env, sizeof *frame);
3722 trace_user_setup_frame(env, frame_addr);
3723 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3724 goto badframe;
3725
3726 /*
3727 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3728 * use this trampoline anymore but it sets it up for GDB.
3729 * In QEMU, using the trampoline simplifies things a bit so we use it.
3730 *
3731 * This is movu.w __NR_sigreturn, r9; break 13;
3732 */
3733 __put_user(0x9c5f, frame->retcode+0);
3734 __put_user(TARGET_NR_sigreturn,
3735 frame->retcode + 1);
3736 __put_user(0xe93d, frame->retcode + 2);
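/* Per the comment above, the three halfwords just stored give the guest
   "movu.w __NR_sigreturn, r9" (opcode word plus immediate) followed by
   "break 13". */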
3737
3738 /* Save the mask. */
3739 __put_user(set->sig[0], &frame->sc.oldmask);
3740
3741 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3742 __put_user(set->sig[i], &frame->extramask[i - 1]);
3743 }
3744
3745 setup_sigcontext(&frame->sc, env);
3746
3747 /* Move the stack and set up the arguments for the handler. */
3748 env->regs[R_SP] = frame_addr;
3749 env->regs[10] = sig;
3750 env->pc = (unsigned long) ka->_sa_handler;
3751 /* Link SRP so the guest returns through the trampoline. */
3752 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3753
3754 unlock_user_struct(frame, frame_addr, 1);
3755 return;
3756 badframe:
3757 force_sig(TARGET_SIGSEGV);
3758 }
3759
3760 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3761 target_siginfo_t *info,
3762 target_sigset_t *set, CPUCRISState *env)
3763 {
3764 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3765 }
3766
3767 long do_sigreturn(CPUCRISState *env)
3768 {
3769 struct target_signal_frame *frame;
3770 abi_ulong frame_addr;
3771 target_sigset_t target_set;
3772 sigset_t set;
3773 int i;
3774
3775 frame_addr = env->regs[R_SP];
3776 trace_user_do_sigreturn(env, frame_addr);
3777 /* Make sure the guest isn't playing games. */
3778 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3779 goto badframe;
3780 }
3781
3782 /* Restore blocked signals */
3783 __get_user(target_set.sig[0], &frame->sc.oldmask);
3784 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3785 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3786 }
3787 target_to_host_sigset_internal(&set, &target_set);
3788 do_sigprocmask(SIG_SETMASK, &set, NULL);
3789
3790 restore_sigcontext(&frame->sc, env);
3791 unlock_user_struct(frame, frame_addr, 0);
3792 return env->regs[10];
3793 badframe:
3794 force_sig(TARGET_SIGSEGV);
3795 }
3796
3797 long do_rt_sigreturn(CPUCRISState *env)
3798 {
3799 trace_user_do_rt_sigreturn(env, 0);
3800 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3801 return -TARGET_ENOSYS;
3802 }
3803
3804 #elif defined(TARGET_OPENRISC)
3805
3806 struct target_sigcontext {
3807 struct target_pt_regs regs;
3808 abi_ulong oldmask;
3809 abi_ulong usp;
3810 };
3811
3812 struct target_ucontext {
3813 abi_ulong tuc_flags;
3814 abi_ulong tuc_link;
3815 target_stack_t tuc_stack;
3816 struct target_sigcontext tuc_mcontext;
3817 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3818 };
3819
3820 struct target_rt_sigframe {
3821 abi_ulong pinfo;
3822 uint64_t puc;
3823 struct target_siginfo info;
3824 struct target_sigcontext sc;
3825 struct target_ucontext uc;
3826 unsigned char retcode[16]; /* trampoline code */
3827 };
3828
3829 /* This is the asm-generic/ucontext.h version */
3830 #if 0
3831 static int restore_sigcontext(CPUOpenRISCState *regs,
3832 struct target_sigcontext *sc)
3833 {
3834 unsigned int err = 0;
3835 unsigned long old_usp;
3836
3837 /* Always make any pending restarted system call return -EINTR */
3838 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3839
3840 /* restore the regs from &sc->regs (same as sc, since regs is first)
3841 * (sc is already checked for VERIFY_READ since the sigframe was
3842 * checked in sys_sigreturn previously)
3843 */
3844
3845 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3846 goto badframe;
3847 }
3848
3849 /* make sure the U-flag is set so user-mode cannot fool us */
3850
3851 regs->sr &= ~SR_SM;
3852
3853 /* restore the old USP as it was before we stacked the sc etc.
3854 * (we cannot just pop the sigcontext since we aligned the sp and
3855 * stuff after pushing it)
3856 */
3857
3858 __get_user(old_usp, &sc->usp);
3859 phx_signal("old_usp 0x%lx", old_usp);
3860
3861 __PHX__ REALLY /* ??? */
3862 wrusp(old_usp);
3863 regs->gpr[1] = old_usp;
3864
3865 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3866 * after this completes, but we don't use that mechanism. maybe we can
3867 * use it now ?
3868 */
3869
3870 return err;
3871
3872 badframe:
3873 return 1;
3874 }
3875 #endif
3876
3877 /* Set up a signal frame. */
3878
3879 static void setup_sigcontext(struct target_sigcontext *sc,
3880 CPUOpenRISCState *regs,
3881 unsigned long mask)
3882 {
3883 unsigned long usp = regs->gpr[1];
3884
3885 /* copy the regs. they are first in sc so we can use sc directly */
3886
3887 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3888
3889 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3890 the signal handler. The frametype will be restored to its previous
3891 value in restore_sigcontext. */
3892 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3893
3894 /* then some other stuff */
3895 __put_user(mask, &sc->oldmask);
3896 __put_user(usp, &sc->usp);
3897 }
3898
3899 static inline unsigned long align_sigframe(unsigned long sp)
3900 {
3901 unsigned long i;
3902 i = sp & ~3UL;
3903 return i;
3904 }
3905
3906 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3907 CPUOpenRISCState *regs,
3908 size_t frame_size)
3909 {
3910 unsigned long sp = regs->gpr[1];
3911 int onsigstack = on_sig_stack(sp);
3912
3913 /* redzone */
3914 /* This is the X/Open sanctioned signal stack switching. */
3915 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3916 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3917 }
3918
3919 sp = align_sigframe(sp - frame_size);
3920
3921 /*
3922 * If we are on the alternate signal stack and would overflow it, don't.
3923 * Return an always-bogus address instead so we will die with SIGSEGV.
3924 */
3925
3926 if (onsigstack && !likely(on_sig_stack(sp))) {
3927 return -1L;
3928 }
3929
3930 return sp;
3931 }
3932
3933 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3934 target_siginfo_t *info,
3935 target_sigset_t *set, CPUOpenRISCState *env)
3936 {
3937 int err = 0;
3938 abi_ulong frame_addr;
3939 unsigned long return_ip;
3940 struct target_rt_sigframe *frame;
3941 abi_ulong info_addr, uc_addr;
3942
3943 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3944 trace_user_setup_rt_frame(env, frame_addr);
3945 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3946 goto give_sigsegv;
3947 }
3948
3949 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
3950 __put_user(info_addr, &frame->pinfo);
3951 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
3952 __put_user(uc_addr, &frame->puc);
3953
3954 if (ka->sa_flags & TARGET_SA_SIGINFO) {
3955 tswap_siginfo(&frame->info, info);
3956 }
3957
3958 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
3959 __put_user(0, &frame->uc.tuc_flags);
3960 __put_user(0, &frame->uc.tuc_link);
3961 __put_user(target_sigaltstack_used.ss_sp,
3962 &frame->uc.tuc_stack.ss_sp);
3963 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
3964 __put_user(target_sigaltstack_used.ss_size,
3965 &frame->uc.tuc_stack.ss_size);
3966 setup_sigcontext(&frame->sc, env, set->sig[0]);
3967
3968 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
3969
3970 /* trampoline - the desired return ip is the retcode itself */
3971 return_ip = frame_addr + offsetof(struct target_rt_sigframe, retcode);
3972 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */
3973 __put_user(0xa960, (short *)(frame->retcode + 0));
3974 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
3975 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
3976 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
3977
3978 if (err) {
3979 goto give_sigsegv;
3980 }
3981
3982 /* TODO what is the current->exec_domain stuff and invmap ? */
3983
3984 /* Set up registers for signal handler */
3985 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
3986 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
3987 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
3988 env->gpr[4] = info_addr; /* arg 2: (siginfo_t*) */
3989 env->gpr[5] = uc_addr; /* arg 3: ucontext */
3990
3991 /* actually move the usp to reflect the stacked frame */
3992 env->gpr[1] = frame_addr;
3993
3994 return;
3995
3996 give_sigsegv:
3997 unlock_user_struct(frame, frame_addr, 1);
3998 if (sig == TARGET_SIGSEGV) {
3999 ka->_sa_handler = TARGET_SIG_DFL;
4000 }
4001 force_sig(TARGET_SIGSEGV);
4002 }
4003
4004 long do_sigreturn(CPUOpenRISCState *env)
4005 {
4006 trace_user_do_sigreturn(env, 0);
4007 fprintf(stderr, "do_sigreturn: not implemented\n");
4008 return -TARGET_ENOSYS;
4009 }
4010
4011 long do_rt_sigreturn(CPUOpenRISCState *env)
4012 {
4013 trace_user_do_rt_sigreturn(env, 0);
4014 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4015 return -TARGET_ENOSYS;
4016 }
4017 /* TARGET_OPENRISC */
4018
4019 #elif defined(TARGET_S390X)
4020
4021 #define __NUM_GPRS 16
4022 #define __NUM_FPRS 16
4023 #define __NUM_ACRS 16
4024
4025 #define S390_SYSCALL_SIZE 2
4026 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4027
4028 #define _SIGCONTEXT_NSIG 64
4029 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4030 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4031 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
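/* With 64 signals and 64 bits per word, _SIGCONTEXT_NSIG_WORDS is 1, so the
   oldmask[] array in target_sigcontext below is a single word. */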
4032 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4033 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
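/* 0x0a00 is the base encoding of the s390 "svc" instruction; the retcode
   stubs below OR a syscall number into it to form "svc __NR_sigreturn" or
   "svc __NR_rt_sigreturn". */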
4034
4035 typedef struct {
4036 target_psw_t psw;
4037 target_ulong gprs[__NUM_GPRS];
4038 unsigned int acrs[__NUM_ACRS];
4039 } target_s390_regs_common;
4040
4041 typedef struct {
4042 unsigned int fpc;
4043 double fprs[__NUM_FPRS];
4044 } target_s390_fp_regs;
4045
4046 typedef struct {
4047 target_s390_regs_common regs;
4048 target_s390_fp_regs fpregs;
4049 } target_sigregs;
4050
4051 struct target_sigcontext {
4052 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4053 target_sigregs *sregs;
4054 };
4055
4056 typedef struct {
4057 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4058 struct target_sigcontext sc;
4059 target_sigregs sregs;
4060 int signo;
4061 uint8_t retcode[S390_SYSCALL_SIZE];
4062 } sigframe;
4063
4064 struct target_ucontext {
4065 target_ulong tuc_flags;
4066 struct target_ucontext *tuc_link;
4067 target_stack_t tuc_stack;
4068 target_sigregs tuc_mcontext;
4069 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4070 };
4071
4072 typedef struct {
4073 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4074 uint8_t retcode[S390_SYSCALL_SIZE];
4075 struct target_siginfo info;
4076 struct target_ucontext uc;
4077 } rt_sigframe;
4078
4079 static inline abi_ulong
4080 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4081 {
4082 abi_ulong sp;
4083
4084 /* Default to using normal stack */
4085 sp = env->regs[15];
4086
4087 /* This is the X/Open sanctioned signal stack switching. */
4088 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4089 if (!sas_ss_flags(sp)) {
4090 sp = target_sigaltstack_used.ss_sp +
4091 target_sigaltstack_used.ss_size;
4092 }
4093 }
4094
4095 /* This is the legacy signal stack switching. */
4096 else if (/* FIXME !user_mode(regs) */ 0 &&
4097 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4098 ka->sa_restorer) {
4099 sp = (abi_ulong) ka->sa_restorer;
4100 }
4101
4102 return (sp - frame_size) & -8ul;
4103 }
4104
4105 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4106 {
4107 int i;
4108 //save_access_regs(current->thread.acrs); FIXME
4109
4110 /* Copy a 'clean' PSW mask to the user to avoid leaking
4111 information about whether PER is currently on. */
4112 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4113 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4114 for (i = 0; i < 16; i++) {
4115 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4116 }
4117 for (i = 0; i < 16; i++) {
4118 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4119 }
4120 /*
4121 * We have to store the fp registers to current->thread.fp_regs
4122 * to merge them with the emulated registers.
4123 */
4124 //save_fp_regs(&current->thread.fp_regs); FIXME
4125 for (i = 0; i < 16; i++) {
4126 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4127 }
4128 }
4129
4130 static void setup_frame(int sig, struct target_sigaction *ka,
4131 target_sigset_t *set, CPUS390XState *env)
4132 {
4133 sigframe *frame;
4134 abi_ulong frame_addr;
4135
4136 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4137 trace_user_setup_frame(env, frame_addr);
4138 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4139 goto give_sigsegv;
4140 }
4141
4142 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4143
4144 save_sigregs(env, &frame->sregs);
4145
4146 __put_user((abi_ulong)(frame_addr + offsetof(typeof(*frame), sregs)),
4147 (abi_ulong *)&frame->sc.sregs);
4148
4149 /* Set up to return from userspace. If provided, use a stub
4150 already in userspace. */
4151 if (ka->sa_flags & TARGET_SA_RESTORER) {
4152 env->regs[14] = (unsigned long)
4153 ka->sa_restorer | PSW_ADDR_AMODE;
4154 } else {
4155 env->regs[14] = (unsigned long)
4156 frame->retcode | PSW_ADDR_AMODE;
4157 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4158 (uint16_t *)(frame->retcode));
4159 }
4160
4161 /* Set up backchain. */
4162 __put_user(env->regs[15], (abi_ulong *) frame);
4163
4164 /* Set up registers for signal handler */
4165 env->regs[15] = frame_addr;
4166 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4167
4168 env->regs[2] = sig; //map_signal(sig);
4169 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4170
4171 /* We forgot to include these in the sigcontext.
4172 To avoid breaking binary compatibility, they are passed as args. */
4173 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4174 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4175
4176 /* Place signal number on stack to allow backtrace from handler. */
4177 __put_user(env->regs[2], (int *) &frame->signo);
4178 unlock_user_struct(frame, frame_addr, 1);
4179 return;
4180
4181 give_sigsegv:
4182 force_sig(TARGET_SIGSEGV);
4183 }
4184
4185 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4186 target_siginfo_t *info,
4187 target_sigset_t *set, CPUS390XState *env)
4188 {
4189 int i;
4190 rt_sigframe *frame;
4191 abi_ulong frame_addr;
4192
4193 frame_addr = get_sigframe(ka, env, sizeof *frame);
4194 trace_user_setup_rt_frame(env, frame_addr);
4195 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4196 goto give_sigsegv;
4197 }
4198
4199 tswap_siginfo(&frame->info, info);
4200
4201 /* Create the ucontext. */
4202 __put_user(0, &frame->uc.tuc_flags);
4203 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4204 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4205 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4206 &frame->uc.tuc_stack.ss_flags);
4207 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4208 save_sigregs(env, &frame->uc.tuc_mcontext);
4209 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4210 __put_user((abi_ulong)set->sig[i],
4211 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4212 }
4213
4214 /* Set up to return from userspace. If provided, use a stub
4215 already in userspace. */
4216 if (ka->sa_flags & TARGET_SA_RESTORER) {
4217 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4218 } else {
4219 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4220 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4221 (uint16_t *)(frame->retcode));
4222 }
4223
4224 /* Set up backchain. */
4225 __put_user(env->regs[15], (abi_ulong *) frame);
4226
4227 /* Set up registers for signal handler */
4228 env->regs[15] = frame_addr;
4229 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4230
4231 env->regs[2] = sig; //map_signal(sig);
4232 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4233 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4234 return;
4235
4236 give_sigsegv:
4237 force_sig(TARGET_SIGSEGV);
4238 }
4239
4240 static int
4241 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4242 {
4243 int err = 0;
4244 int i;
4245
4246 for (i = 0; i < 16; i++) {
4247 __get_user(env->regs[i], &sc->regs.gprs[i]);
4248 }
4249
4250 __get_user(env->psw.mask, &sc->regs.psw.mask);
4251 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4252 (unsigned long long)env->psw.addr);
4253 __get_user(env->psw.addr, &sc->regs.psw.addr);
4254 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4255
4256 for (i = 0; i < 16; i++) {
4257 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4258 }
4259 for (i = 0; i < 16; i++) {
4260 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4261 }
4262
4263 return err;
4264 }
4265
4266 long do_sigreturn(CPUS390XState *env)
4267 {
4268 sigframe *frame;
4269 abi_ulong frame_addr = env->regs[15];
4270 target_sigset_t target_set;
4271 sigset_t set;
4272
4273 trace_user_do_sigreturn(env, frame_addr);
4274 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4275 goto badframe;
4276 }
4277 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4278
4279 target_to_host_sigset_internal(&set, &target_set);
4280 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
4281
4282 if (restore_sigregs(env, &frame->sregs)) {
4283 goto badframe;
4284 }
4285
4286 unlock_user_struct(frame, frame_addr, 0);
4287 return env->regs[2];
4288
4289 badframe:
4290 force_sig(TARGET_SIGSEGV);
4291 return 0;
4292 }
4293
4294 long do_rt_sigreturn(CPUS390XState *env)
4295 {
4296 rt_sigframe *frame;
4297 abi_ulong frame_addr = env->regs[15];
4298 sigset_t set;
4299
4300 trace_user_do_rt_sigreturn(env, frame_addr);
4301 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4302 goto badframe;
4303 }
4304 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4305
4306 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
4307
4308 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4309 goto badframe;
4310 }
4311
4312 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4313 get_sp_from_cpustate(env)) == -EFAULT) {
4314 goto badframe;
4315 }
4316 unlock_user_struct(frame, frame_addr, 0);
4317 return env->regs[2];
4318
4319 badframe:
4320 unlock_user_struct(frame, frame_addr, 0);
4321 force_sig(TARGET_SIGSEGV);
4322 return 0;
4323 }
4324
4325 #elif defined(TARGET_PPC)
4326
4327 /* Size of dummy stack frame allocated when calling signal handler.
4328 See arch/powerpc/include/asm/ptrace.h. */
4329 #if defined(TARGET_PPC64)
4330 #define SIGNAL_FRAMESIZE 128
4331 #else
4332 #define SIGNAL_FRAMESIZE 64
4333 #endif
4334
4335 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4336 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4337 struct target_mcontext {
4338 target_ulong mc_gregs[48];
4339 /* Includes fpscr. */
4340 uint64_t mc_fregs[33];
4341 target_ulong mc_pad[2];
4342 /* We need to handle Altivec and SPE at the same time, which no
4343 kernel needs to do. Fortunately, the kernel defines this bit to
4344 be Altivec-register-large all the time, rather than trying to
4345 twiddle it based on the specific platform. */
4346 union {
4347 /* SPE vector registers. One extra for SPEFSCR. */
4348 uint32_t spe[33];
4349 /* Altivec vector registers. The packing of VSCR and VRSAVE
4350 varies depending on whether we're PPC64 or not: PPC64 splits
4351 them apart; PPC32 stuffs them together. */
4352 #if defined(TARGET_PPC64)
4353 #define QEMU_NVRREG 34
4354 #else
4355 #define QEMU_NVRREG 33
4356 #endif
4357 ppc_avr_t altivec[QEMU_NVRREG];
4358 #undef QEMU_NVRREG
4359 } mc_vregs __attribute__((__aligned__(16)));
4360 };
4361
4362 /* See arch/powerpc/include/asm/sigcontext.h. */
4363 struct target_sigcontext {
4364 target_ulong _unused[4];
4365 int32_t signal;
4366 #if defined(TARGET_PPC64)
4367 int32_t pad0;
4368 #endif
4369 target_ulong handler;
4370 target_ulong oldmask;
4371 target_ulong regs; /* struct pt_regs __user * */
4372 #if defined(TARGET_PPC64)
4373 struct target_mcontext mcontext;
4374 #endif
4375 };
4376
4377 /* Indices for target_mcontext.mc_gregs, below.
4378 See arch/powerpc/include/asm/ptrace.h for details. */
4379 enum {
4380 TARGET_PT_R0 = 0,
4381 TARGET_PT_R1 = 1,
4382 TARGET_PT_R2 = 2,
4383 TARGET_PT_R3 = 3,
4384 TARGET_PT_R4 = 4,
4385 TARGET_PT_R5 = 5,
4386 TARGET_PT_R6 = 6,
4387 TARGET_PT_R7 = 7,
4388 TARGET_PT_R8 = 8,
4389 TARGET_PT_R9 = 9,
4390 TARGET_PT_R10 = 10,
4391 TARGET_PT_R11 = 11,
4392 TARGET_PT_R12 = 12,
4393 TARGET_PT_R13 = 13,
4394 TARGET_PT_R14 = 14,
4395 TARGET_PT_R15 = 15,
4396 TARGET_PT_R16 = 16,
4397 TARGET_PT_R17 = 17,
4398 TARGET_PT_R18 = 18,
4399 TARGET_PT_R19 = 19,
4400 TARGET_PT_R20 = 20,
4401 TARGET_PT_R21 = 21,
4402 TARGET_PT_R22 = 22,
4403 TARGET_PT_R23 = 23,
4404 TARGET_PT_R24 = 24,
4405 TARGET_PT_R25 = 25,
4406 TARGET_PT_R26 = 26,
4407 TARGET_PT_R27 = 27,
4408 TARGET_PT_R28 = 28,
4409 TARGET_PT_R29 = 29,
4410 TARGET_PT_R30 = 30,
4411 TARGET_PT_R31 = 31,
4412 TARGET_PT_NIP = 32,
4413 TARGET_PT_MSR = 33,
4414 TARGET_PT_ORIG_R3 = 34,
4415 TARGET_PT_CTR = 35,
4416 TARGET_PT_LNK = 36,
4417 TARGET_PT_XER = 37,
4418 TARGET_PT_CCR = 38,
4419 /* Yes, there are two registers with #39. One is 64-bit only. */
4420 TARGET_PT_MQ = 39,
4421 TARGET_PT_SOFTE = 39,
4422 TARGET_PT_TRAP = 40,
4423 TARGET_PT_DAR = 41,
4424 TARGET_PT_DSISR = 42,
4425 TARGET_PT_RESULT = 43,
4426 TARGET_PT_REGS_COUNT = 44
4427 };
4428
4429
4430 struct target_ucontext {
4431 target_ulong tuc_flags;
4432 target_ulong tuc_link; /* struct ucontext __user * */
4433 struct target_sigaltstack tuc_stack;
4434 #if !defined(TARGET_PPC64)
4435 int32_t tuc_pad[7];
4436 target_ulong tuc_regs; /* struct mcontext __user *
4437 points to uc_mcontext field */
4438 #endif
4439 target_sigset_t tuc_sigmask;
4440 #if defined(TARGET_PPC64)
4441 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4442 struct target_sigcontext tuc_sigcontext;
4443 #else
4444 int32_t tuc_maskext[30];
4445 int32_t tuc_pad2[3];
4446 struct target_mcontext tuc_mcontext;
4447 #endif
4448 };
4449
4450 /* See arch/powerpc/kernel/signal_32.c. */
4451 struct target_sigframe {
4452 struct target_sigcontext sctx;
4453 struct target_mcontext mctx;
4454 int32_t abigap[56];
4455 };
4456
4457 #if defined(TARGET_PPC64)
4458
4459 #define TARGET_TRAMP_SIZE 6
4460
4461 struct target_rt_sigframe {
4462 /* sys_rt_sigreturn requires the ucontext be the first field */
4463 struct target_ucontext uc;
4464 target_ulong _unused[2];
4465 uint32_t trampoline[TARGET_TRAMP_SIZE];
4466 target_ulong pinfo; /* struct siginfo __user * */
4467 target_ulong puc; /* void __user * */
4468 struct target_siginfo info;
4469 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4470 char abigap[288];
4471 } __attribute__((aligned(16)));
4472
4473 #else
4474
4475 struct target_rt_sigframe {
4476 struct target_siginfo info;
4477 struct target_ucontext uc;
4478 int32_t abigap[56];
4479 };
4480
4481 #endif
4482
4483 #if defined(TARGET_PPC64)
4484
4485 struct target_func_ptr {
4486 target_ulong entry;
4487 target_ulong toc;
4488 };
4489
4490 #endif
4491
4492 /* We use the mc_pad field for the signal return trampoline. */
4493 #define tramp mc_pad
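/* mc_pad provides two spare words, which is exactly what encode_trampoline()
   below writes there ("li r0,sigret; sc"). */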
4494
4495 /* See arch/powerpc/kernel/signal.c. */
4496 static target_ulong get_sigframe(struct target_sigaction *ka,
4497 CPUPPCState *env,
4498 int frame_size)
4499 {
4500 target_ulong oldsp, newsp;
4501
4502 oldsp = env->gpr[1];
4503
4504 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4505 (sas_ss_flags(oldsp) == 0)) {
4506 oldsp = (target_sigaltstack_used.ss_sp
4507 + target_sigaltstack_used.ss_size);
4508 }
4509
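/* Keep the new stack pointer 16-byte aligned, as the PowerPC ABIs require. */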
4510 newsp = (oldsp - frame_size) & ~0xFUL;
4511
4512 return newsp;
4513 }
4514
4515 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4516 {
4517 target_ulong msr = env->msr;
4518 int i;
4519 target_ulong ccr = 0;
4520
4521 /* In general, the kernel attempts to be intelligent about what it
4522 needs to save for Altivec/FP/SPE registers. We don't care that
4523 much, so we just go ahead and save everything. */
4524
4525 /* Save general registers. */
4526 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4527 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4528 }
4529 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4530 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4531 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4532 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4533
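/* Pack the eight 4-bit condition register fields into one 32-bit CCR image,
   CR0 in the most significant nibble. */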
4534 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4535 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4536 }
4537 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4538
4539 /* Save Altivec registers if necessary. */
4540 if (env->insns_flags & PPC_ALTIVEC) {
4541 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4542 ppc_avr_t *avr = &env->avr[i];
4543 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4544
4545 __put_user(avr->u64[0], &vreg->u64[0]);
4546 __put_user(avr->u64[1], &vreg->u64[1]);
4547 }
4548 /* Set MSR_VR in the saved MSR value to indicate that
4549 frame->mc_vregs contains valid data. */
4550 msr |= MSR_VR;
4551 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4552 &frame->mc_vregs.altivec[32].u32[3]);
4553 }
4554
4555 /* Save floating point registers. */
4556 if (env->insns_flags & PPC_FLOAT) {
4557 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4558 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4559 }
4560 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4561 }
4562
4563 /* Save SPE registers. The kernel only saves the high half. */
4564 if (env->insns_flags & PPC_SPE) {
4565 #if defined(TARGET_PPC64)
4566 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4567 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4568 }
4569 #else
4570 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4571 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4572 }
4573 #endif
4574 /* Set MSR_SPE in the saved MSR value to indicate that
4575 frame->mc_vregs contains valid data. */
4576 msr |= MSR_SPE;
4577 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4578 }
4579
4580 /* Store MSR. */
4581 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4582 }
4583
4584 static void encode_trampoline(int sigret, uint32_t *tramp)
4585 {
4586 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4587 if (sigret) {
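/* 0x38000000 is "addi r0,0,imm" (i.e. li r0,imm) and 0x44000002 is "sc",
   so the guest executes "li r0,sigret; sc". */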
4588 __put_user(0x38000000 | sigret, &tramp[0]);
4589 __put_user(0x44000002, &tramp[1]);
4590 }
4591 }
4592
4593 static void restore_user_regs(CPUPPCState *env,
4594 struct target_mcontext *frame, int sig)
4595 {
4596 target_ulong save_r2 = 0;
4597 target_ulong msr;
4598 target_ulong ccr;
4599
4600 int i;
4601
4602 if (!sig) {
4603 save_r2 = env->gpr[2];
4604 }
4605
4606 /* Restore general registers. */
4607 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4608 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4609 }
4610 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4611 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4612 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4613 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4614 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4615
4616 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4617 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4618 }
4619
4620 if (!sig) {
4621 env->gpr[2] = save_r2;
4622 }
4623 /* Restore MSR. */
4624 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4625
4626 /* If doing signal return, restore the previous little-endian mode. */
4627 if (sig)
4628 env->msr = (env->msr & ~MSR_LE) | (msr & MSR_LE);
4629
4630 /* Restore Altivec registers if necessary. */
4631 if (env->insns_flags & PPC_ALTIVEC) {
4632 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4633 ppc_avr_t *avr = &env->avr[i];
4634 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4635
4636 __get_user(avr->u64[0], &vreg->u64[0]);
4637 __get_user(avr->u64[1], &vreg->u64[1]);
4638 }
4639 /* Restore VRSAVE, which save_user_regs() stored alongside the
4640 Altivec registers. */
4641 __get_user(env->spr[SPR_VRSAVE],
4642 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4643 }
4644
4645 /* Restore floating point registers. */
4646 if (env->insns_flags & PPC_FLOAT) {
4647 uint64_t fpscr;
4648 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4649 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4650 }
4651 __get_user(fpscr, &frame->mc_fregs[32]);
4652 env->fpscr = (uint32_t) fpscr;
4653 }
4654
4655 /* Restore SPE registers. The kernel only saves the high half. */
4656 if (env->insns_flags & PPC_SPE) {
4657 #if defined(TARGET_PPC64)
4658 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4659 uint32_t hi;
4660
4661 __get_user(hi, &frame->mc_vregs.spe[i]);
4662 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4663 }
4664 #else
4665 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4666 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4667 }
4668 #endif
4669 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4670 }
4671 }
4672
4673 static void setup_frame(int sig, struct target_sigaction *ka,
4674 target_sigset_t *set, CPUPPCState *env)
4675 {
4676 struct target_sigframe *frame;
4677 struct target_sigcontext *sc;
4678 target_ulong frame_addr, newsp;
4679 int err = 0;
4680 #if defined(TARGET_PPC64)
4681 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4682 #endif
4683
4684 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4685 trace_user_setup_frame(env, frame_addr);
4686 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4687 goto sigsegv;
4688 sc = &frame->sctx;
4689
4690 __put_user(ka->_sa_handler, &sc->handler);
4691 __put_user(set->sig[0], &sc->oldmask);
4692 #if TARGET_ABI_BITS == 64
4693 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4694 #else
4695 __put_user(set->sig[1], &sc->_unused[3]);
4696 #endif
4697 __put_user(h2g(&frame->mctx), &sc->regs);
4698 __put_user(sig, &sc->signal);
4699
4700 /* Save user regs. */
4701 save_user_regs(env, &frame->mctx);
4702
4703 /* Construct the trampoline code on the stack. */
4704 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4705
4706 /* The kernel checks for the presence of a VDSO here. We don't
4707 emulate a vdso, so use a sigreturn system call. */
4708 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4709
4710 /* Turn off all fp exceptions. */
4711 env->fpscr = 0;
4712
4713 /* Create a stack frame for the caller of the handler. */
4714 newsp = frame_addr - SIGNAL_FRAMESIZE;
4715 err |= put_user(env->gpr[1], newsp, target_ulong);
4716
4717 if (err)
4718 goto sigsegv;
4719
4720 /* Set up registers for signal handler. */
4721 env->gpr[1] = newsp;
4722 env->gpr[3] = sig;
4723 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4724
4725 #if defined(TARGET_PPC64)
4726 if (get_ppc64_abi(image) < 2) {
4727 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4728 struct target_func_ptr *handler =
4729 (struct target_func_ptr *)g2h(ka->_sa_handler);
4730 env->nip = tswapl(handler->entry);
4731 env->gpr[2] = tswapl(handler->toc);
4732 } else {
4733 /* ELFv2 PPC64 function pointers are entry points, but R12
4734 * must also be set */
4735 env->nip = tswapl((target_ulong) ka->_sa_handler);
4736 env->gpr[12] = env->nip;
4737 }
4738 #else
4739 env->nip = (target_ulong) ka->_sa_handler;
4740 #endif
4741
4742 /* Signal handlers are entered in big-endian mode. */
4743 env->msr &= ~MSR_LE;
4744
4745 unlock_user_struct(frame, frame_addr, 1);
4746 return;
4747
4748 sigsegv:
4749 unlock_user_struct(frame, frame_addr, 1);
4750 force_sig(TARGET_SIGSEGV);
4751 }
4752
4753 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4754 target_siginfo_t *info,
4755 target_sigset_t *set, CPUPPCState *env)
4756 {
4757 struct target_rt_sigframe *rt_sf;
4758 uint32_t *trampptr = 0;
4759 struct target_mcontext *mctx = 0;
4760 target_ulong rt_sf_addr, newsp = 0;
4761 int i, err = 0;
4762 #if defined(TARGET_PPC64)
4763 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4764 #endif
4765
4766 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4767 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4768 goto sigsegv;
4769
4770 tswap_siginfo(&rt_sf->info, info);
4771
4772 __put_user(0, &rt_sf->uc.tuc_flags);
4773 __put_user(0, &rt_sf->uc.tuc_link);
4774 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4775 &rt_sf->uc.tuc_stack.ss_sp);
4776 __put_user(sas_ss_flags(env->gpr[1]),
4777 &rt_sf->uc.tuc_stack.ss_flags);
4778 __put_user(target_sigaltstack_used.ss_size,
4779 &rt_sf->uc.tuc_stack.ss_size);
4780 #if !defined(TARGET_PPC64)
4781 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4782 &rt_sf->uc.tuc_regs);
4783 #endif
4784 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4785 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4786 }
4787
4788 #if defined(TARGET_PPC64)
4789 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4790 trampptr = &rt_sf->trampoline[0];
4791 #else
4792 mctx = &rt_sf->uc.tuc_mcontext;
4793 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4794 #endif
4795
4796 save_user_regs(env, mctx);
4797 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4798
4799 /* The kernel checks for the presence of a VDSO here. We don't
4800 emulate a vdso, so use a sigreturn system call. */
4801 env->lr = (target_ulong) h2g(trampptr);
4802
4803 /* Turn off all fp exceptions. */
4804 env->fpscr = 0;
4805
4806 /* Create a stack frame for the caller of the handler. */
4807 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4808 err |= put_user(env->gpr[1], newsp, target_ulong);
4809
4810 if (err)
4811 goto sigsegv;
4812
4813 /* Set up registers for signal handler. */
4814 env->gpr[1] = newsp;
4815 env->gpr[3] = (target_ulong) sig;
4816 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4817 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4818 env->gpr[6] = (target_ulong) h2g(rt_sf);
4819
4820 #if defined(TARGET_PPC64)
4821 if (get_ppc64_abi(image) < 2) {
4822 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4823 struct target_func_ptr *handler =
4824 (struct target_func_ptr *)g2h(ka->_sa_handler);
4825 env->nip = tswapl(handler->entry);
4826 env->gpr[2] = tswapl(handler->toc);
4827 } else {
4828 /* ELFv2 PPC64 function pointers are entry points, but R12
4829 * must also be set */
4830 env->nip = tswapl((target_ulong) ka->_sa_handler);
4831 env->gpr[12] = env->nip;
4832 }
4833 #else
4834 env->nip = (target_ulong) ka->_sa_handler;
4835 #endif
4836
4837 /* Signal handlers are entered in big-endian mode. */
4838 env->msr &= ~MSR_LE;
4839
4840 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4841 return;
4842
4843 sigsegv:
4844 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4845 force_sig(TARGET_SIGSEGV);
4846
4847 }
4848
4849 long do_sigreturn(CPUPPCState *env)
4850 {
4851 struct target_sigcontext *sc = NULL;
4852 struct target_mcontext *sr = NULL;
4853 target_ulong sr_addr = 0, sc_addr;
4854 sigset_t blocked;
4855 target_sigset_t set;
4856
4857 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4858 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4859 goto sigsegv;
4860
4861 #if defined(TARGET_PPC64)
4862 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4863 #else
4864 __get_user(set.sig[0], &sc->oldmask);
4865 __get_user(set.sig[1], &sc->_unused[3]);
4866 #endif
4867 target_to_host_sigset_internal(&blocked, &set);
4868 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
4869
4870 __get_user(sr_addr, &sc->regs);
4871 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4872 goto sigsegv;
4873 restore_user_regs(env, sr, 1);
4874
4875 unlock_user_struct(sr, sr_addr, 1);
4876 unlock_user_struct(sc, sc_addr, 1);
4877 return -TARGET_QEMU_ESIGRETURN;
4878
4879 sigsegv:
4880 unlock_user_struct(sr, sr_addr, 1);
4881 unlock_user_struct(sc, sc_addr, 1);
4882 force_sig(TARGET_SIGSEGV);
4883 return 0;
4884 }
4885
4886 /* See arch/powerpc/kernel/signal_32.c. */
4887 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4888 {
4889 struct target_mcontext *mcp;
4890 target_ulong mcp_addr;
4891 sigset_t blocked;
4892 target_sigset_t set;
4893
4894 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4895 sizeof (set)))
4896 return 1;
4897
4898 #if defined(TARGET_PPC64)
4899 mcp_addr = h2g(ucp) +
4900 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4901 #else
4902 __get_user(mcp_addr, &ucp->tuc_regs);
4903 #endif
4904
4905 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4906 return 1;
4907
4908 target_to_host_sigset_internal(&blocked, &set);
4909 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
4910 restore_user_regs(env, mcp, sig);
4911
4912 unlock_user_struct(mcp, mcp_addr, 1);
4913 return 0;
4914 }
4915
4916 long do_rt_sigreturn(CPUPPCState *env)
4917 {
4918 struct target_rt_sigframe *rt_sf = NULL;
4919 target_ulong rt_sf_addr;
4920
4921 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4922 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4923 goto sigsegv;
4924
4925 if (do_setcontext(&rt_sf->uc, env, 1))
4926 goto sigsegv;
4927
4928 do_sigaltstack(rt_sf_addr
4929 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4930 0, env->gpr[1]);
4931
4932 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4933 return -TARGET_QEMU_ESIGRETURN;
4934
4935 sigsegv:
4936 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4937 force_sig(TARGET_SIGSEGV);
4938 return 0;
4939 }
4940
4941 #elif defined(TARGET_M68K)
4942
4943 struct target_sigcontext {
4944 abi_ulong sc_mask;
4945 abi_ulong sc_usp;
4946 abi_ulong sc_d0;
4947 abi_ulong sc_d1;
4948 abi_ulong sc_a0;
4949 abi_ulong sc_a1;
4950 unsigned short sc_sr;
4951 abi_ulong sc_pc;
4952 };
4953
4954 struct target_sigframe
4955 {
4956 abi_ulong pretcode;
4957 int sig;
4958 int code;
4959 abi_ulong psc;
4960 char retcode[8];
4961 abi_ulong extramask[TARGET_NSIG_WORDS-1];
4962 struct target_sigcontext sc;
4963 };
4964
4965 typedef int target_greg_t;
4966 #define TARGET_NGREG 18
4967 typedef target_greg_t target_gregset_t[TARGET_NGREG];
4968
4969 typedef struct target_fpregset {
4970 int f_fpcntl[3];
4971 int f_fpregs[8*3];
4972 } target_fpregset_t;
4973
4974 struct target_mcontext {
4975 int version;
4976 target_gregset_t gregs;
4977 target_fpregset_t fpregs;
4978 };
4979
4980 #define TARGET_MCONTEXT_VERSION 2
4981
4982 struct target_ucontext {
4983 abi_ulong tuc_flags;
4984 abi_ulong tuc_link;
4985 target_stack_t tuc_stack;
4986 struct target_mcontext tuc_mcontext;
4987 abi_long tuc_filler[80];
4988 target_sigset_t tuc_sigmask;
4989 };
4990
4991 struct target_rt_sigframe
4992 {
4993 abi_ulong pretcode;
4994 int sig;
4995 abi_ulong pinfo;
4996 abi_ulong puc;
4997 char retcode[8];
4998 struct target_siginfo info;
4999 struct target_ucontext uc;
5000 };
5001
5002 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5003 abi_ulong mask)
5004 {
5005 __put_user(mask, &sc->sc_mask);
5006 __put_user(env->aregs[7], &sc->sc_usp);
5007 __put_user(env->dregs[0], &sc->sc_d0);
5008 __put_user(env->dregs[1], &sc->sc_d1);
5009 __put_user(env->aregs[0], &sc->sc_a0);
5010 __put_user(env->aregs[1], &sc->sc_a1);
5011 __put_user(env->sr, &sc->sc_sr);
5012 __put_user(env->pc, &sc->sc_pc);
5013 }
5014
5015 static void
5016 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc, int *pd0)
5017 {
5018 int temp;
5019
5020 __get_user(env->aregs[7], &sc->sc_usp);
5021 __get_user(env->dregs[1], &sc->sc_d1);
5022 __get_user(env->aregs[0], &sc->sc_a0);
5023 __get_user(env->aregs[1], &sc->sc_a1);
5024 __get_user(env->pc, &sc->sc_pc);
5025 __get_user(temp, &sc->sc_sr);
5026 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5027
5028 *pd0 = tswapl(sc->sc_d0);
5029 }
5030
5031 /*
5032 * Determine which stack to use.
5033 */
5034 static inline abi_ulong
5035 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5036 size_t frame_size)
5037 {
5038 unsigned long sp;
5039
5040 sp = regs->aregs[7];
5041
5042 /* This is the X/Open sanctioned signal stack switching. */
5043 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5044 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5045 }
5046
5047 return ((sp - frame_size) & -8UL);
5048 }
5049
5050 static void setup_frame(int sig, struct target_sigaction *ka,
5051 target_sigset_t *set, CPUM68KState *env)
5052 {
5053 struct target_sigframe *frame;
5054 abi_ulong frame_addr;
5055 abi_ulong retcode_addr;
5056 abi_ulong sc_addr;
5057 int i;
5058
5059 frame_addr = get_sigframe(ka, env, sizeof *frame);
5060 trace_user_setup_frame(env, frame_addr);
5061 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5062 goto give_sigsegv;
5063 }
5064
5065 __put_user(sig, &frame->sig);
5066
5067 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5068 __put_user(sc_addr, &frame->psc);
5069
5070 setup_sigcontext(&frame->sc, env, set->sig[0]);
5071
5072 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5073 __put_user(set->sig[i], &frame->extramask[i - 1]);
5074 }
5075
5076 /* Set up to return from userspace. */
5077
5078 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5079 __put_user(retcode_addr, &frame->pretcode);
5080
5081 /* moveq #,d0; trap #0 */
5082
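/* The high word (0x7000 + NR) encodes "moveq #NR,d0"; the low word 0x4e40
   encodes "trap #0". */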
5083 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5084 (uint32_t *)(frame->retcode));
5085
5086 /* Set up to return from userspace */
5087
5088 env->aregs[7] = frame_addr;
5089 env->pc = ka->_sa_handler;
5090
5091 unlock_user_struct(frame, frame_addr, 1);
5092 return;
5093
5094 give_sigsegv:
5095 force_sig(TARGET_SIGSEGV);
5096 }
5097
5098 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5099 CPUM68KState *env)
5100 {
5101 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5102
5103 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5104 __put_user(env->dregs[0], &gregs[0]);
5105 __put_user(env->dregs[1], &gregs[1]);
5106 __put_user(env->dregs[2], &gregs[2]);
5107 __put_user(env->dregs[3], &gregs[3]);
5108 __put_user(env->dregs[4], &gregs[4]);
5109 __put_user(env->dregs[5], &gregs[5]);
5110 __put_user(env->dregs[6], &gregs[6]);
5111 __put_user(env->dregs[7], &gregs[7]);
5112 __put_user(env->aregs[0], &gregs[8]);
5113 __put_user(env->aregs[1], &gregs[9]);
5114 __put_user(env->aregs[2], &gregs[10]);
5115 __put_user(env->aregs[3], &gregs[11]);
5116 __put_user(env->aregs[4], &gregs[12]);
5117 __put_user(env->aregs[5], &gregs[13]);
5118 __put_user(env->aregs[6], &gregs[14]);
5119 __put_user(env->aregs[7], &gregs[15]);
5120 __put_user(env->pc, &gregs[16]);
5121 __put_user(env->sr, &gregs[17]);
5122
5123 return 0;
5124 }
5125
5126 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5127 struct target_ucontext *uc,
5128 int *pd0)
5129 {
5130 int temp;
5131 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5132
5133 __get_user(temp, &uc->tuc_mcontext.version);
5134 if (temp != TARGET_MCONTEXT_VERSION)
5135 goto badframe;
5136
5137 /* restore passed registers */
5138 __get_user(env->dregs[0], &gregs[0]);
5139 __get_user(env->dregs[1], &gregs[1]);
5140 __get_user(env->dregs[2], &gregs[2]);
5141 __get_user(env->dregs[3], &gregs[3]);
5142 __get_user(env->dregs[4], &gregs[4]);
5143 __get_user(env->dregs[5], &gregs[5]);
5144 __get_user(env->dregs[6], &gregs[6]);
5145 __get_user(env->dregs[7], &gregs[7]);
5146 __get_user(env->aregs[0], &gregs[8]);
5147 __get_user(env->aregs[1], &gregs[9]);
5148 __get_user(env->aregs[2], &gregs[10]);
5149 __get_user(env->aregs[3], &gregs[11]);
5150 __get_user(env->aregs[4], &gregs[12]);
5151 __get_user(env->aregs[5], &gregs[13]);
5152 __get_user(env->aregs[6], &gregs[14]);
5153 __get_user(env->aregs[7], &gregs[15]);
5154 __get_user(env->pc, &gregs[16]);
5155 __get_user(temp, &gregs[17]);
5156 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5157
5158 *pd0 = env->dregs[0];
5159 return 0;
5160
5161 badframe:
5162 return 1;
5163 }
5164
5165 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5166 target_siginfo_t *info,
5167 target_sigset_t *set, CPUM68KState *env)
5168 {
5169 struct target_rt_sigframe *frame;
5170 abi_ulong frame_addr;
5171 abi_ulong retcode_addr;
5172 abi_ulong info_addr;
5173 abi_ulong uc_addr;
5174 int err = 0;
5175 int i;
5176
5177 frame_addr = get_sigframe(ka, env, sizeof *frame);
5178 trace_user_setup_rt_frame(env, frame_addr);
5179 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5180 goto give_sigsegv;
5181 }
5182
5183 __put_user(sig, &frame->sig);
5184
5185 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5186 __put_user(info_addr, &frame->pinfo);
5187
5188 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5189 __put_user(uc_addr, &frame->puc);
5190
5191 tswap_siginfo(&frame->info, info);
5192
5193 /* Create the ucontext */
5194
5195 __put_user(0, &frame->uc.tuc_flags);
5196 __put_user(0, &frame->uc.tuc_link);
5197 __put_user(target_sigaltstack_used.ss_sp,
5198 &frame->uc.tuc_stack.ss_sp);
5199 __put_user(sas_ss_flags(env->aregs[7]),
5200 &frame->uc.tuc_stack.ss_flags);
5201 __put_user(target_sigaltstack_used.ss_size,
5202 &frame->uc.tuc_stack.ss_size);
5203 err |= target_rt_setup_ucontext(&frame->uc, env);
5204
5205 if (err)
5206 goto give_sigsegv;
5207
5208 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5209 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5210 }
5211
5212 /* Set up to return from userspace. */
5213
5214 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5215 __put_user(retcode_addr, &frame->pretcode);
5216
5217 /* moveq #,d0; notb d0; trap #0 */
5218
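/* 0x7000 + (NR ^ 0xff) is "moveq #(NR^0xff),d0", 0x4600 is "not.b d0"
   (presumably because NR does not fit moveq's signed 8-bit immediate),
   and 0x4e40 is "trap #0". */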
5219 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5220 (uint32_t *)(frame->retcode + 0));
5221 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5222
5223 if (err)
5224 goto give_sigsegv;
5225
5226 /* Set up to return from userspace */
5227
5228 env->aregs[7] = frame_addr;
5229 env->pc = ka->_sa_handler;
5230
5231 unlock_user_struct(frame, frame_addr, 1);
5232 return;
5233
5234 give_sigsegv:
5235 unlock_user_struct(frame, frame_addr, 1);
5236 force_sig(TARGET_SIGSEGV);
5237 }
5238
5239 long do_sigreturn(CPUM68KState *env)
5240 {
5241 struct target_sigframe *frame;
5242 abi_ulong frame_addr = env->aregs[7] - 4;
5243 target_sigset_t target_set;
5244 sigset_t set;
5245 int d0, i;
5246
5247 trace_user_do_sigreturn(env, frame_addr);
5248 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5249 goto badframe;
5250
5251 /* set blocked signals */
5252
5253 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5254
5255 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5256 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5257 }
5258
5259 target_to_host_sigset_internal(&set, &target_set);
5260 do_sigprocmask(SIG_SETMASK, &set, NULL);
5261
5262 /* restore registers */
5263
5264 restore_sigcontext(env, &frame->sc, &d0);
5265
5266 unlock_user_struct(frame, frame_addr, 0);
5267 return d0;
5268
5269 badframe:
5270 force_sig(TARGET_SIGSEGV);
5271 return 0;
5272 }
5273
5274 long do_rt_sigreturn(CPUM68KState *env)
5275 {
5276 struct target_rt_sigframe *frame;
5277 abi_ulong frame_addr = env->aregs[7] - 4;
5279 sigset_t set;
5280 int d0;
5281
5282 trace_user_do_rt_sigreturn(env, frame_addr);
5283 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5284 goto badframe;
5285
5286 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5287 do_sigprocmask(SIG_SETMASK, &set, NULL);
5288
5289 /* restore registers */
5290
5291 if (target_rt_restore_ucontext(env, &frame->uc, &d0))
5292 goto badframe;
5293
5294 if (do_sigaltstack(frame_addr +
5295 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5296 0, get_sp_from_cpustate(env)) == -EFAULT)
5297 goto badframe;
5298
5299 unlock_user_struct(frame, frame_addr, 0);
5300 return d0;
5301
5302 badframe:
5303 unlock_user_struct(frame, frame_addr, 0);
5304 force_sig(TARGET_SIGSEGV);
5305 return 0;
5306 }
5307
5308 #elif defined(TARGET_ALPHA)
5309
5310 struct target_sigcontext {
5311 abi_long sc_onstack;
5312 abi_long sc_mask;
5313 abi_long sc_pc;
5314 abi_long sc_ps;
5315 abi_long sc_regs[32];
5316 abi_long sc_ownedfp;
5317 abi_long sc_fpregs[32];
5318 abi_ulong sc_fpcr;
5319 abi_ulong sc_fp_control;
5320 abi_ulong sc_reserved1;
5321 abi_ulong sc_reserved2;
5322 abi_ulong sc_ssize;
5323 abi_ulong sc_sbase;
5324 abi_ulong sc_traparg_a0;
5325 abi_ulong sc_traparg_a1;
5326 abi_ulong sc_traparg_a2;
5327 abi_ulong sc_fp_trap_pc;
5328 abi_ulong sc_fp_trigger_sum;
5329 abi_ulong sc_fp_trigger_inst;
5330 };
5331
5332 struct target_ucontext {
5333 abi_ulong tuc_flags;
5334 abi_ulong tuc_link;
5335 abi_ulong tuc_osf_sigmask;
5336 target_stack_t tuc_stack;
5337 struct target_sigcontext tuc_mcontext;
5338 target_sigset_t tuc_sigmask;
5339 };
5340
5341 struct target_sigframe {
5342 struct target_sigcontext sc;
5343 unsigned int retcode[3];
5344 };
5345
5346 struct target_rt_sigframe {
5347 target_siginfo_t info;
5348 struct target_ucontext uc;
5349 unsigned int retcode[3];
5350 };
5351
5352 #define INSN_MOV_R30_R16 0x47fe0410
5353 #define INSN_LDI_R0 0x201f0000
5354 #define INSN_CALLSYS 0x00000083
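/* Together these form the classic sigreturn stub: "mov $30,$16" passes the
   stack pointer (the frame address) as the first argument, "ldi $0,NR"
   selects the syscall, and "callsys" traps into the kernel. */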
5355
5356 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5357 abi_ulong frame_addr, target_sigset_t *set)
5358 {
5359 int i;
5360
5361 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5362 __put_user(set->sig[0], &sc->sc_mask);
5363 __put_user(env->pc, &sc->sc_pc);
5364 __put_user(8, &sc->sc_ps);
5365
5366 for (i = 0; i < 31; ++i) {
5367 __put_user(env->ir[i], &sc->sc_regs[i]);
5368 }
5369 __put_user(0, &sc->sc_regs[31]);
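/* $31 always reads as zero on Alpha, so it is stored as a constant rather
   than taken from env->ir[]. */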
5370
5371 for (i = 0; i < 31; ++i) {
5372 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5373 }
5374 __put_user(0, &sc->sc_fpregs[31]);
5375 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5376
5377 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5378 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5379 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5380 }
5381
5382 static void restore_sigcontext(CPUAlphaState *env,
5383 struct target_sigcontext *sc)
5384 {
5385 uint64_t fpcr;
5386 int i;
5387
5388 __get_user(env->pc, &sc->sc_pc);
5389
5390 for (i = 0; i < 31; ++i) {
5391 __get_user(env->ir[i], &sc->sc_regs[i]);
5392 }
5393 for (i = 0; i < 31; ++i) {
5394 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5395 }
5396
5397 __get_user(fpcr, &sc->sc_fpcr);
5398 cpu_alpha_store_fpcr(env, fpcr);
5399 }
5400
5401 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5402 CPUAlphaState *env,
5403 unsigned long framesize)
5404 {
5405 abi_ulong sp = env->ir[IR_SP];
5406
5407 /* This is the X/Open sanctioned signal stack switching. */
5408 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5409 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5410 }
5411 return (sp - framesize) & -32;
5412 }
5413
5414 static void setup_frame(int sig, struct target_sigaction *ka,
5415 target_sigset_t *set, CPUAlphaState *env)
5416 {
5417 abi_ulong frame_addr, r26;
5418 struct target_sigframe *frame;
5419 int err = 0;
5420
5421 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5422 trace_user_setup_frame(env, frame_addr);
5423 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5424 goto give_sigsegv;
5425 }
5426
5427 setup_sigcontext(&frame->sc, env, frame_addr, set);
5428
5429 if (ka->sa_restorer) {
5430 r26 = ka->sa_restorer;
5431 } else {
5432 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5433 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5434 &frame->retcode[1]);
5435 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5436 /* imb() */
5437 r26 = frame_addr;
5438 }
5439
5440 unlock_user_struct(frame, frame_addr, 1);
5441
5442 if (err) {
5443 give_sigsegv:
5444 if (sig == TARGET_SIGSEGV) {
5445 ka->_sa_handler = TARGET_SIG_DFL;
5446 }
5447 force_sig(TARGET_SIGSEGV);
5448 }
5449
5450 env->ir[IR_RA] = r26;
5451 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5452 env->ir[IR_A0] = sig;
5453 env->ir[IR_A1] = 0;
5454 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5455 env->ir[IR_SP] = frame_addr;
5456 }
5457
5458 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5459 target_siginfo_t *info,
5460 target_sigset_t *set, CPUAlphaState *env)
5461 {
5462 abi_ulong frame_addr, r26;
5463 struct target_rt_sigframe *frame;
5464 int i, err = 0;
5465
5466 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5467 trace_user_setup_rt_frame(env, frame_addr);
5468 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5469 goto give_sigsegv;
5470 }
5471
5472 tswap_siginfo(&frame->info, info);
5473
5474 __put_user(0, &frame->uc.tuc_flags);
5475 __put_user(0, &frame->uc.tuc_link);
5476 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5477 __put_user(target_sigaltstack_used.ss_sp,
5478 &frame->uc.tuc_stack.ss_sp);
5479 __put_user(sas_ss_flags(env->ir[IR_SP]),
5480 &frame->uc.tuc_stack.ss_flags);
5481 __put_user(target_sigaltstack_used.ss_size,
5482 &frame->uc.tuc_stack.ss_size);
5483 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5484 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5485 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5486 }
5487
5488 if (ka->sa_restorer) {
5489 r26 = ka->sa_restorer;
5490 } else {
5491 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5492 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5493 &frame->retcode[1]);
5494 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5495 /* imb(); */
5496 r26 = frame_addr;
5497 }
5498
5499 if (err) {
5500 give_sigsegv:
5501 if (sig == TARGET_SIGSEGV) {
5502 ka->_sa_handler = TARGET_SIG_DFL;
5503 }
5504 force_sig(TARGET_SIGSEGV);
5505 }
5506
5507 env->ir[IR_RA] = r26;
5508 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5509 env->ir[IR_A0] = sig;
5510 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5511 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5512 env->ir[IR_SP] = frame_addr;
5513 }
5514
5515 long do_sigreturn(CPUAlphaState *env)
5516 {
5517 struct target_sigcontext *sc;
5518 abi_ulong sc_addr = env->ir[IR_A0];
5519 target_sigset_t target_set;
5520 sigset_t set;
5521
5522 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5523 goto badframe;
5524 }
5525
5526 target_sigemptyset(&target_set);
5527 __get_user(target_set.sig[0], &sc->sc_mask);
5528
5529 target_to_host_sigset_internal(&set, &target_set);
5530 do_sigprocmask(SIG_SETMASK, &set, NULL);
5531
5532 restore_sigcontext(env, sc);
5533 unlock_user_struct(sc, sc_addr, 0);
5534 return env->ir[IR_V0];
5535
5536 badframe:
5537 force_sig(TARGET_SIGSEGV);
5538 }
5539
5540 long do_rt_sigreturn(CPUAlphaState *env)
5541 {
5542 abi_ulong frame_addr = env->ir[IR_A0];
5543 struct target_rt_sigframe *frame;
5544 sigset_t set;
5545
5546 trace_user_do_rt_sigreturn(env, frame_addr);
5547 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5548 goto badframe;
5549 }
5550 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5551 do_sigprocmask(SIG_SETMASK, &set, NULL);
5552
5553 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5554 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5555 uc.tuc_stack),
5556 0, env->ir[IR_SP]) == -EFAULT) {
5557 goto badframe;
5558 }
5559
5560 unlock_user_struct(frame, frame_addr, 0);
5561 return env->ir[IR_V0];
5562
5563
5564 badframe:
5565 unlock_user_struct(frame, frame_addr, 0);
5566 force_sig(TARGET_SIGSEGV);
5567 }
5568
5569 #elif defined(TARGET_TILEGX)
5570
5571 struct target_sigcontext {
5572 union {
5573 /* General-purpose registers. */
5574 abi_ulong gregs[56];
5575 struct {
5576 abi_ulong __gregs[53];
5577 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5578 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5579 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5580 };
5581 };
5582 abi_ulong pc; /* Program counter. */
5583 abi_ulong ics; /* In Interrupt Critical Section? */
5584 abi_ulong faultnum; /* Fault number. */
5585 abi_ulong pad[5];
5586 };
5587
5588 struct target_ucontext {
5589 abi_ulong tuc_flags;
5590 abi_ulong tuc_link;
5591 target_stack_t tuc_stack;
5592 struct target_sigcontext tuc_mcontext;
5593 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5594 };
5595
5596 struct target_rt_sigframe {
5597 unsigned char save_area[16]; /* caller save area */
5598 struct target_siginfo info;
5599 struct target_ucontext uc;
5600 };
5601
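/*
 * setup_sigcontext() copies the guest CPU state into the frame: all
 * TILEGX_R_COUNT general registers, the pc, a zeroed ICS flag, and the
 * signal number stored in the faultnum slot.
 */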
5602 static void setup_sigcontext(struct target_sigcontext *sc,
5603 CPUArchState *env, int signo)
5604 {
5605 int i;
5606
5607 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5608 __put_user(env->regs[i], &sc->gregs[i]);
5609 }
5610
5611 __put_user(env->pc, &sc->pc);
5612 __put_user(0, &sc->ics);
5613 __put_user(signo, &sc->faultnum);
5614 }
5615
5616 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5617 {
5618 int i;
5619
5620 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5621 __get_user(env->regs[i], &sc->gregs[i]);
5622 }
5623
5624 __get_user(env->pc, &sc->pc);
5625 }
5626
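/*
 * get_sigframe() picks the frame address: fail with -1 if we are already
 * on the alternate stack and the new frame would overflow it, switch to
 * the alternate stack when SA_ONSTACK asks for it and it is usable, then
 * reserve frame_size bytes and align the result down to 16 bytes.
 */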
5627 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5628 size_t frame_size)
5629 {
5630 unsigned long sp = env->regs[TILEGX_R_SP];
5631
5632 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5633 return -1UL;
5634 }
5635
5636 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
5637 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5638 }
5639
5640 sp -= frame_size;
5641 sp &= -16UL;
5642 return sp;
5643 }
5644
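/*
 * setup_rt_frame() builds the frame on the guest stack: siginfo (or just
 * the signal number when SA_SIGINFO is not set), then the ucontext with
 * the current sigaltstack state and the machine context.  The guest pc is
 * pointed at the handler, sp at the frame, lr at the restorer (sa_restorer
 * if provided), and r0-r2 carry the usual (sig, &info, &uc) arguments.
 */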
5645 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5646 target_siginfo_t *info,
5647 target_sigset_t *set, CPUArchState *env)
5648 {
5649 abi_ulong frame_addr;
5650 struct target_rt_sigframe *frame;
5651 unsigned long restorer;
5652
5653 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5654 trace_user_setup_rt_frame(env, frame_addr);
5655 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5656 goto give_sigsegv;
5657 }
5658
5659 /* Always write at least the signal number for the stack backtracer. */
5660 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5661 /* At sigreturn time, restore the callee-save registers too. */
5662 tswap_siginfo(&frame->info, info);
5663 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can we skip it? */
5664 } else {
5665 __put_user(info->si_signo, &frame->info.si_signo);
5666 }
5667
5668 /* Create the ucontext. */
5669 __put_user(0, &frame->uc.tuc_flags);
5670 __put_user(0, &frame->uc.tuc_link);
5671 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5672 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5673 &frame->uc.tuc_stack.ss_flags);
5674 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5675 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5676
5677 restorer = (unsigned long) do_rt_sigreturn;
5678 if (ka->sa_flags & TARGET_SA_RESTORER) {
5679 restorer = (unsigned long) ka->sa_restorer;
5680 }
5681 env->pc = (unsigned long) ka->_sa_handler;
5682 env->regs[TILEGX_R_SP] = (unsigned long) frame;
5683 env->regs[TILEGX_R_LR] = restorer;
5684 env->regs[0] = (unsigned long) sig;
5685 env->regs[1] = (unsigned long) &frame->info;
5686 env->regs[2] = (unsigned long) &frame->uc;
5687 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can we skip it? */
5688
5689 unlock_user_struct(frame, frame_addr, 1);
5690 return;
5691
5692 give_sigsegv:
5693 if (sig == TARGET_SIGSEGV) {
5694 ka->_sa_handler = TARGET_SIG_DFL;
5695 }
5696 force_sig(TARGET_SIGSEGV /* , current */);
5697 }
5698
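/*
 * RT sigreturn for TILE-Gx: the frame sits at the current guest sp.
 * Restore the signal mask and machine context from the saved ucontext,
 * re-apply the sigaltstack settings, and return regs[TILEGX_R_RE] so the
 * restored return-value register is preserved across the syscall exit.
 */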
5699 long do_rt_sigreturn(CPUTLGState *env)
5700 {
5701 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5702 struct target_rt_sigframe *frame;
5703 sigset_t set;
5704
5705 trace_user_do_rt_sigreturn(env, frame_addr);
5706 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5707 goto badframe;
5708 }
5709 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5710 do_sigprocmask(SIG_SETMASK, &set, NULL);
5711
5712 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5713 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5714 uc.tuc_stack),
5715 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5716 goto badframe;
5717 }
5718
5719 unlock_user_struct(frame, frame_addr, 0);
5720 return env->regs[TILEGX_R_RE];
5721
5722
5723 badframe:
5724 unlock_user_struct(frame, frame_addr, 0);
5725 force_sig(TARGET_SIGSEGV);
5726 }
5727
5728 #else
5729
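/*
 * Fallback stubs for targets without signal frame support: the setup
 * routines only report that they are not implemented, and the sigreturn
 * entry points fail with -TARGET_ENOSYS.
 */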
5730 static void setup_frame(int sig, struct target_sigaction *ka,
5731 target_sigset_t *set, CPUArchState *env)
5732 {
5733 fprintf(stderr, "setup_frame: not implemented\n");
5734 }
5735
5736 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5737 target_siginfo_t *info,
5738 target_sigset_t *set, CPUArchState *env)
5739 {
5740 fprintf(stderr, "setup_rt_frame: not implemented\n");
5741 }
5742
5743 long do_sigreturn(CPUArchState *env)
5744 {
5745 fprintf(stderr, "do_sigreturn: not implemented\n");
5746 return -TARGET_ENOSYS;
5747 }
5748
5749 long do_rt_sigreturn(CPUArchState *env)
5750 {
5751 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5752 return -TARGET_ENOSYS;
5753 }
5754
5755 #endif
5756
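/*
 * process_pending_signals() performs the actual guest signal delivery:
 * scan the emulated sigtable for the lowest pending signal, dequeue one
 * instance, give gdb a chance to intercept it, then either apply the
 * default action, ignore it, or block the handler's sa_mask on the host
 * and build the guest signal frame so the handler runs when the CPU loop
 * resumes.
 */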
5757 void process_pending_signals(CPUArchState *cpu_env)
5758 {
5759 CPUState *cpu = ENV_GET_CPU(cpu_env);
5760 int sig;
5761 abi_ulong handler;
5762 sigset_t set, old_set;
5763 target_sigset_t target_old_set;
5764 struct emulated_sigtable *k;
5765 struct target_sigaction *sa;
5766 struct sigqueue *q;
5767 TaskState *ts = cpu->opaque;
5768
5769 if (!ts->signal_pending)
5770 return;
5771
5772 /* FIXME: This is not threadsafe. */
5773 k = ts->sigtab;
5774 for(sig = 1; sig <= TARGET_NSIG; sig++) {
5775 if (k->pending)
5776 goto handle_signal;
5777 k++;
5778 }
5779 /* if no signal is pending, just return */
5780 ts->signal_pending = 0;
5781 return;
5782
5783 handle_signal:
5784 trace_user_handle_signal(cpu_env, sig);
5785 /* dequeue signal */
5786 q = k->first;
5787 k->first = q->next;
5788 if (!k->first)
5789 k->pending = 0;
5790
5791 sig = gdb_handlesig(cpu, sig);
5792 if (!sig) {
5793 sa = NULL;
5794 handler = TARGET_SIG_IGN;
5795 } else {
5796 sa = &sigact_table[sig - 1];
5797 handler = sa->_sa_handler;
5798 }
5799
5800 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
5801 /* The guest has blocked SIGSEGV but we got one anyway.  Assume this is
5802 * a forced SIGSEGV (i.e. one the kernel delivers via force_sig_info
5803 * because it hit a real MMU fault), and treat it as if the default handler applied.
5804 */
5805 handler = TARGET_SIG_DFL;
5806 }
5807
5808 if (handler == TARGET_SIG_DFL) {
5809 /* default handler: ignore some signals. The others are job control or fatal */
5810 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5811 kill(getpid(),SIGSTOP);
5812 } else if (sig != TARGET_SIGCHLD &&
5813 sig != TARGET_SIGURG &&
5814 sig != TARGET_SIGWINCH &&
5815 sig != TARGET_SIGCONT) {
5816 force_sig(sig);
5817 }
5818 } else if (handler == TARGET_SIG_IGN) {
5819 /* ignore sig */
5820 } else if (handler == TARGET_SIG_ERR) {
5821 force_sig(sig);
5822 } else {
5823 /* compute the blocked signals during the handler execution */
5824 target_to_host_sigset(&set, &sa->sa_mask);
5825 /* SA_NODEFER indicates that the current signal should not be
5826 blocked during the handler */
5827 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5828 sigaddset(&set, target_to_host_signal(sig));
5829
5830 /* block signals in the handler using the host's (Linux) signal mask */
5831 do_sigprocmask(SIG_BLOCK, &set, &old_set);
5832 /* save the previously blocked signal state so it can be restored at the
5833 end of the handler's execution (see do_sigreturn) */
5834 host_to_target_sigset_internal(&target_old_set, &old_set);
5835
5836 /* if the CPU is in VM86 mode, we restore the 32-bit values */
5837 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5838 {
5839 CPUX86State *env = cpu_env;
5840 if (env->eflags & VM_MASK)
5841 save_v86_state(env);
5842 }
5843 #endif
5844 /* prepare the stack frame of the virtual CPU */
5845 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5846 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5847 /* These targets do not have traditional signals. */
5848 setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
5849 #else
5850 if (sa->sa_flags & TARGET_SA_SIGINFO)
5851 setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
5852 else
5853 setup_frame(sig, sa, &target_old_set, cpu_env);
5854 #endif
5855 if (sa->sa_flags & TARGET_SA_RESETHAND)
5856 sa->_sa_handler = TARGET_SIG_DFL;
5857 }
5858 if (q != &k->info)
5859 free_sigqueue(cpu_env, q);
5860 }