]> git.proxmox.com Git - mirror_qemu.git/blob - linux-user/signal.c
linux-user: Fix race between multiple signals
[mirror_qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include <sys/ucontext.h>
21 #include <sys/resource.h>
22
23 #include "qemu.h"
24 #include "qemu-common.h"
25 #include "target_signal.h"
26 #include "trace.h"
27
/* Current guest sigaltstack state: disabled until the guest installs an
 * alternate stack via do_sigaltstack().
 */
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Emulated guest sigaction state, one entry per target signal (index
 * is target signal number minus one).
 */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
38
/* Mapping from host signal numbers to guest signal numbers.  Entries
 * left at 0 are filled with the identity mapping by signal_init().
 */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; built at startup by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
83
84 static inline int on_sig_stack(unsigned long sp)
85 {
86 return (sp - target_sigaltstack_used.ss_sp
87 < target_sigaltstack_used.ss_size);
88 }
89
90 static inline int sas_ss_flags(unsigned long sp)
91 {
92 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
93 : on_sig_stack(sp) ? SS_ONSTACK : 0);
94 }
95
96 int host_to_target_signal(int sig)
97 {
98 if (sig < 0 || sig >= _NSIG)
99 return sig;
100 return host_to_target_signal_table[sig];
101 }
102
103 int target_to_host_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return target_to_host_signal_table[sig];
108 }
109
110 static inline void target_sigemptyset(target_sigset_t *set)
111 {
112 memset(set, 0, sizeof(*set));
113 }
114
115 static inline void target_sigaddset(target_sigset_t *set, int signum)
116 {
117 signum--;
118 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
119 set->sig[signum / TARGET_NSIG_BPW] |= mask;
120 }
121
122 static inline int target_sigismember(const target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
127 }
128
129 static void host_to_target_sigset_internal(target_sigset_t *d,
130 const sigset_t *s)
131 {
132 int i;
133 target_sigemptyset(d);
134 for (i = 1; i <= TARGET_NSIG; i++) {
135 if (sigismember(s, i)) {
136 target_sigaddset(d, host_to_target_signal(i));
137 }
138 }
139 }
140
141 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
142 {
143 target_sigset_t d1;
144 int i;
145
146 host_to_target_sigset_internal(&d1, s);
147 for(i = 0;i < TARGET_NSIG_WORDS; i++)
148 d->sig[i] = tswapal(d1.sig[i]);
149 }
150
151 static void target_to_host_sigset_internal(sigset_t *d,
152 const target_sigset_t *s)
153 {
154 int i;
155 sigemptyset(d);
156 for (i = 1; i <= TARGET_NSIG; i++) {
157 if (target_sigismember(s, i)) {
158 sigaddset(d, target_to_host_signal(i));
159 }
160 }
161 }
162
163 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
164 {
165 target_sigset_t s1;
166 int i;
167
168 for(i = 0;i < TARGET_NSIG_WORDS; i++)
169 s1.sig[i] = tswapal(s->sig[i]);
170 target_to_host_sigset_internal(d, &s1);
171 }
172
173 void host_to_target_old_sigset(abi_ulong *old_sigset,
174 const sigset_t *sigset)
175 {
176 target_sigset_t d;
177 host_to_target_sigset(&d, sigset);
178 *old_sigset = d.sig[0];
179 }
180
181 void target_to_host_old_sigset(sigset_t *sigset,
182 const abi_ulong *old_sigset)
183 {
184 target_sigset_t d;
185 int i;
186
187 d.sig[0] = *old_sigset;
188 for(i = 1;i < TARGET_NSIG_WORDS; i++)
189 d.sig[i] = 0;
190 target_to_host_sigset(sigset, &d);
191 }
192
/* Block all host signals for this thread and flag that guest signal
 * handling is in progress.  Returns non-zero if a guest signal was
 * already pending, in which case the caller is expected to abort what
 * it was doing (typically returning -TARGET_ERESTARTSYS).
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;
    int pending;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    /* Atomically fetch-and-set the pending flag so a signal delivered
     * just before the mask took effect cannot be lost.
     */
    pending = atomic_xchg(&ts->signal_pending, 1);

    return pending;
}
210
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Block everything while the emulated mask is updated; if a
         * guest signal was already pending, ask the caller to restart.
         */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            /* New mask is the union of the old mask and set. */
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            /* Remove every signal in set from the emulated mask. */
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
257
#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
270
271 /* siginfo conversion */
272
/* Convert a host siginfo_t into guest layout WITHOUT byte-swapping;
 * tswap_siginfo() must be applied before the result reaches the guest.
 * Only the union member relevant to the (guest) signal number is
 * filled in; si_errno is not propagated.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        /* Should never come here, but who knows. The information for
           the target is irrelevant. */
        tinfo->_sifields._sigfault._addr = 0;
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band = info->si_band;
        tinfo->_sifields._sigpoll._fd = info->si_fd;
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid = info->si_pid;
        tinfo->_sifields._sigchld._uid = info->si_uid;
        /* Exit status needs host->guest translation, unlike the
         * plain integer fields around it.
         */
        tinfo->_sifields._sigchld._status
            = host_to_target_waitstatus(info->si_status);
        tinfo->_sifields._sigchld._utime = info->si_utime;
        tinfo->_sifields._sigchld._stime = info->si_stime;
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = info->si_pid;
        tinfo->_sifields._rt._uid = info->si_uid;
        /* XXX: potential problem if 64 bit */
        tinfo->_sifields._rt._sigval.sival_ptr
            = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
    }
}
304
/* Byte-swap a guest-layout siginfo into guest byte order.  May be
 * called with tinfo == info for an in-place swap; the signal number is
 * read before any field is overwritten, and only the union member that
 * host_to_target_siginfo_noswap() filled in is swapped.
 */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int sig = info->si_signo;
    tinfo->si_signo = tswap32(sig);
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code = tswap32(info->si_code);

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        tinfo->_sifields._sigfault._addr
            = tswapal(info->_sifields._sigfault._addr);
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band
            = tswap32(info->_sifields._sigpoll._band);
        tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid
            = tswap32(info->_sifields._sigchld._pid);
        tinfo->_sifields._sigchld._uid
            = tswap32(info->_sifields._sigchld._uid);
        tinfo->_sifields._sigchld._status
            = tswap32(info->_sifields._sigchld._status);
        tinfo->_sifields._sigchld._utime
            = tswapal(info->_sifields._sigchld._utime);
        tinfo->_sifields._sigchld._stime
            = tswapal(info->_sifields._sigchld._stime);
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
        tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
        tinfo->_sifields._rt._sigval.sival_ptr
            = tswapal(info->_sifields._rt._sigval.sival_ptr);
    }
}
339
340
/* Convert a host siginfo_t fully into guest form: layout conversion
 * followed by an in-place byte swap into guest byte order.
 */
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}
346
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert a guest siginfo to host form.  Only the RT-signal union
 * member is handled (see XXX above); the guest struct is assumed to
 * be in guest byte order.
 */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    info->si_signo = tswap32(tinfo->si_signo);
    info->si_errno = tswap32(tinfo->si_errno);
    info->si_code = tswap32(tinfo->si_code);
    info->si_pid = tswap32(tinfo->_sifields._rt._pid);
    info->si_uid = tswap32(tinfo->_sifields._rt._uid);
    info->si_value.sival_ptr =
            (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
}
359
360 static int fatal_signal (int sig)
361 {
362 switch (sig) {
363 case TARGET_SIGCHLD:
364 case TARGET_SIGURG:
365 case TARGET_SIGWINCH:
366 /* Ignored by default. */
367 return 0;
368 case TARGET_SIGCONT:
369 case TARGET_SIGSTOP:
370 case TARGET_SIGTSTP:
371 case TARGET_SIGTTIN:
372 case TARGET_SIGTTOU:
373 /* Job control signals. */
374 return 0;
375 default:
376 return 1;
377 }
378 }
379
380 /* returns 1 if given signal should dump core if not handled */
381 static int core_dump_signal(int sig)
382 {
383 switch (sig) {
384 case TARGET_SIGABRT:
385 case TARGET_SIGFPE:
386 case TARGET_SIGILL:
387 case TARGET_SIGQUIT:
388 case TARGET_SIGSEGV:
389 case TARGET_SIGTRAP:
390 case TARGET_SIGBUS:
391 return (1);
392 default:
393 return (0);
394 }
395 }
396
/* One-time signal subsystem setup: build the host<->guest signal
 * translation tables, snapshot the host signal mask as the initial
 * guest mask, and install host_signal_handler() for every
 * default-fatal signal.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record the disposition inherited across exec so the guest
         * sees the same SIG_IGN/SIG_DFL state the host had.
         */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
443
444 /* signal queue handling */
445
446 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
447 {
448 CPUState *cpu = ENV_GET_CPU(env);
449 TaskState *ts = cpu->opaque;
450 struct sigqueue *q = ts->first_free;
451 if (!q)
452 return NULL;
453 ts->first_free = q->next;
454 return q;
455 }
456
457 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
458 {
459 CPUState *cpu = ENV_GET_CPU(env);
460 TaskState *ts = cpu->opaque;
461
462 q->next = ts->first_free;
463 ts->first_free = q;
464 }
465
/* abort execution with signal */
/* Terminate the emulated process as if it died from target_sig:
 * optionally produce a guest-format core dump, then re-raise the host
 * signal with the default handler installed so qemu itself exits with
 * the correct wait status.  Never returns.
 */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
    of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
519
/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
/* Returns 1 if the signal was queued for later delivery, 0 if it was
 * ignored/handled immediately (default-ignore, SIG_IGN, or a dropped
 * duplicate non-RT signal), or -EAGAIN if the RT signal queue is full.
 * May not return at all: default-fatal signals go through force_sig().
 */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;
    struct emulated_sigtable *k;
    struct sigqueue *q, **pq;
    abi_ulong handler;
    int queue;

    trace_user_queue_signal(env, sig);
    k = &ts->sigtab[sig - 1];
    /* gdb may want the signal queued regardless of disposition. */
    queue = gdb_queuesig ();
    handler = sigact_table[sig - 1]._sa_handler;

    if (sig == TARGET_SIGSEGV && sigismember(&ts->signal_mask, SIGSEGV)) {
        /* Guest has blocked SIGSEGV but we got one anyway. Assume this
         * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
         * because it got a real MMU fault). A blocked SIGSEGV in that
         * situation is treated as if using the default handler. This is
         * not correct if some other process has randomly sent us a SIGSEGV
         * via kill(), but that is not easy to distinguish at this point,
         * so we assume it doesn't happen.
         */
        handler = TARGET_SIG_DFL;
    }

    if (!queue && handler == TARGET_SIG_DFL) {
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            /* Default action for job control signals: stop ourselves. */
            kill(getpid(),SIGSTOP);
            return 0;
        } else
        /* default handler : ignore some signal. The other are fatal */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH &&
            sig != TARGET_SIGCONT) {
            force_sig(sig);
        } else {
            return 0; /* indicate ignored */
        }
    } else if (!queue && handler == TARGET_SIG_IGN) {
        /* ignore signal */
        return 0;
    } else if (!queue && handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        pq = &k->first;
        if (sig < TARGET_SIGRTMIN) {
            /* if non real time signal, we queue exactly one signal */
            if (!k->pending)
                q = &k->info;
            else
                return 0;
        } else {
            if (!k->pending) {
                /* first signal */
                q = &k->info;
            } else {
                /* RT signals queue beyond the first; append to the
                 * end of the per-signal list.
                 */
                q = alloc_sigqueue(env);
                if (!q)
                    return -EAGAIN;
                while (*pq != NULL)
                    pq = &(*pq)->next;
            }
        }
        *pq = q;
        q->info = *info;
        q->next = NULL;
        k->pending = 1;
        /* signal that a new signal is pending */
        atomic_set(&ts->signal_pending, 1);
        return 1; /* indicates that the signal was queued */
    }
}
596
#ifndef HAVE_SAFE_SYSCALL
/* Fallback for hosts without safe-syscall support: the handler does
 * nothing, so the PC is never rewound.
 */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
603
/* Host SIGACTION handler for all signals qemu traps.  Runs in
 * async-signal context: forwards MMU faults to the CPU emulator,
 * translates everything else into a queued guest signal and kicks the
 * virtual CPU so it is processed as soon as possible.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    /* If the signal interrupted a guest syscall in the safe-syscall
     * window, back the PC up so the syscall restarts correctly.
     */
    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    if (queue_signal(env, sig, &tinfo) == 1) {
        /* Block host signals until target signal handler entered. We
         * can't block SIGSEGV or SIGBUS while we're executing guest
         * code in case the guest code provokes one in the window between
         * now and it getting out to the main loop. Signals will be
         * unblocked again in process_pending_signals().
         */
        sigfillset(&uc->uc_sigmask);
        sigdelset(&uc->uc_sigmask, SIGSEGV);
        sigdelset(&uc->uc_sigmask, SIGBUS);

        /* interrupt the virtual CPU as soon as possible */
        cpu_exit(thread_cpu);
    }
}
644
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
/* Emulate the sigaltstack(2) syscall: optionally report the current
 * alternate stack into uoss_addr and/or install a new one from
 * uss_addr.  sp is the current guest stack pointer, used both for the
 * SS_ONSTACK report and for the EPERM check (cannot change the stack
 * while running on it).
 */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        /* Snapshot the old state before any modification below. */
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot replace the alternate stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
717
/* do_sigaction() return host values and errnos */
/* Emulate sigaction(2): read/update the emulated sigact_table entry
 * for sig, and mirror the change into the host kernel so host-side
 * signal ignore state (and hence syscall interruption) stays correct.
 * act/oact are guest-layout structs; masks are left unswapped here and
 * converted in target_to_host_sigset when actually used.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    /* KILL and STOP dispositions cannot be changed, as on Linux. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
        return -EINVAL;
    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS always keep qemu's own handler installed so
         * MMU faults are still caught.
         */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
774
775 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
776
/* from the Linux kernel */

/* One 80-bit x87 register in fsave layout. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* One x87 register in fxsave layout (padded to 16 bytes). */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

/* One 128-bit SSE register. */
struct target_xmmreg {
    abi_ulong element[4];
};

/* Guest-visible FPU state saved in the signal frame; layout matches
 * the i386 kernel's struct _fpstate.
 */
struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC          0x0000

/* i386 machine context placed in the signal frame; layout matches the
 * kernel's struct sigcontext.
 */
struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

/* Guest ucontext used by the rt signal frame. */
struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};

/* Non-RT signal frame pushed on the guest stack (kernel layout). */
struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

/* RT signal frame pushed on the guest stack (kernel layout). */
struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
872
873 /*
874 * Set up a signal frame.
875 */
876
/* XXX: save x87 state */
/* Fill in a guest sigcontext from the current CPU state and save the
 * FPU state at fpstate_addr.  mask is the first word of the signal
 * mask to report in sc->oldmask.  The frame containing sc/fpstate is
 * already locked by the caller.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    /* Save the FPU image in fsave format and tag it as such. */
    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}
916
917 /*
918 * Determine which stack to use..
919 */
920
/* Pick the guest stack pointer on which to build the signal frame:
 * the sigaltstack if SA_ONSTACK and not already on it, the legacy
 * sa_restorer stack for non-USER_DS segments, otherwise the current
 * stack.  The result is 8-byte aligned after reserving frame_size.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}
944
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/* Build a non-RT signal frame on the guest stack and redirect the CPU
 * to the guest handler.  On any failure to access the frame the guest
 * is killed with SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* Words 1..N of the mask don't fit in sigcontext; store them in
     * the extramask array after it.
     */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }


    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    /* Avoid recursing forever if the SEGV handler itself faults. */
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
1006
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
/* Build an RT signal frame (siginfo + ucontext) on the guest stack and
 * redirect the CPU to the guest handler.  On any failure to access the
 * frame the guest is killed with SIGSEGV.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    /* The handler receives guest pointers to info and uc as its
     * second and third arguments.
     */
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    /* Avoid recursing forever if the SEGV handler itself faults. */
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
1079
/*
 * Restore vCPU state from a target_sigcontext previously written at
 * signal delivery.  Returns 0 on success, 1 if the saved FP state
 * address is unreadable.
 */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    /* Force RPL 3 in the reloaded selectors: the frame contents are
     * under guest control and must not grant extra privilege. */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Merge in only the user-modifiable eflags bits from the frame. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    /* Optional FP state: only restore it if the frame carries a
     * readable fpstate pointer. */
    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}
1121
/*
 * Handle the i386 sigreturn(2) syscall: pop the non-RT frame pushed
 * at delivery, restoring the blocked-signal mask and register state.
 * Returns -TARGET_QEMU_ESIGRETURN so the caller does not clobber EAX.
 */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    /* ESP has advanced past the return address and signal number by
     * the time the sigreturn trampoline runs; step back 8 bytes to
     * the start of the frame. */
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1153
/*
 * Handle the i386 rt_sigreturn(2) syscall: pop the rt frame, restoring
 * the signal mask, register state, and the sigaltstack settings saved
 * in the ucontext.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    /* Only the return address has been popped; step back 4 bytes. */
    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Reinstate the sigaltstack state recorded in the ucontext. */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1184
1185 #elif defined(TARGET_AARCH64)
1186
/* Guest-visible AArch64 sigcontext layout; must match the kernel's
 * uapi struct sigcontext for arm64. */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

/* Guest-visible AArch64 ucontext. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

/* FP/SIMD register record stored inside sigcontext.__reserved. */
struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

/* Frame pushed on the guest stack when delivering an AArch64 signal.
 * fp/lr form a frame record so unwinders can step over the handler;
 * tramp holds the sigreturn trampoline when no SA_RESTORER is set. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
1247
/*
 * Fill in the register, signal-mask, and FP/SIMD contents of an
 * AArch64 rt_sigframe from the current vCPU state.  Always returns 0.
 */
static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    /* The FP/SIMD record lives in the sigcontext's __reserved area. */
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    /* Each 128-bit vreg is stored as two 64-bit halves; on a
     * big-endian target the halves are swapped in memory. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}
1293
/*
 * Restore vCPU state (mask, general registers, pstate, FP/SIMD) from
 * an AArch64 rt_sigframe.  Returns 0 on success, 1 if the FP/SIMD
 * record's magic/size tags do not validate.
 */
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    /* Validate the FP/SIMD record before trusting its contents. */
    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    /* Undo the half-swap applied at save time on big-endian targets. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}
1340
1341 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1342 {
1343 abi_ulong sp;
1344
1345 sp = env->xregs[31];
1346
1347 /*
1348 * This is the X/Open sanctioned signal stack switching.
1349 */
1350 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1351 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1352 }
1353
1354 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1355
1356 return sp;
1357 }
1358
/*
 * Deliver a signal on AArch64: build the rt frame on the guest stack
 * and point the vCPU at the handler.  "info" may be NULL for non-RT
 * delivery, in which case x1/x2 are left untouched.  On failure to
 * map the frame, forces SIGSEGV.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    /* Record the current sigaltstack state for sigreturn. */
    __put_user(target_sigaltstack_used.ss_sp,
                      &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
                      &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
                      &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    /* Handler arguments and return path: x0 = signal number, SP at
     * the frame, x29 = frame record, LR = trampoline/restorer. */
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
1408
/* AArch64 uses the same rt frame for both RT and non-RT delivery. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

/* Non-RT delivery: same frame layout, just no siginfo payload. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}
1421
/*
 * Handle AArch64 rt_sigreturn(2): validate frame alignment, restore
 * vCPU state and the sigaltstack settings, and unwind the frame.
 */
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    /* The frame was pushed 16-byte aligned; anything else means the
     * guest has tampered with SP. */
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
            offsetof(struct target_rt_sigframe, uc.tuc_stack),
            0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}

/* AArch64 has no separate non-RT sigreturn; reuse the RT path. */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1459
1460 #elif defined(TARGET_ARM)
1461
/* Guest-visible 32-bit ARM sigcontext; must match the kernel uapi. */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

/* ucontext layout used by kernels before 2.6.18 ("v1" frames). */
struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
};

/* ucontext layout used by 2.6.18+ kernels ("v2" frames): adds the
 * tuc_regspace area that holds tagged coprocessor (VFP/iWMMXt)
 * records terminated by a zero magic word. */
struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

/* User-visible VFP register dump. */
struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

/* User-visible VFP exception registers. */
struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

/* Magic/size-tagged VFP record stored in tuc_regspace. */
struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

/* Magic/size-tagged iWMMXt record stored in tuc_regspace. */
struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

/* Non-RT signal frame, pre-2.6.18 layout. */
struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

/* Non-RT signal frame, 2.6.18+ layout. */
struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

/* RT signal frame, pre-2.6.18 layout. */
struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

/* RT signal frame, 2.6.18+ layout. */
struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Return-trampoline instruction words, indexed by
 * (rt ? 2 : 0) + (thumb ? 1 : 0) in setup_return(). */
static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
1586
1587
/* The kernel sanity-checks restored user registers here; under
 * emulation any state we can construct is accepted. */
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
1592
/*
 * Fill a target_sigcontext from the current ARM CPU state, saving the
 * first word of the blocked-signal mask in "oldmask".
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    /* Fault details are not tracked per-thread in user emulation;
     * report zeros as the kernel fields would for a non-fault signal. */
    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
1622
1623 static inline abi_ulong
1624 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1625 {
1626 unsigned long sp = regs->regs[13];
1627
1628 /*
1629 * This is the X/Open sanctioned signal stack switching.
1630 */
1631 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1632 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1633 }
1634 /*
1635 * ATPCS B01 mandates 8-byte alignment
1636 */
1637 return (sp - framesize) & ~7;
1638 }
1639
/*
 * Finish signal delivery on ARM: install the return trampoline (or
 * SA_RESTORER stub), then point the CPU at the handler with r0 = usig
 * and SP at the frame.  Handles ARM/Thumb interworking via bit 0 of
 * the handler address.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    /* Bit 0 of the handler address selects Thumb state. */
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    /* Enter the handler with a clean IT state and the right ISA bit. */
    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        /* Index into retcodes[]: +1 for Thumb, +2 for an RT frame. */
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        /* LR gets the guest address of the trampoline, with bit 0 set
         * for Thumb so the return also interworks. */
        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
1676
/*
 * Append a magic/size-tagged VFP record to a v2 ucontext's regspace.
 * Returns a pointer just past the record for the next writer.
 */
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}
1693
1694 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1695 CPUARMState *env)
1696 {
1697 int i;
1698 struct target_iwmmxt_sigframe *iwmmxtframe;
1699 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1700 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1701 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1702 for (i = 0; i < 16; i++) {
1703 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1704 }
1705 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1706 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1707 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1708 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1709 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1710 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1711 return (abi_ulong*)(iwmmxtframe+1);
1712 }
1713
/*
 * Populate a v2 ucontext: sigaltstack info, sigcontext, optional
 * VFP/iWMMXt coprocessor records (terminated by a zero magic word),
 * and the full blocked-signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
1747
1748 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/* Deliver a non-RT signal using the pre-2.6.18 frame layout. */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    /* sig[0] lives in sc.oldmask; only the extra words go here. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1772
/* Deliver a non-RT signal using the 2.6.18+ frame layout. */
static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1791
/* Pick the frame layout matching the emulated kernel version
 * (v2 frames appeared in Linux 2.6.18). */
static void setup_frame(int usig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *regs)
{
    if (get_osversion() >= 0x020612) {
        setup_frame_v2(usig, ka, set, regs);
    } else {
        setup_frame_v1(usig, ka, set, regs);
    }
}
1801
1802 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/* Deliver an RT signal using the pre-2.6.18 frame layout. */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    /* The handler's siginfo/ucontext arguments point into the frame. */
    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    /* r1/r2 carry the siginfo and ucontext pointers per the RT ABI. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1846
/* Deliver an RT signal using the 2.6.18+ frame layout. */
static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    /* r1/r2 carry the siginfo and ucontext pointers per the RT ABI. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1874
/* Pick the RT frame layout matching the emulated kernel version
 * (v2 frames appeared in Linux 2.6.18). */
static void setup_rt_frame(int usig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUARMState *env)
{
    if (get_osversion() >= 0x020612) {
        setup_rt_frame_v2(usig, ka, info, set, env);
    } else {
        setup_rt_frame_v1(usig, ka, info, set, env);
    }
}
1885
/*
 * Restore ARM CPU state from a target_sigcontext.  Returns 0 on
 * success, non-zero if the restored registers fail validation.
 */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only the user-writable (flags/execution state) CPSR bits are
     * taken from the frame. */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
1917
1918 static long do_sigreturn_v1(CPUARMState *env)
1919 {
1920 abi_ulong frame_addr;
1921 struct sigframe_v1 *frame = NULL;
1922 target_sigset_t set;
1923 sigset_t host_set;
1924 int i;
1925
1926 /*
1927 * Since we stacked the signal on a 64-bit boundary,
1928 * then 'sp' should be word aligned here. If it's
1929 * not, then the user is trying to mess with us.
1930 */
1931 frame_addr = env->regs[13];
1932 trace_user_do_sigreturn(env, frame_addr);
1933 if (frame_addr & 7) {
1934 goto badframe;
1935 }
1936
1937 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1938 goto badframe;
1939 }
1940
1941 __get_user(set.sig[0], &frame->sc.oldmask);
1942 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1943 __get_user(set.sig[i], &frame->extramask[i - 1]);
1944 }
1945
1946 target_to_host_sigset_internal(&host_set, &set);
1947 set_sigmask(&host_set);
1948
1949 if (restore_sigcontext(env, &frame->sc)) {
1950 goto badframe;
1951 }
1952
1953 #if 0
1954 /* Send SIGTRAP if we're single-stepping */
1955 if (ptrace_cancel_bpt(current))
1956 send_sig(SIGTRAP, current, 1);
1957 #endif
1958 unlock_user_struct(frame, frame_addr, 0);
1959 return -TARGET_QEMU_ESIGRETURN;
1960
1961 badframe:
1962 force_sig(TARGET_SIGSEGV /* , current */);
1963 return 0;
1964 }
1965
/*
 * Restore VFP state from a tagged record in a v2 ucontext's regspace.
 * Returns a pointer just past the record, or 0 (NULL) if the record's
 * magic/size tags do not validate.
 */
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}
1995
1996 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1997 abi_ulong *regspace)
1998 {
1999 int i;
2000 abi_ulong magic, sz;
2001 struct target_iwmmxt_sigframe *iwmmxtframe;
2002 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2003
2004 __get_user(magic, &iwmmxtframe->magic);
2005 __get_user(sz, &iwmmxtframe->size);
2006 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2007 return 0;
2008 }
2009 for (i = 0; i < 16; i++) {
2010 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2011 }
2012 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2013 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2014 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2015 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2016 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2017 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2018 return (abi_ulong*)(iwmmxtframe + 1);
2019 }
2020
/*
 * Common v2 return path shared by sigreturn and rt_sigreturn: restore
 * the mask, sigcontext, coprocessor records, and sigaltstack state
 * from a v2 ucontext.  Returns 0 on success, 1 on a bad frame.
 */
static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        return 1;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
2059
/* Handle sigreturn(2) for 2.6.18+ (v2) ARM frames. */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2092
2093 long do_sigreturn(CPUARMState *env)
2094 {
2095 if (get_osversion() >= 0x020612) {
2096 return do_sigreturn_v2(env);
2097 } else {
2098 return do_sigreturn_v1(env);
2099 }
2100 }
2101
/*
 * rt_sigreturn for the legacy (pre-2.6.18) v1 frame layout: restore
 * the signal mask, CPU state and alternate signal stack saved in the
 * rt frame on the guest stack.
 */
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2146
/*
 * rt_sigreturn for the v2 (ucontext) frame layout; the heavy lifting
 * is shared with plain sigreturn via do_sigframe_return_v2().
 */
static long do_rt_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2179
2180 long do_rt_sigreturn(CPUARMState *env)
2181 {
2182 if (get_osversion() >= 0x020612) {
2183 return do_rt_sigreturn_v2(env);
2184 } else {
2185 return do_rt_sigreturn_v1(env);
2186 }
2187 }
2188
2189 #elif defined(TARGET_SPARC)
2190
#define __SUNOS_MAXWIN 31

/* This is what SunOS does, so shall I.
 * Guest-visible sigcontext layout for 32-bit SPARC; field order and
 * sizes are guest ABI and must not be changed.
 */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame: the standard register-save area that sits at
 * the top of every SPARC stack frame; the signal frame is built on
 * top of one of these so the handler sees a well-formed frame. */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};
2230
/* Guest view of the register state passed to a SPARC signal handler
 * (the kernel's __siginfo_t, not POSIX siginfo_t). */
typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;              /* first word of the saved signal mask */
} __siginfo_t;
2241
/* Saved FPU state in the SPARC signal frame.  Currently written/read
 * nowhere in this file (FPU save/restore is a FIXME below), but the
 * space is reserved to keep the guest frame layout correct. */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;
    unsigned long si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;
2251
2252
/* Non-RT SPARC signal frame pushed on the guest stack; layout mirrors
 * the kernel's struct signal_frame in arch/sparc/kernel/signal_32.c. */
struct target_signal_frame {
    struct sparc_stackf ss;            /* register window save area */
    __siginfo_t info;                  /* saved CPU state + mask word 0 */
    abi_ulong fpu_save;                /* guest pointer to fpu_state, or 0 */
    abi_ulong insns[2] __attribute__ ((aligned (8)));  /* sigreturn trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
/* RT SPARC signal frame.
 * NOTE(review): this uses *host* siginfo_t/sigset_t/stack_t rather than
 * target_* types, so it does not describe the guest ABI faithfully; it
 * is currently unused because setup_rt_frame() below is unimplemented
 * for SPARC — confirm before relying on this layout. */
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
2273
/* Indices into env->regwptr for the current register window:
 * 0-7 are the in registers (%i0-%i7), 8-15 the locals (%l0-%l7),
 * 16-23 the outs (%o0-%o7). */
#define UREG_O0        16
#define UREG_O6        22
#define UREG_I0        0
#define UREG_I1        1
#define UREG_I2        2
#define UREG_I3        3
#define UREG_I4        4
#define UREG_I5        5
#define UREG_I6        6
#define UREG_I7        7
#define UREG_L0        8
#define UREG_FP        UREG_I6   /* frame pointer lives in %i6 */
#define UREG_SP        UREG_O6   /* stack pointer lives in %o6 */
2287
/*
 * Pick the stack on which to build a SPARC signal frame and return the
 * address of its base (frame grows down from the current frame pointer
 * or from the top of the alternate signal stack).
 */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching.
     * Only switch if we are not already on the alt stack and its top
     * is 8-byte aligned (an unaligned sigaltstack is ignored). */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        if (!on_sig_stack(sp)
                && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
2305
/*
 * Copy the guest CPU state (psr/pc/npc/y, globals and current-window
 * ins) plus the first signal-mask word into the in-frame __siginfo_t.
 * Always returns 0: __put_user() does not update err here.
 */
static int
setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
{
    int err = 0, i;

    __put_user(env->psr, &si->si_regs.psr);
    __put_user(env->pc, &si->si_regs.pc);
    __put_user(env->npc, &si->si_regs.npc);
    __put_user(env->y, &si->si_regs.y);
    for (i=0; i < 8; i++) {
        __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        /* ins of the current window follow the globals in u_regs[] */
        __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
    }
    __put_user(mask, &si->si_mask);
    return err;
}
2324
/* Disabled SunOS-style sigcontext writer, kept for reference only. */
#if 0
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#endif
/* Signal frame size rounded up to an 8-byte (stack) boundary. */
#define NF_ALIGNEDSZ  (((sizeof(struct target_signal_frame) + 7) & (~7)))
2344
/*
 * Build a non-RT signal frame on the guest stack and point the CPU at
 * the handler.  Mirrors the kernel's
 * arch/sparc/kernel/signal_32.c:setup_frame().
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Save the caller's register window into the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    /* The kernel passes &sf->info in both %o1 and %o2 (historical ABI). */
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer) {
        env->regwptr[UREG_I7] = ka->sa_restorer;
    } else {
        uint32_t val32;

        /* %o7 points 8 bytes before the trampoline so that the
         * handler's "ret" (jmp %o7+8) lands on insns[0]. */
        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sig(TARGET_SIGSEGV);
}
2433
/* RT signal delivery is not implemented for 32-bit SPARC; the guest
 * process will miss the signal and this message goes to stderr. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
2440
2441 long do_sigreturn(CPUSPARCState *env)
2442 {
2443 abi_ulong sf_addr;
2444 struct target_signal_frame *sf;
2445 uint32_t up_psr, pc, npc;
2446 target_sigset_t set;
2447 sigset_t host_set;
2448 int err=0, i;
2449
2450 sf_addr = env->regwptr[UREG_FP];
2451 trace_user_do_sigreturn(env, sf_addr);
2452 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2453 goto segv_and_exit;
2454 }
2455
2456 /* 1. Make sure we are not getting garbage from the user */
2457
2458 if (sf_addr & 3)
2459 goto segv_and_exit;
2460
2461 __get_user(pc, &sf->info.si_regs.pc);
2462 __get_user(npc, &sf->info.si_regs.npc);
2463
2464 if ((pc | npc) & 3) {
2465 goto segv_and_exit;
2466 }
2467
2468 /* 2. Restore the state */
2469 __get_user(up_psr, &sf->info.si_regs.psr);
2470
2471 /* User can only change condition codes and FPU enabling in %psr. */
2472 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2473 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2474
2475 env->pc = pc;
2476 env->npc = npc;
2477 __get_user(env->y, &sf->info.si_regs.y);
2478 for (i=0; i < 8; i++) {
2479 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2480 }
2481 for (i=0; i < 8; i++) {
2482 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2483 }
2484
2485 /* FIXME: implement FPU save/restore:
2486 * __get_user(fpu_save, &sf->fpu_save);
2487 * if (fpu_save)
2488 * err |= restore_fpu_state(env, fpu_save);
2489 */
2490
2491 /* This is pretty much atomic, no amount locking would prevent
2492 * the races which exist anyways.
2493 */
2494 __get_user(set.sig[0], &sf->info.si_mask);
2495 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2496 __get_user(set.sig[i], &sf->extramask[i - 1]);
2497 }
2498
2499 target_to_host_sigset_internal(&host_set, &set);
2500 set_sigmask(&host_set);
2501
2502 if (err) {
2503 goto segv_and_exit;
2504 }
2505 unlock_user_struct(sf, sf_addr, 0);
2506 return -TARGET_QEMU_ESIGRETURN;
2507
2508 segv_and_exit:
2509 unlock_user_struct(sf, sf_addr, 0);
2510 force_sig(TARGET_SIGSEGV);
2511 }
2512
/* rt_sigreturn is not implemented for 32-bit SPARC (matching the
 * unimplemented setup_rt_frame() above); report ENOSYS to the guest. */
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
2519
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
/* Indices into the sparc64 mcontext gregs array used by the
 * {set,get}context trap handlers below; mirrors the kernel's
 * asm/uctx.h layout. */
#define MC_TSTATE 0
#define MC_PC 1
#define MC_NPC 2
#define MC_Y 3
#define MC_G1 4
#define MC_G2 5
#define MC_G3 6
#define MC_G4 7
#define MC_G5 8
#define MC_G6 9
#define MC_G7 10
#define MC_O0 11
#define MC_O1 12
#define MC_O2 13
#define MC_O3 14
#define MC_O4 15
#define MC_O5 16
#define MC_O6 17
#define MC_O7 18
#define MC_NGREG 19

typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2544
/* One pending entry of the sparc64 FPU instruction queue. */
struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};
2549
/* sparc64 mcontext FPU state: the register file viewed as 32 singles
 * or 32 doubles, plus status/control registers and the FP queue. */
struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;
2565
/* sparc64 mcontext: general registers, caller's fp/%i7 from the
 * register window on the stack, and the FPU state. */
typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;
2572
/* Guest ucontext consumed/produced by sparc64_{set,get}_context(). */
struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};
2579
/* A V9 register window as spilled to the stack (locals then ins). */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

/* V9 stack pointers are offset by this bias from the actual stack. */
#define TARGET_STACK_BIAS 2047
2587
2588 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2589 void sparc64_set_context(CPUSPARCState *env)
2590 {
2591 abi_ulong ucp_addr;
2592 struct target_ucontext *ucp;
2593 target_mc_gregset_t *grp;
2594 abi_ulong pc, npc, tstate;
2595 abi_ulong fp, i7, w_addr;
2596 unsigned int i;
2597
2598 ucp_addr = env->regwptr[UREG_I0];
2599 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2600 goto do_sigsegv;
2601 }
2602 grp = &ucp->tuc_mcontext.mc_gregs;
2603 __get_user(pc, &((*grp)[MC_PC]));
2604 __get_user(npc, &((*grp)[MC_NPC]));
2605 if ((pc | npc) & 3) {
2606 goto do_sigsegv;
2607 }
2608 if (env->regwptr[UREG_I1]) {
2609 target_sigset_t target_set;
2610 sigset_t set;
2611
2612 if (TARGET_NSIG_WORDS == 1) {
2613 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2614 } else {
2615 abi_ulong *src, *dst;
2616 src = ucp->tuc_sigmask.sig;
2617 dst = target_set.sig;
2618 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2619 __get_user(*dst, src);
2620 }
2621 }
2622 target_to_host_sigset_internal(&set, &target_set);
2623 set_sigmask(&set);
2624 }
2625 env->pc = pc;
2626 env->npc = npc;
2627 __get_user(env->y, &((*grp)[MC_Y]));
2628 __get_user(tstate, &((*grp)[MC_TSTATE]));
2629 env->asi = (tstate >> 24) & 0xff;
2630 cpu_put_ccr(env, tstate >> 32);
2631 cpu_put_cwp64(env, tstate & 0x1f);
2632 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2633 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2634 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2635 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2636 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2637 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2638 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2639 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2640 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2641 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2642 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2643 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2644 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2645 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2646 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2647
2648 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2649 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2650
2651 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2652 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2653 abi_ulong) != 0) {
2654 goto do_sigsegv;
2655 }
2656 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2657 abi_ulong) != 0) {
2658 goto do_sigsegv;
2659 }
2660 /* FIXME this does not match how the kernel handles the FPU in
2661 * its sparc64_set_context implementation. In particular the FPU
2662 * is only restored if fenab is non-zero in:
2663 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2664 */
2665 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2666 {
2667 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2668 for (i = 0; i < 64; i++, src++) {
2669 if (i & 1) {
2670 __get_user(env->fpr[i/2].l.lower, src);
2671 } else {
2672 __get_user(env->fpr[i/2].l.upper, src);
2673 }
2674 }
2675 }
2676 __get_user(env->fsr,
2677 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2678 __get_user(env->gsr,
2679 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2680 unlock_user_struct(ucp, ucp_addr, 0);
2681 return;
2682 do_sigsegv:
2683 unlock_user_struct(ucp, ucp_addr, 0);
2684 force_sig(TARGET_SIGSEGV);
2685 }
2686
/*
 * Emulate the sparc64 getcontext trap: store the current CPU state,
 * signal mask and FPU state into the guest ucontext whose address is
 * in %o0, and advance pc past the trap instruction.
 */
void sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
        goto do_sigsegv;
    }

    mcp = &ucp->tuc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    /* If we're only reading the signal mask then do_sigprocmask()
     * is guaranteed not to fail, which is important because we don't
     * have any way to signal a failure or restart this operation since
     * this is not a normal syscall.
     */
    err = do_sigprocmask(0, NULL, &set);
    assert(err == 0);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        __put_user(target_set.sig[0],
                   (abi_ulong *)&ucp->tuc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->tuc_sigmask.sig;
        for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
            __put_user(*src, dst);
        }
        /* NOTE(review): err is known to be 0 here (asserted above), so
         * this check is dead code kept for fidelity with the original. */
        if (err)
            goto do_sigsegv;
    }

    /* XXX: tstate must be saved properly */
    //    __put_user(env->tstate, &((*grp)[MC_TSTATE]));
    __put_user(env->pc, &((*grp)[MC_PC]));
    __put_user(env->npc, &((*grp)[MC_NPC]));
    __put_user(env->y, &((*grp)[MC_Y]));
    __put_user(env->gregs[1], &((*grp)[MC_G1]));
    __put_user(env->gregs[2], &((*grp)[MC_G2]));
    __put_user(env->gregs[3], &((*grp)[MC_G3]));
    __put_user(env->gregs[4], &((*grp)[MC_G4]));
    __put_user(env->gregs[5], &((*grp)[MC_G5]));
    __put_user(env->gregs[6], &((*grp)[MC_G6]));
    __put_user(env->gregs[7], &((*grp)[MC_G7]));
    __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
    __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
    __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
    __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
    __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
    __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
    __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
    __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));

    /* Read the caller's fp/%i7 back out of the register window
     * spilled at the (biased) stack pointer. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    fp = i7 = 0;
    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    __put_user(fp, &(mcp->mc_fp));
    __put_user(i7, &(mcp->mc_i7));

    {
        uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, dst++) {
            if (i & 1) {
                __put_user(env->fpr[i/2].l.lower, dst);
            } else {
                __put_user(env->fpr[i/2].l.upper, dst);
            }
        }
    }
    __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    if (err)
        goto do_sigsegv;
    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
2789 #endif
2790 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2791
2792 # if defined(TARGET_ABI_MIPSO32)
/* MIPS O32 guest sigcontext; layout matches the kernel's
 * arch/mips/include/uapi/asm/sigcontext.h and must not change. */
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t   pad0;
    uint64_t   sc_mdhi;
    uint64_t   sc_mdlo;
    target_ulong   sc_hi1;         /* Was sc_cause */
    target_ulong   sc_lo1;         /* Was sc_badvaddr */
    target_ulong   sc_hi2;         /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
2814 # else /* N32 || N64 */
/* MIPS N32/N64 guest sigcontext (different field order from O32). */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
2832 # endif /* O32 */
2833
/* MIPS non-RT signal frame pushed on the guest stack. */
struct sigframe {
    uint32_t sf_ass[4];			/* argument save space for o32 */
    uint32_t sf_code[2];			/* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};
2840
/* MIPS guest ucontext as embedded in the rt signal frame. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};
2849
/* MIPS RT signal frame pushed on the guest stack. */
struct target_rt_sigframe {
    uint32_t rs_ass[4];               /* argument save space for o32 */
    uint32_t rs_code[2];              /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
2856
/*
 * Install the two-instruction return trampoline the signal handler
 * returns through:
 *
 *   li      v0, __NR__foo_sigreturn
 *   syscall
 *
 * Always returns 0.
 */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    __put_user(0x24020000 + syscall, &tramp[0]);  /* li v0, syscall */
    __put_user(0x0000000c, &tramp[1]);            /* syscall */
    return 0;
}
2873
/*
 * Save the guest CPU state into an in-frame sigcontext.  Also clears
 * any pending branch-delay state, since the saved pc is the resume pc.
 */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    /* $zero is architecturally 0; store it explicitly. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2909
/*
 * Restore guest CPU state from a sigcontext; inverse of
 * setup_sigcontext().  The saved pc lands in CP0_EPC and is moved to
 * the active PC by the sigreturn caller.
 */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Skip $zero (index 0): it is hardwired. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2940
/*
 * Determine which stack to use for the signal frame and return the
 * 8-byte-aligned frame address (normal stack, or the alternate stack
 * when SA_ONSTACK is set and we are not already on it).
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
{
    unsigned long sp;

    /* Default to using normal stack */
    sp = regs->active_tc.gpr[29];

    /*
     * FPU emulator may have its own trampoline active just
     * above the user stack, 16-bytes before the next lowest
     * 16 byte boundary.  Try to avoid trashing it.
     */
    sp -= 32;

    /* This is the X/Open sanctioned signal stack switching.  */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    return (sp - frame_size) & ~7;
}
2966
2967 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2968 {
2969 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2970 env->hflags &= ~MIPS_HFLAG_M16;
2971 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2972 env->active_tc.PC &= ~(target_ulong) 1;
2973 }
2974 }
2975
2976 # if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
/*
 * Build a non-RT (O32) signal frame on the guest stack and point the
 * CPU at the handler.  On failure the guest gets SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = 0 (should be cause)
    *   a2 = pointer to struct sigcontext
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sig(TARGET_SIGSEGV/*, current*/);
}
3025
/*
 * sigreturn for the O32 non-RT frame: restore the saved signal mask
 * and CPU state from the frame found at $sp.  Returns
 * -TARGET_QEMU_ESIGRETURN so the syscall dispatcher does not clobber
 * the restored registers.
 */
long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
   	"move\t$29, %0\n\t"
   	"j\tsyscall_exit"
   	:/* no outputs */
   	:"r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3071 # endif /* O32 */
3072
3073 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3074 target_siginfo_t *info,
3075 target_sigset_t *set, CPUMIPSState *env)
3076 {
3077 struct target_rt_sigframe *frame;
3078 abi_ulong frame_addr;
3079 int i;
3080
3081 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3082 trace_user_setup_rt_frame(env, frame_addr);
3083 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3084 goto give_sigsegv;
3085 }
3086
3087 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3088
3089 tswap_siginfo(&frame->rs_info, info);
3090
3091 __put_user(0, &frame->rs_uc.tuc_flags);
3092 __put_user(0, &frame->rs_uc.tuc_link);
3093 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3094 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3095 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3096 &frame->rs_uc.tuc_stack.ss_flags);
3097
3098 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3099
3100 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3101 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3102 }
3103
3104 /*
3105 * Arguments to signal handler:
3106 *
3107 * a0 = signal number
3108 * a1 = pointer to siginfo_t
3109 * a2 = pointer to struct ucontext
3110 *
3111 * $25 and PC point to the signal handler, $29 points to the
3112 * struct sigframe.
3113 */
3114 env->active_tc.gpr[ 4] = sig;
3115 env->active_tc.gpr[ 5] = frame_addr
3116 + offsetof(struct target_rt_sigframe, rs_info);
3117 env->active_tc.gpr[ 6] = frame_addr
3118 + offsetof(struct target_rt_sigframe, rs_uc);
3119 env->active_tc.gpr[29] = frame_addr;
3120 env->active_tc.gpr[31] = frame_addr
3121 + offsetof(struct target_rt_sigframe, rs_code);
3122 /* The original kernel code sets CP0_EPC to the handler
3123 * since it returns to userland using eret
3124 * we cannot do this here, and we must set PC directly */
3125 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3126 mips_set_hflags_isa_mode_from_pc(env);
3127 unlock_user_struct(frame, frame_addr, 1);
3128 return;
3129
3130 give_sigsegv:
3131 unlock_user_struct(frame, frame_addr, 1);
3132 force_sig(TARGET_SIGSEGV/*, current*/);
3133 }
3134
/*
 * rt_sigreturn: restore signal mask, CPU state and the alternate
 * signal stack from the rt frame found at $sp.
 */
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3168
3169 #elif defined(TARGET_SH4)
3170
3171 /*
3172 * code and data structures from linux kernel:
3173 * include/asm-sh/sigcontext.h
3174 * arch/sh/kernel/signal.c
3175 */
3176
/* Saved CPU state pushed on the guest stack.  The layout must match
 * the Linux/SH kernel's struct sigcontext exactly (see
 * include/asm-sh/sigcontext.h); do not reorder fields. */
struct target_sigcontext {
    target_ulong oldmask;

    /* CPU registers */
    target_ulong sc_gregs[16];
    target_ulong sc_pc;
    target_ulong sc_pr;
    target_ulong sc_sr;
    target_ulong sc_gbr;
    target_ulong sc_mach;
    target_ulong sc_macl;

    /* FPU registers */
    target_ulong sc_fpregs[16];
    target_ulong sc_xfpregs[16];
    unsigned int sc_fpscr;
    unsigned int sc_fpul;
    unsigned int sc_ownedfp;
};

/* Frame pushed on the guest stack for a classic (non-RT) signal.
 * retcode receives the sigreturn trampoline when the guest did not
 * register an SA_RESTORER stub. */
struct target_sigframe
{
    struct target_sigcontext sc;
    target_ulong extramask[TARGET_NSIG_WORDS-1];
    uint16_t retcode[3];
};


/* Guest-side ucontext mirroring the kernel's struct ucontext.
 * NOTE(review): tuc_link is a host-width pointer embedded in a
 * guest-visible layout -- verify against the target ABI. */
struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask; /* mask last for extensibility */
};

/* Frame pushed for an RT signal: full siginfo plus ucontext. */
struct target_rt_sigframe
{
    struct target_siginfo info;
    struct target_ucontext uc;
    uint16_t retcode[3];
};
3219
3220
3221 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3222 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3223
3224 static abi_ulong get_sigframe(struct target_sigaction *ka,
3225 unsigned long sp, size_t frame_size)
3226 {
3227 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3228 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3229 }
3230
3231 return (sp - frame_size) & -8ul;
3232 }
3233
/* Copy the SH4 CPU state into the guest sigcontext, byte-swapping as
 * needed via __put_user.  mask is the first word of the blocked-signal
 * set, stored in the non-iBCS2 oldmask slot. */
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUSH4State *regs, unsigned long mask)
{
    int i;

#define COPY(x) __put_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    /* FPU state: bank 0 registers plus control/communication regs. */
    for (i=0; i<16; i++) {
        __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __put_user(regs->fpscr, &sc->sc_fpscr);
    __put_user(regs->fpul, &sc->sc_fpul);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
}
3262
/* Inverse of setup_sigcontext(): reload the SH4 CPU state from the
 * guest sigcontext on sigreturn. */
static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
{
    int i;

#define COPY(x) __get_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i=0; i<16; i++) {
        __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __get_user(regs->fpscr, &sc->sc_fpscr);
    __get_user(regs->fpul, &sc->sc_fpul);

    regs->tra = -1; /* disable syscall checks */
}
3289
3290 static void setup_frame(int sig, struct target_sigaction *ka,
3291 target_sigset_t *set, CPUSH4State *regs)
3292 {
3293 struct target_sigframe *frame;
3294 abi_ulong frame_addr;
3295 int i;
3296
3297 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3298 trace_user_setup_frame(regs, frame_addr);
3299 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3300 goto give_sigsegv;
3301 }
3302
3303 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3304
3305 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3306 __put_user(set->sig[i + 1], &frame->extramask[i]);
3307 }
3308
3309 /* Set up to return from userspace. If provided, use a stub
3310 already in userspace. */
3311 if (ka->sa_flags & TARGET_SA_RESTORER) {
3312 regs->pr = (unsigned long) ka->sa_restorer;
3313 } else {
3314 /* Generate return code (system call to sigreturn) */
3315 abi_ulong retcode_addr = frame_addr +
3316 offsetof(struct target_sigframe, retcode);
3317 __put_user(MOVW(2), &frame->retcode[0]);
3318 __put_user(TRAP_NOARG, &frame->retcode[1]);
3319 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3320 regs->pr = (unsigned long) retcode_addr;
3321 }
3322
3323 /* Set up registers for signal handler */
3324 regs->gregs[15] = frame_addr;
3325 regs->gregs[4] = sig; /* Arg for signal handler */
3326 regs->gregs[5] = 0;
3327 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3328 regs->pc = (unsigned long) ka->_sa_handler;
3329
3330 unlock_user_struct(frame, frame_addr, 1);
3331 return;
3332
3333 give_sigsegv:
3334 unlock_user_struct(frame, frame_addr, 1);
3335 force_sig(TARGET_SIGSEGV);
3336 }
3337
/* Build an RT signal frame (siginfo + ucontext) on the guest stack
 * and point the SH4 CPU at the handler.  On failure, deliver SIGSEGV. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSH4State *regs)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
    trace_user_setup_rt_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Convert siginfo to the guest layout/endianness. */
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, (unsigned long *)&frame->uc.tuc_link);
    __put_user((unsigned long)target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(regs->gregs[15]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext,
                     regs, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace. */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        regs->pr = (unsigned long) ka->sa_restorer;
    } else {
        /* Generate return code (system call to sigreturn) */
        abi_ulong retcode_addr = frame_addr +
                offsetof(struct target_rt_sigframe, retcode);
        __put_user(MOVW(2), &frame->retcode[0]);
        __put_user(TRAP_NOARG, &frame->retcode[1]);
        __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
        regs->pr = (unsigned long) retcode_addr;
    }

    /* Set up registers for signal handler */
    regs->gregs[15] = frame_addr;
    regs->gregs[4] = sig; /* Arg for signal handler */
    regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
    regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
    regs->pc = (unsigned long) ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
3397
3398 long do_sigreturn(CPUSH4State *regs)
3399 {
3400 struct target_sigframe *frame;
3401 abi_ulong frame_addr;
3402 sigset_t blocked;
3403 target_sigset_t target_set;
3404 int i;
3405 int err = 0;
3406
3407 frame_addr = regs->gregs[15];
3408 trace_user_do_sigreturn(regs, frame_addr);
3409 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3410 goto badframe;
3411 }
3412
3413 __get_user(target_set.sig[0], &frame->sc.oldmask);
3414 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3415 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3416 }
3417
3418 if (err)
3419 goto badframe;
3420
3421 target_to_host_sigset_internal(&blocked, &target_set);
3422 set_sigmask(&blocked);
3423
3424 restore_sigcontext(regs, &frame->sc);
3425
3426 unlock_user_struct(frame, frame_addr, 0);
3427 return -TARGET_QEMU_ESIGRETURN;
3428
3429 badframe:
3430 unlock_user_struct(frame, frame_addr, 0);
3431 force_sig(TARGET_SIGSEGV);
3432 return 0;
3433 }
3434
/* Handle the SH4 rt_sigreturn syscall: restore mask, CPU state and
 * sigaltstack settings from the frame built by setup_rt_frame(). */
long do_rt_sigreturn(CPUSH4State *regs)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    /* The frame sits at the guest stack pointer (r15). */
    frame_addr = regs->gregs[15];
    trace_user_do_rt_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    /* Re-establish the signal mask saved in the ucontext. */
    target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->uc.tuc_mcontext);

    /* Restore the alternate signal stack state. */
    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(regs)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
3466 #elif defined(TARGET_MICROBLAZE)
3467
/* MicroBlaze guest sigcontext; layout must match the kernel's. */
struct target_sigcontext {
    struct target_pt_regs regs;  /* needs to be first */
    uint32_t oldmask;
};

/* Guest stack_t as laid out by the MicroBlaze kernel ABI. */
struct target_stack_t {
    abi_ulong ss_sp;
    int ss_flags;
    unsigned int ss_size;
};

/* Guest-side ucontext for MicroBlaze. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    struct target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
};

/* Signal frames. */
struct target_signal_frame {
    struct target_ucontext uc;
    uint32_t extramask[TARGET_NSIG_WORDS - 1];
    uint32_t tramp[2];
};

/* NOTE(review): this declares HOST siginfo_t/ucontext, not target_*
 * types.  It is unused while setup_rt_frame is unimplemented, but the
 * layout would be wrong for a guest frame -- verify before use. */
struct rt_signal_frame {
    siginfo_t info;
    struct ucontext uc;
    uint32_t tramp[2];
};
3499
/* Copy all 32 MicroBlaze GPRs plus the PC into the guest sigcontext. */
static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
{
    __put_user(env->regs[0], &sc->regs.r0);
    __put_user(env->regs[1], &sc->regs.r1);
    __put_user(env->regs[2], &sc->regs.r2);
    __put_user(env->regs[3], &sc->regs.r3);
    __put_user(env->regs[4], &sc->regs.r4);
    __put_user(env->regs[5], &sc->regs.r5);
    __put_user(env->regs[6], &sc->regs.r6);
    __put_user(env->regs[7], &sc->regs.r7);
    __put_user(env->regs[8], &sc->regs.r8);
    __put_user(env->regs[9], &sc->regs.r9);
    __put_user(env->regs[10], &sc->regs.r10);
    __put_user(env->regs[11], &sc->regs.r11);
    __put_user(env->regs[12], &sc->regs.r12);
    __put_user(env->regs[13], &sc->regs.r13);
    __put_user(env->regs[14], &sc->regs.r14);
    __put_user(env->regs[15], &sc->regs.r15);
    __put_user(env->regs[16], &sc->regs.r16);
    __put_user(env->regs[17], &sc->regs.r17);
    __put_user(env->regs[18], &sc->regs.r18);
    __put_user(env->regs[19], &sc->regs.r19);
    __put_user(env->regs[20], &sc->regs.r20);
    __put_user(env->regs[21], &sc->regs.r21);
    __put_user(env->regs[22], &sc->regs.r22);
    __put_user(env->regs[23], &sc->regs.r23);
    __put_user(env->regs[24], &sc->regs.r24);
    __put_user(env->regs[25], &sc->regs.r25);
    __put_user(env->regs[26], &sc->regs.r26);
    __put_user(env->regs[27], &sc->regs.r27);
    __put_user(env->regs[28], &sc->regs.r28);
    __put_user(env->regs[29], &sc->regs.r29);
    __put_user(env->regs[30], &sc->regs.r30);
    __put_user(env->regs[31], &sc->regs.r31);
    __put_user(env->sregs[SR_PC], &sc->regs.pc);
}
3536
/* Inverse of setup_sigcontext(): reload all 32 GPRs and the PC from
 * the guest sigcontext on sigreturn. */
static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
{
    __get_user(env->regs[0], &sc->regs.r0);
    __get_user(env->regs[1], &sc->regs.r1);
    __get_user(env->regs[2], &sc->regs.r2);
    __get_user(env->regs[3], &sc->regs.r3);
    __get_user(env->regs[4], &sc->regs.r4);
    __get_user(env->regs[5], &sc->regs.r5);
    __get_user(env->regs[6], &sc->regs.r6);
    __get_user(env->regs[7], &sc->regs.r7);
    __get_user(env->regs[8], &sc->regs.r8);
    __get_user(env->regs[9], &sc->regs.r9);
    __get_user(env->regs[10], &sc->regs.r10);
    __get_user(env->regs[11], &sc->regs.r11);
    __get_user(env->regs[12], &sc->regs.r12);
    __get_user(env->regs[13], &sc->regs.r13);
    __get_user(env->regs[14], &sc->regs.r14);
    __get_user(env->regs[15], &sc->regs.r15);
    __get_user(env->regs[16], &sc->regs.r16);
    __get_user(env->regs[17], &sc->regs.r17);
    __get_user(env->regs[18], &sc->regs.r18);
    __get_user(env->regs[19], &sc->regs.r19);
    __get_user(env->regs[20], &sc->regs.r20);
    __get_user(env->regs[21], &sc->regs.r21);
    __get_user(env->regs[22], &sc->regs.r22);
    __get_user(env->regs[23], &sc->regs.r23);
    __get_user(env->regs[24], &sc->regs.r24);
    __get_user(env->regs[25], &sc->regs.r25);
    __get_user(env->regs[26], &sc->regs.r26);
    __get_user(env->regs[27], &sc->regs.r27);
    __get_user(env->regs[28], &sc->regs.r28);
    __get_user(env->regs[29], &sc->regs.r29);
    __get_user(env->regs[30], &sc->regs.r30);
    __get_user(env->regs[31], &sc->regs.r31);
    __get_user(env->sregs[SR_PC], &sc->regs.pc);
}
3573
3574 static abi_ulong get_sigframe(struct target_sigaction *ka,
3575 CPUMBState *env, int frame_size)
3576 {
3577 abi_ulong sp = env->regs[1];
3578
3579 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3580 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3581 }
3582
3583 return ((sp - frame_size) & -8UL);
3584 }
3585
3586 static void setup_frame(int sig, struct target_sigaction *ka,
3587 target_sigset_t *set, CPUMBState *env)
3588 {
3589 struct target_signal_frame *frame;
3590 abi_ulong frame_addr;
3591 int i;
3592
3593 frame_addr = get_sigframe(ka, env, sizeof *frame);
3594 trace_user_setup_frame(env, frame_addr);
3595 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3596 goto badframe;
3597
3598 /* Save the mask. */
3599 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3600
3601 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3602 __put_user(set->sig[i], &frame->extramask[i - 1]);
3603 }
3604
3605 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3606
3607 /* Set up to return from userspace. If provided, use a stub
3608 already in userspace. */
3609 /* minus 8 is offset to cater for "rtsd r15,8" offset */
3610 if (ka->sa_flags & TARGET_SA_RESTORER) {
3611 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3612 } else {
3613 uint32_t t;
3614 /* Note, these encodings are _big endian_! */
3615 /* addi r12, r0, __NR_sigreturn */
3616 t = 0x31800000UL | TARGET_NR_sigreturn;
3617 __put_user(t, frame->tramp + 0);
3618 /* brki r14, 0x8 */
3619 t = 0xb9cc0008UL;
3620 __put_user(t, frame->tramp + 1);
3621
3622 /* Return from sighandler will jump to the tramp.
3623 Negative 8 offset because return is rtsd r15, 8 */
3624 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3625 - 8;
3626 }
3627
3628 /* Set up registers for signal handler */
3629 env->regs[1] = frame_addr;
3630 /* Signal handler args: */
3631 env->regs[5] = sig; /* Arg 0: signum */
3632 env->regs[6] = 0;
3633 /* arg 1: sigcontext */
3634 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3635
3636 /* Offset of 4 to handle microblaze rtid r14, 0 */
3637 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3638
3639 unlock_user_struct(frame, frame_addr, 1);
3640 return;
3641 badframe:
3642 force_sig(TARGET_SIGSEGV);
3643 }
3644
/* RT signal delivery is not implemented for MicroBlaze; the stub only
 * logs a diagnostic.  Guests using rt handlers will misbehave. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMBState *env)
{
    fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
}
3651
3652 long do_sigreturn(CPUMBState *env)
3653 {
3654 struct target_signal_frame *frame;
3655 abi_ulong frame_addr;
3656 target_sigset_t target_set;
3657 sigset_t set;
3658 int i;
3659
3660 frame_addr = env->regs[R_SP];
3661 trace_user_do_sigreturn(env, frame_addr);
3662 /* Make sure the guest isn't playing games. */
3663 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3664 goto badframe;
3665
3666 /* Restore blocked signals */
3667 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3668 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3669 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3670 }
3671 target_to_host_sigset_internal(&set, &target_set);
3672 set_sigmask(&set);
3673
3674 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3675 /* We got here through a sigreturn syscall, our path back is via an
3676 rtb insn so setup r14 for that. */
3677 env->regs[14] = env->sregs[SR_PC];
3678
3679 unlock_user_struct(frame, frame_addr, 0);
3680 return -TARGET_QEMU_ESIGRETURN;
3681 badframe:
3682 force_sig(TARGET_SIGSEGV);
3683 }
3684
/* rt_sigreturn is not implemented for MicroBlaze (matches the missing
 * setup_rt_frame above); report ENOSYS to the guest. */
long do_rt_sigreturn(CPUMBState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
3691
3692 #elif defined(TARGET_CRIS)
3693
/* CRIS guest sigcontext; layout must match the kernel's. */
struct target_sigcontext {
    struct target_pt_regs regs;  /* needs to be first */
    uint32_t oldmask;
    uint32_t usp;    /* usp before stacking this gunk on it */
};

/* Signal frames. */
struct target_signal_frame {
    struct target_sigcontext sc;
    uint32_t extramask[TARGET_NSIG_WORDS - 1];
    uint16_t retcode[4];      /* Trampoline code. */
};

/* NOTE(review): declares HOST siginfo_t/ucontext rather than target_*
 * types.  Unused while setup_rt_frame is unimplemented; the layout
 * would be wrong for a guest frame -- verify before use. */
struct rt_signal_frame {
    siginfo_t *pinfo;
    void *puc;
    siginfo_t info;
    struct ucontext uc;
    uint16_t retcode[4];      /* Trampoline code. */
};
3714
/* Copy the CRIS CPU state into the guest sigcontext.  r14 is stored
 * as the saved user stack pointer (usp), and the PC as erp. */
static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
{
    __put_user(env->regs[0], &sc->regs.r0);
    __put_user(env->regs[1], &sc->regs.r1);
    __put_user(env->regs[2], &sc->regs.r2);
    __put_user(env->regs[3], &sc->regs.r3);
    __put_user(env->regs[4], &sc->regs.r4);
    __put_user(env->regs[5], &sc->regs.r5);
    __put_user(env->regs[6], &sc->regs.r6);
    __put_user(env->regs[7], &sc->regs.r7);
    __put_user(env->regs[8], &sc->regs.r8);
    __put_user(env->regs[9], &sc->regs.r9);
    __put_user(env->regs[10], &sc->regs.r10);
    __put_user(env->regs[11], &sc->regs.r11);
    __put_user(env->regs[12], &sc->regs.r12);
    __put_user(env->regs[13], &sc->regs.r13);
    __put_user(env->regs[14], &sc->usp);
    __put_user(env->regs[15], &sc->regs.acr);
    __put_user(env->pregs[PR_MOF], &sc->regs.mof);
    __put_user(env->pregs[PR_SRP], &sc->regs.srp);
    __put_user(env->pc, &sc->regs.erp);
}
3737
/* Inverse of setup_sigcontext(): reload the CRIS CPU state from the
 * guest sigcontext on sigreturn. */
static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
{
    __get_user(env->regs[0], &sc->regs.r0);
    __get_user(env->regs[1], &sc->regs.r1);
    __get_user(env->regs[2], &sc->regs.r2);
    __get_user(env->regs[3], &sc->regs.r3);
    __get_user(env->regs[4], &sc->regs.r4);
    __get_user(env->regs[5], &sc->regs.r5);
    __get_user(env->regs[6], &sc->regs.r6);
    __get_user(env->regs[7], &sc->regs.r7);
    __get_user(env->regs[8], &sc->regs.r8);
    __get_user(env->regs[9], &sc->regs.r9);
    __get_user(env->regs[10], &sc->regs.r10);
    __get_user(env->regs[11], &sc->regs.r11);
    __get_user(env->regs[12], &sc->regs.r12);
    __get_user(env->regs[13], &sc->regs.r13);
    __get_user(env->regs[14], &sc->usp);
    __get_user(env->regs[15], &sc->regs.acr);
    __get_user(env->pregs[PR_MOF], &sc->regs.mof);
    __get_user(env->pregs[PR_SRP], &sc->regs.srp);
    __get_user(env->pc, &sc->regs.erp);
}
3760
3761 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3762 {
3763 abi_ulong sp;
3764 /* Align the stack downwards to 4. */
3765 sp = (env->regs[R_SP] & ~3);
3766 return sp - framesize;
3767 }
3768
/* Build a CRIS signal frame on the guest stack and point the CPU at
 * the handler.  On failure, deliver SIGSEGV. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto badframe;

    /*
     * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
     * use this trampoline anymore but it sets it up for GDB.
     * In QEMU, using the trampoline simplifies things a bit so we use it.
     *
     * This is movu.w __NR_sigreturn, r9; break 13;
     */
    __put_user(0x9c5f, frame->retcode+0);
    __put_user(TARGET_NR_sigreturn,
               frame->retcode + 1);
    __put_user(0xe93d, frame->retcode + 2);

    /* Save the mask. */
    __put_user(set->sig[0], &frame->sc.oldmask);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_sigcontext(&frame->sc, env);

    /* Move the stack and setup the arguments for the handler.  */
    env->regs[R_SP] = frame_addr;
    env->regs[10] = sig;
    env->pc = (unsigned long) ka->_sa_handler;
    /* Link SRP so the guest returns through the trampoline.  */
    env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);

    unlock_user_struct(frame, frame_addr, 1);
    return;
badframe:
    force_sig(TARGET_SIGSEGV);
}
3814
/* RT signal delivery is not implemented for CRIS; the stub only logs
 * a diagnostic.  Guests using rt handlers will misbehave. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUCRISState *env)
{
    fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
}
3821
3822 long do_sigreturn(CPUCRISState *env)
3823 {
3824 struct target_signal_frame *frame;
3825 abi_ulong frame_addr;
3826 target_sigset_t target_set;
3827 sigset_t set;
3828 int i;
3829
3830 frame_addr = env->regs[R_SP];
3831 trace_user_do_sigreturn(env, frame_addr);
3832 /* Make sure the guest isn't playing games. */
3833 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3834 goto badframe;
3835 }
3836
3837 /* Restore blocked signals */
3838 __get_user(target_set.sig[0], &frame->sc.oldmask);
3839 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3840 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3841 }
3842 target_to_host_sigset_internal(&set, &target_set);
3843 set_sigmask(&set);
3844
3845 restore_sigcontext(&frame->sc, env);
3846 unlock_user_struct(frame, frame_addr, 0);
3847 return -TARGET_QEMU_ESIGRETURN;
3848 badframe:
3849 force_sig(TARGET_SIGSEGV);
3850 }
3851
/* rt_sigreturn is not implemented for CRIS (matches the missing
 * setup_rt_frame above); report ENOSYS to the guest. */
long do_rt_sigreturn(CPUCRISState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
3858
3859 #elif defined(TARGET_OPENRISC)
3860
/* OpenRISC guest sigcontext; layout must match the kernel's. */
struct target_sigcontext {
    struct target_pt_regs regs;
    abi_ulong oldmask;
    abi_ulong usp;       /* user stack pointer before frame push */
};

/* Guest-side ucontext for OpenRISC. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

/* RT signal frame pushed on the guest stack.  pinfo/puc are guest
 * addresses of the embedded info and uc members. */
struct target_rt_sigframe {
    abi_ulong pinfo;
    uint64_t puc;
    struct target_siginfo info;
    struct target_sigcontext sc;
    struct target_ucontext uc;
    unsigned char retcode[16];  /* trampoline code */
};
3883
/* This is the asm-generic/ucontext.h version */
/* NOTE(review): dead reference code -- compiled out by the "#if 0"
 * below and never built; kept only as documentation of the kernel's
 * original restore path. */
#if 0
static int restore_sigcontext(CPUOpenRISCState *regs,
                              struct target_sigcontext *sc)
{
    unsigned int err = 0;
    unsigned long old_usp;

    /* Always make any pending restarted system call return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    /* restore the regs from &sc->regs (same as sc, since regs is first)
     * (sc is already checked for VERIFY_READ since the sigframe was
     *  checked in sys_sigreturn previously)
     */

    if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
        goto badframe;
    }

    /* make sure the U-flag is set so user-mode cannot fool us */

    regs->sr &= ~SR_SM;

    /* restore the old USP as it was before we stacked the sc etc.
     * (we cannot just pop the sigcontext since we aligned the sp and
     *  stuff after pushing it)
     */

    __get_user(old_usp, &sc->usp);
    phx_signal("old_usp 0x%lx", old_usp);

    __PHX__ REALLY           /* ??? */
    wrusp(old_usp);
    regs->gpr[1] = old_usp;

    /* TODO: the other ports use regs->orig_XX to disable syscall checks
     * after this completes, but we don't use that mechanism. maybe we can
     * use it now ?
     */

    return err;

badframe:
    return 1;
}
#endif
3931
3932 /* Set up a signal frame. */
3933
3934 static void setup_sigcontext(struct target_sigcontext *sc,
3935 CPUOpenRISCState *regs,
3936 unsigned long mask)
3937 {
3938 unsigned long usp = regs->gpr[1];
3939
3940 /* copy the regs. they are first in sc so we can use sc directly */
3941
3942 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3943
3944 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3945 the signal handler. The frametype will be restored to its previous
3946 value in restore_sigcontext. */
3947 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3948
3949 /* then some other stuff */
3950 __put_user(mask, &sc->oldmask);
3951 __put_user(usp, &sc->usp);
3952 }
3953
/* Round a prospective frame address down to a 4-byte boundary. */
static inline unsigned long align_sigframe(unsigned long sp)
{
    return sp & ~3UL;
}
3960
3961 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3962 CPUOpenRISCState *regs,
3963 size_t frame_size)
3964 {
3965 unsigned long sp = regs->gpr[1];
3966 int onsigstack = on_sig_stack(sp);
3967
3968 /* redzone */
3969 /* This is the X/Open sanctioned signal stack switching. */
3970 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3971 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3972 }
3973
3974 sp = align_sigframe(sp - frame_size);
3975
3976 /*
3977 * If we are on the alternate signal stack and would overflow it, don't.
3978 * Return an always-bogus address instead so we will die with SIGSEGV.
3979 */
3980
3981 if (onsigstack && !likely(on_sig_stack(sp))) {
3982 return -1L;
3983 }
3984
3985 return sp;
3986 }
3987
3988 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3989 target_siginfo_t *info,
3990 target_sigset_t *set, CPUOpenRISCState *env)
3991 {
3992 int err = 0;
3993 abi_ulong frame_addr;
3994 unsigned long return_ip;
3995 struct target_rt_sigframe *frame;
3996 abi_ulong info_addr, uc_addr;
3997
3998 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3999 trace_user_setup_rt_frame(env, frame_addr);
4000 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4001 goto give_sigsegv;
4002 }
4003
4004 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4005 __put_user(info_addr, &frame->pinfo);
4006 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4007 __put_user(uc_addr, &frame->puc);
4008
4009 if (ka->sa_flags & SA_SIGINFO) {
4010 tswap_siginfo(&frame->info, info);
4011 }
4012
4013 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4014 __put_user(0, &frame->uc.tuc_flags);
4015 __put_user(0, &frame->uc.tuc_link);
4016 __put_user(target_sigaltstack_used.ss_sp,
4017 &frame->uc.tuc_stack.ss_sp);
4018 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4019 __put_user(target_sigaltstack_used.ss_size,
4020 &frame->uc.tuc_stack.ss_size);
4021 setup_sigcontext(&frame->sc, env, set->sig[0]);
4022
4023 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4024
4025 /* trampoline - the desired return ip is the retcode itself */
4026 return_ip = (unsigned long)&frame->retcode;
4027 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4028 __put_user(0xa960, (short *)(frame->retcode + 0));
4029 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4030 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4031 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4032
4033 if (err) {
4034 goto give_sigsegv;
4035 }
4036
4037 /* TODO what is the current->exec_domain stuff and invmap ? */
4038
4039 /* Set up registers for signal handler */
4040 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4041 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4042 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4043 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
4044 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
4045
4046 /* actually move the usp to reflect the stacked frame */
4047 env->gpr[1] = (unsigned long)frame;
4048
4049 return;
4050
4051 give_sigsegv:
4052 unlock_user_struct(frame, frame_addr, 1);
4053 if (sig == TARGET_SIGSEGV) {
4054 ka->_sa_handler = TARGET_SIG_DFL;
4055 }
4056 force_sig(TARGET_SIGSEGV);
4057 }
4058
/* Classic sigreturn is not implemented for OpenRISC; report ENOSYS. */
long do_sigreturn(CPUOpenRISCState *env)
{
    trace_user_do_sigreturn(env, 0);
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4065
/* rt_sigreturn is not implemented for OpenRISC even though
 * setup_rt_frame() above pushes a trampoline that invokes it --
 * guests taking rt signals cannot return; report ENOSYS. */
long do_rt_sigreturn(CPUOpenRISCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4072 /* TARGET_OPENRISC */
4073
4074 #elif defined(TARGET_S390X)
4075
4076 #define __NUM_GPRS 16
4077 #define __NUM_FPRS 16
4078 #define __NUM_ACRS 16
4079
4080 #define S390_SYSCALL_SIZE 2
4081 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4082
4083 #define _SIGCONTEXT_NSIG 64
4084 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4085 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4086 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4087 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4088 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4089
/* s390x PSW plus general and access registers as seen by the guest. */
typedef struct {
    target_psw_t psw;
    target_ulong gprs[__NUM_GPRS];
    unsigned int acrs[__NUM_ACRS];
} target_s390_regs_common;

/* s390x floating-point control and registers. */
typedef struct {
    unsigned int fpc;
    double fprs[__NUM_FPRS];
} target_s390_fp_regs;

/* Full register save area stored inside a signal frame. */
typedef struct {
    target_s390_regs_common regs;
    target_s390_fp_regs fpregs;
} target_sigregs;

/* NOTE(review): sregs is a host-width pointer inside a guest-visible
 * struct; setup_frame() stores a guest address into it via a cast --
 * verify width matches the target ABI. */
struct target_sigcontext {
    target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
    target_sigregs *sregs;
};

/* Frame pushed for a classic (non-RT) signal. */
typedef struct {
    uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
    struct target_sigcontext sc;
    target_sigregs sregs;
    int signo;
    uint8_t retcode[S390_SYSCALL_SIZE];
} sigframe;

/* Guest-side ucontext for s390x. */
struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    target_sigregs tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

/* Frame pushed for an RT signal. */
typedef struct {
    uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
    uint8_t retcode[S390_SYSCALL_SIZE];
    struct target_siginfo info;
    struct target_ucontext uc;
} rt_sigframe;
4133
4134 static inline abi_ulong
4135 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4136 {
4137 abi_ulong sp;
4138
4139 /* Default to using normal stack */
4140 sp = env->regs[15];
4141
4142 /* This is the X/Open sanctioned signal stack switching. */
4143 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4144 if (!sas_ss_flags(sp)) {
4145 sp = target_sigaltstack_used.ss_sp +
4146 target_sigaltstack_used.ss_size;
4147 }
4148 }
4149
4150 /* This is the legacy signal stack switching. */
4151 else if (/* FIXME !user_mode(regs) */ 0 &&
4152 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4153 ka->sa_restorer) {
4154 sp = (abi_ulong) ka->sa_restorer;
4155 }
4156
4157 return (sp - frame_size) & -8ul;
4158 }
4159
/* Copy the s390x PSW, general, access, and floating-point registers
 * into the guest register save area of a signal frame. */
static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
{
    int i;
    //save_access_regs(current->thread.acrs); FIXME

    /* Copy a 'clean' PSW mask to the user to avoid leaking
       information about whether PER is currently on.  */
    __put_user(env->psw.mask, &sregs->regs.psw.mask);
    __put_user(env->psw.addr, &sregs->regs.psw.addr);
    for (i = 0; i < 16; i++) {
        __put_user(env->regs[i], &sregs->regs.gprs[i]);
    }
    for (i = 0; i < 16; i++) {
        __put_user(env->aregs[i], &sregs->regs.acrs[i]);
    }
    /*
     * We have to store the fp registers to current->thread.fp_regs
     * to merge them with the emulated registers.
     */
    //save_fp_regs(&current->thread.fp_regs); FIXME
    for (i = 0; i < 16; i++) {
        __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
    }
}
4184
4185 static void setup_frame(int sig, struct target_sigaction *ka,
4186 target_sigset_t *set, CPUS390XState *env)
4187 {
4188 sigframe *frame;
4189 abi_ulong frame_addr;
4190
4191 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4192 trace_user_setup_frame(env, frame_addr);
4193 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4194 goto give_sigsegv;
4195 }
4196
4197 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4198
4199 save_sigregs(env, &frame->sregs);
4200
4201 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4202 (abi_ulong *)&frame->sc.sregs);
4203
4204 /* Set up to return from userspace. If provided, use a stub
4205 already in userspace. */
4206 if (ka->sa_flags & TARGET_SA_RESTORER) {
4207 env->regs[14] = (unsigned long)
4208 ka->sa_restorer | PSW_ADDR_AMODE;
4209 } else {
4210 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4211 | PSW_ADDR_AMODE;
4212 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4213 (uint16_t *)(frame->retcode));
4214 }
4215
4216 /* Set up backchain. */
4217 __put_user(env->regs[15], (abi_ulong *) frame);
4218
4219 /* Set up registers for signal handler */
4220 env->regs[15] = frame_addr;
4221 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4222
4223 env->regs[2] = sig; //map_signal(sig);
4224 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4225
4226 /* We forgot to include these in the sigcontext.
4227 To avoid breaking binary compatibility, they are passed as args. */
4228 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4229 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4230
4231 /* Place signal number on stack to allow backtrace from handler. */
4232 __put_user(env->regs[2], (int *) &frame->signo);
4233 unlock_user_struct(frame, frame_addr, 1);
4234 return;
4235
4236 give_sigsegv:
4237 force_sig(TARGET_SIGSEGV);
4238 }
4239
4240 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4241 target_siginfo_t *info,
4242 target_sigset_t *set, CPUS390XState *env)
4243 {
4244 int i;
4245 rt_sigframe *frame;
4246 abi_ulong frame_addr;
4247
4248 frame_addr = get_sigframe(ka, env, sizeof *frame);
4249 trace_user_setup_rt_frame(env, frame_addr);
4250 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4251 goto give_sigsegv;
4252 }
4253
4254 tswap_siginfo(&frame->info, info);
4255
4256 /* Create the ucontext. */
4257 __put_user(0, &frame->uc.tuc_flags);
4258 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4259 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4260 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4261 &frame->uc.tuc_stack.ss_flags);
4262 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4263 save_sigregs(env, &frame->uc.tuc_mcontext);
4264 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4265 __put_user((abi_ulong)set->sig[i],
4266 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4267 }
4268
4269 /* Set up to return from userspace. If provided, use a stub
4270 already in userspace. */
4271 if (ka->sa_flags & TARGET_SA_RESTORER) {
4272 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4273 } else {
4274 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4275 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4276 (uint16_t *)(frame->retcode));
4277 }
4278
4279 /* Set up backchain. */
4280 __put_user(env->regs[15], (abi_ulong *) frame);
4281
4282 /* Set up registers for signal handler */
4283 env->regs[15] = frame_addr;
4284 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4285
4286 env->regs[2] = sig; //map_signal(sig);
4287 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4288 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4289 return;
4290
4291 give_sigsegv:
4292 force_sig(TARGET_SIGSEGV);
4293 }
4294
4295 static int
4296 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4297 {
4298 int err = 0;
4299 int i;
4300
4301 for (i = 0; i < 16; i++) {
4302 __get_user(env->regs[i], &sc->regs.gprs[i]);
4303 }
4304
4305 __get_user(env->psw.mask, &sc->regs.psw.mask);
4306 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4307 (unsigned long long)env->psw.addr);
4308 __get_user(env->psw.addr, &sc->regs.psw.addr);
4309 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4310
4311 for (i = 0; i < 16; i++) {
4312 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4313 }
4314 for (i = 0; i < 16; i++) {
4315 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4316 }
4317
4318 return err;
4319 }
4320
/* sigreturn syscall: unwind the classic frame that setup_frame() left
   at the top of the guest stack (r15) and restore the CPU state and
   signal mask.  A bad frame raises SIGSEGV. */
long do_sigreturn(CPUS390XState *env)
{
    sigframe *frame;
    abi_ulong frame_addr = env->regs[15];  /* frame sits at the stack top */
    target_sigset_t target_set;
    sigset_t set;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    /* Classic frames only carry the first word of the signal mask. */
    __get_user(target_set.sig[0], &frame->sc.oldmask[0]);

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set); /* ~_BLOCKABLE? */

    if (restore_sigregs(env, &frame->sregs)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return 0;
}
4348
/* rt_sigreturn syscall: unwind the rt frame at the top of the guest
   stack, restoring mask, CPU state and the alternate-stack settings. */
long do_rt_sigreturn(CPUS390XState *env)
{
    rt_sigframe *frame;
    abi_ulong frame_addr = env->regs[15];  /* frame sits at the stack top */
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);

    set_sigmask(&set); /* ~_BLOCKABLE? */

    if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Re-apply the sigaltstack settings saved in the ucontext. */
    if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
4379
4380 #elif defined(TARGET_PPC)
4381
4382 /* Size of dummy stack frame allocated when calling signal handler.
4383 See arch/powerpc/include/asm/ptrace.h. */
4384 #if defined(TARGET_PPC64)
4385 #define SIGNAL_FRAMESIZE 128
4386 #else
4387 #define SIGNAL_FRAMESIZE 64
4388 #endif
4389
/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same. */
struct target_mcontext {
    target_ulong mc_gregs[48];   /* indexed by the TARGET_PT_* enum below */
    /* Includes fpscr. */
    uint64_t mc_fregs[33];
    target_ulong mc_pad[2];      /* reused as the trampoline; see "tramp" */
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do. Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform. */
    union {
        /* SPE vector registers. One extra for SPEFSCR. */
        uint32_t spe[33];
        /* Altivec vector registers. The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together. */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG 34
#else
#define QEMU_NVRREG 33
#endif
        ppc_avr_t altivec[QEMU_NVRREG];
#undef QEMU_NVRREG
    } mc_vregs __attribute__((__aligned__(16)));
};

/* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
    target_ulong _unused[4];   /* _unused[3] carries mask bits 32-63 */
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details. */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39. One is 64-bit only. */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};
4483
4484
/* Guest-visible ucontext; the layout diverges between PPC32 and PPC64
   exactly as the kernel's does. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* struct ucontext __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c. */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo;       /* struct siginfo __user * */
    target_ulong puc;         /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

/* ELFv1 function descriptor: entry address plus TOC pointer. */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline. */
#define tramp mc_pad
4549
4550 /* See arch/powerpc/kernel/signal.c. */
4551 static target_ulong get_sigframe(struct target_sigaction *ka,
4552 CPUPPCState *env,
4553 int frame_size)
4554 {
4555 target_ulong oldsp, newsp;
4556
4557 oldsp = env->gpr[1];
4558
4559 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4560 (sas_ss_flags(oldsp) == 0)) {
4561 oldsp = (target_sigaltstack_used.ss_sp
4562 + target_sigaltstack_used.ss_size);
4563 }
4564
4565 newsp = (oldsp - frame_size) & ~0xFUL;
4566
4567 return newsp;
4568 }
4569
/* Dump the guest CPU state into an mcontext on the frame.  MSR_VR /
   MSR_SPE are set in the saved MSR to mark which vector state blocks
   hold valid data. */
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers. We don't care that
       much, so we just go ahead and save everything. */

    /* Save general registers. */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    /* Reassemble the 32-bit CR image from the per-field crf[] array. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary. */
    if (env->insns_flags & PPC_ALTIVEC) {
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];

            __put_user(avr->u64[0], &vreg->u64[0]);
            __put_user(avr->u64[1], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data. */
        msr |= MSR_VR;
        __put_user((uint32_t)env->spr[SPR_VRSAVE],
                   &frame->mc_vregs.altivec[32].u32[3]);
    }

    /* Save floating point registers. */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __put_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers. The kernel only saves the high half. */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data. */
        msr |= MSR_SPE;
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR. */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}
4638
/* Write the two-instruction sigreturn trampoline "li r0,sigret; sc"
   into *tramp.  A zero sigret means no trampoline is needed. */
static void encode_trampoline(int sigret, uint32_t *tramp)
{
    if (sigret) {
        __put_user(0x38000000 | sigret, &tramp[0]);  /* li r0,sigret */
        __put_user(0x44000002, &tramp[1]);           /* sc */
    }
}
4647
/* Reload guest CPU state from an mcontext.  When sig is zero (a
   setcontext-style restore rather than a signal return) r2 is
   preserved and the little-endian MSR bit is left untouched. */
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers. */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Split the 32-bit CR image back into the per-field crf[] array. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR. */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode. */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary. */
    if (env->insns_flags & PPC_ALTIVEC) {
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];

            __get_user(avr->u64[0], &vreg->u64[0]);
            __get_user(avr->u64[1], &vreg->u64[1]);
        }
        /* Restore VRSAVE from the slot save_user_regs() used for it. */
        __get_user(env->spr[SPR_VRSAVE],
                   (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
    }

    /* Restore floating point registers. */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  The frame only holds the high halves. */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}
4727
/* Build a classic (non-RT) PPC signal frame on the guest stack and
   point the CPU at the handler.  Failure raises SIGSEGV. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;
#if defined(TARGET_PPC64)
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    /* oldmask holds mask word 0; the remaining 32 bits of the mask go
       into the _unused[3] slot for binary compatibility. */
    __put_user(set->sig[0], &sc->oldmask);
#if TARGET_ABI_BITS == 64
    __put_user(set->sig[0] >> 32, &sc->_unused[3]);
#else
    __put_user(set->sig[1], &sc->_unused[3]);
#endif
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs. */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here. We don't
       emulate a vdso, so use a sigreturn system call. */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions. */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler. */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler. */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode. */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
4807
/* Build a real-time PPC signal frame (siginfo + ucontext) on the guest
   stack and point the CPU at the handler.  Failure raises SIGSEGV. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    /* Create the ucontext. */
    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    __put_user((target_ulong)target_sigaltstack_used.ss_sp,
               &rt_sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]),
               &rt_sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &rt_sf->uc.tuc_stack.ss_size);
#if !defined(TARGET_PPC64)
    /* PPC32: tuc_regs points at the in-frame mcontext. */
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

    /* mcontext and trampoline live in different places on the two ABIs. */
#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here. We don't
       emulate a vdso, so use a sigreturn system call. */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions. */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler. */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler. */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode. */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);

}
4903
4904 long do_sigreturn(CPUPPCState *env)
4905 {
4906 struct target_sigcontext *sc = NULL;
4907 struct target_mcontext *sr = NULL;
4908 target_ulong sr_addr = 0, sc_addr;
4909 sigset_t blocked;
4910 target_sigset_t set;
4911
4912 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4913 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4914 goto sigsegv;
4915
4916 #if defined(TARGET_PPC64)
4917 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4918 #else
4919 __get_user(set.sig[0], &sc->oldmask);
4920 __get_user(set.sig[1], &sc->_unused[3]);
4921 #endif
4922 target_to_host_sigset_internal(&blocked, &set);
4923 set_sigmask(&blocked);
4924
4925 __get_user(sr_addr, &sc->regs);
4926 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4927 goto sigsegv;
4928 restore_user_regs(env, sr, 1);
4929
4930 unlock_user_struct(sr, sr_addr, 1);
4931 unlock_user_struct(sc, sc_addr, 1);
4932 return -TARGET_QEMU_ESIGRETURN;
4933
4934 sigsegv:
4935 unlock_user_struct(sr, sr_addr, 1);
4936 unlock_user_struct(sc, sc_addr, 1);
4937 force_sig(TARGET_SIGSEGV);
4938 return 0;
4939 }
4940
/* See arch/powerpc/kernel/signal_32.c.  Restore signal mask and CPU
   state from a guest ucontext; returns non-zero on a bad frame. */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

    /* PPC64 keeps the mcontext inline; PPC32 follows the tuc_regs
       pointer stored by setup_rt_frame(). */
#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
4970
/* rt_sigreturn syscall: unwind the rt frame below r1, restoring mask,
   CPU state and the alternate-stack settings from the ucontext. */
long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    /* Mirrors the "SIGNAL_FRAMESIZE + 16" offset used by setup_rt_frame. */
    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
4995
4996 #elif defined(TARGET_M68K)
4997
/* Classic m68k sigcontext: only the subset of registers the old ABI
   exposes (d0/d1, a0/a1, usp, sr, pc) plus the first mask word. */
struct target_sigcontext {
    abi_ulong  sc_mask;
    abi_ulong  sc_usp;
    abi_ulong  sc_d0;
    abi_ulong  sc_d1;
    abi_ulong  sc_a0;
    abi_ulong  sc_a1;
    unsigned short sc_sr;
    abi_ulong  sc_pc;
};

/* Classic signal frame pushed on the guest stack by setup_frame(). */
struct target_sigframe
{
    abi_ulong pretcode;    /* guest address of retcode[] below */
    int sig;
    int code;
    abi_ulong psc;         /* guest address of sc below */
    char retcode[8];       /* sigreturn trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    struct target_sigcontext sc;
};

typedef int target_greg_t;
#define TARGET_NGREG 18    /* d0-d7, a0-a7, pc, sr */
typedef target_greg_t target_gregset_t[TARGET_NGREG];

typedef struct target_fpregset {
    int f_fpcntl[3];
    int f_fpregs[8*3];
} target_fpregset_t;

struct target_mcontext {
    int version;
    target_gregset_t gregs;
    target_fpregset_t fpregs;
};

#define TARGET_MCONTEXT_VERSION 2

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_mcontext tuc_mcontext;
    abi_long tuc_filler[80];
    target_sigset_t tuc_sigmask;
};

/* Real-time signal frame pushed by setup_rt_frame(). */
struct target_rt_sigframe
{
    abi_ulong pretcode;    /* guest address of retcode[] below */
    int sig;
    abi_ulong pinfo;       /* guest address of info below */
    abi_ulong puc;         /* guest address of uc below */
    char retcode[8];       /* rt_sigreturn trampoline */
    struct target_siginfo info;
    struct target_ucontext uc;
};
5056
5057 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5058 abi_ulong mask)
5059 {
5060 __put_user(mask, &sc->sc_mask);
5061 __put_user(env->aregs[7], &sc->sc_usp);
5062 __put_user(env->dregs[0], &sc->sc_d0);
5063 __put_user(env->dregs[1], &sc->sc_d1);
5064 __put_user(env->aregs[0], &sc->sc_a0);
5065 __put_user(env->aregs[1], &sc->sc_a1);
5066 __put_user(env->sr, &sc->sc_sr);
5067 __put_user(env->pc, &sc->sc_pc);
5068 }
5069
5070 static void
5071 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5072 {
5073 int temp;
5074
5075 __get_user(env->aregs[7], &sc->sc_usp);
5076 __get_user(env->dregs[0], &sc->sc_d0);
5077 __get_user(env->dregs[1], &sc->sc_d1);
5078 __get_user(env->aregs[0], &sc->sc_a0);
5079 __get_user(env->aregs[1], &sc->sc_a1);
5080 __get_user(env->pc, &sc->sc_pc);
5081 __get_user(temp, &sc->sc_sr);
5082 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5083 }
5084
5085 /*
5086 * Determine which stack to use..
5087 */
5088 static inline abi_ulong
5089 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5090 size_t frame_size)
5091 {
5092 unsigned long sp;
5093
5094 sp = regs->aregs[7];
5095
5096 /* This is the X/Open sanctioned signal stack switching. */
5097 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5098 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5099 }
5100
5101 return ((sp - frame_size) & -8UL);
5102 }
5103
/* Build a classic m68k signal frame on the guest stack and point the
   CPU at the handler.  Failure raises SIGSEGV. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    abi_ulong retcode_addr;
    abi_ulong sc_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(sig, &frame->sig);

    /* psc is the guest address of the sigcontext inside the frame. */
    sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
    __put_user(sc_addr, &frame->psc);

    /* Mask word 0 goes in the sigcontext, the rest in extramask[]. */
    setup_sigcontext(&frame->sc, env, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace. */

    retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; trap #0 */

    __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
               (uint32_t *)(frame->retcode));

    /* Set up to return from userspace */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sig(TARGET_SIGSEGV);
}
5151
5152 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5153 CPUM68KState *env)
5154 {
5155 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5156
5157 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5158 __put_user(env->dregs[0], &gregs[0]);
5159 __put_user(env->dregs[1], &gregs[1]);
5160 __put_user(env->dregs[2], &gregs[2]);
5161 __put_user(env->dregs[3], &gregs[3]);
5162 __put_user(env->dregs[4], &gregs[4]);
5163 __put_user(env->dregs[5], &gregs[5]);
5164 __put_user(env->dregs[6], &gregs[6]);
5165 __put_user(env->dregs[7], &gregs[7]);
5166 __put_user(env->aregs[0], &gregs[8]);
5167 __put_user(env->aregs[1], &gregs[9]);
5168 __put_user(env->aregs[2], &gregs[10]);
5169 __put_user(env->aregs[3], &gregs[11]);
5170 __put_user(env->aregs[4], &gregs[12]);
5171 __put_user(env->aregs[5], &gregs[13]);
5172 __put_user(env->aregs[6], &gregs[14]);
5173 __put_user(env->aregs[7], &gregs[15]);
5174 __put_user(env->pc, &gregs[16]);
5175 __put_user(env->sr, &gregs[17]);
5176
5177 return 0;
5178 }
5179
5180 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5181 struct target_ucontext *uc)
5182 {
5183 int temp;
5184 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5185
5186 __get_user(temp, &uc->tuc_mcontext.version);
5187 if (temp != TARGET_MCONTEXT_VERSION)
5188 goto badframe;
5189
5190 /* restore passed registers */
5191 __get_user(env->dregs[0], &gregs[0]);
5192 __get_user(env->dregs[1], &gregs[1]);
5193 __get_user(env->dregs[2], &gregs[2]);
5194 __get_user(env->dregs[3], &gregs[3]);
5195 __get_user(env->dregs[4], &gregs[4]);
5196 __get_user(env->dregs[5], &gregs[5]);
5197 __get_user(env->dregs[6], &gregs[6]);
5198 __get_user(env->dregs[7], &gregs[7]);
5199 __get_user(env->aregs[0], &gregs[8]);
5200 __get_user(env->aregs[1], &gregs[9]);
5201 __get_user(env->aregs[2], &gregs[10]);
5202 __get_user(env->aregs[3], &gregs[11]);
5203 __get_user(env->aregs[4], &gregs[12]);
5204 __get_user(env->aregs[5], &gregs[13]);
5205 __get_user(env->aregs[6], &gregs[14]);
5206 __get_user(env->aregs[7], &gregs[15]);
5207 __get_user(env->pc, &gregs[16]);
5208 __get_user(temp, &gregs[17]);
5209 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5210
5211 return 0;
5212
5213 badframe:
5214 return 1;
5215 }
5216
5217 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5218 target_siginfo_t *info,
5219 target_sigset_t *set, CPUM68KState *env)
5220 {
5221 struct target_rt_sigframe *frame;
5222 abi_ulong frame_addr;
5223 abi_ulong retcode_addr;
5224 abi_ulong info_addr;
5225 abi_ulong uc_addr;
5226 int err = 0;
5227 int i;
5228
5229 frame_addr = get_sigframe(ka, env, sizeof *frame);
5230 trace_user_setup_rt_frame(env, frame_addr);
5231 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5232 goto give_sigsegv;
5233 }
5234
5235 __put_user(sig, &frame->sig);
5236
5237 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5238 __put_user(info_addr, &frame->pinfo);
5239
5240 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5241 __put_user(uc_addr, &frame->puc);
5242
5243 tswap_siginfo(&frame->info, info);
5244
5245 /* Create the ucontext */
5246
5247 __put_user(0, &frame->uc.tuc_flags);
5248 __put_user(0, &frame->uc.tuc_link);
5249 __put_user(target_sigaltstack_used.ss_sp,
5250 &frame->uc.tuc_stack.ss_sp);
5251 __put_user(sas_ss_flags(env->aregs[7]),
5252 &frame->uc.tuc_stack.ss_flags);
5253 __put_user(target_sigaltstack_used.ss_size,
5254 &frame->uc.tuc_stack.ss_size);
5255 err |= target_rt_setup_ucontext(&frame->uc, env);
5256
5257 if (err)
5258 goto give_sigsegv;
5259
5260 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5261 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5262 }
5263
5264 /* Set up to return from userspace. */
5265
5266 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5267 __put_user(retcode_addr, &frame->pretcode);
5268
5269 /* moveq #,d0; notb d0; trap #0 */
5270
5271 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5272 (uint32_t *)(frame->retcode + 0));
5273 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5274
5275 if (err)
5276 goto give_sigsegv;
5277
5278 /* Set up to return from userspace */
5279
5280 env->aregs[7] = frame_addr;
5281 env->pc = ka->_sa_handler;
5282
5283 unlock_user_struct(frame, frame_addr, 1);
5284 return;
5285
5286 give_sigsegv:
5287 unlock_user_struct(frame, frame_addr, 1);
5288 force_sig(TARGET_SIGSEGV);
5289 }
5290
5291 long do_sigreturn(CPUM68KState *env)
5292 {
5293 struct target_sigframe *frame;
5294 abi_ulong frame_addr = env->aregs[7] - 4;
5295 target_sigset_t target_set;
5296 sigset_t set;
5297 int i;
5298
5299 trace_user_do_sigreturn(env, frame_addr);
5300 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5301 goto badframe;
5302
5303 /* set blocked signals */
5304
5305 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5306
5307 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5308 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5309 }
5310
5311 target_to_host_sigset_internal(&set, &target_set);
5312 set_sigmask(&set);
5313
5314 /* restore registers */
5315
5316 restore_sigcontext(env, &frame->sc);
5317
5318 unlock_user_struct(frame, frame_addr, 0);
5319 return -TARGET_QEMU_ESIGRETURN;
5320
5321 badframe:
5322 force_sig(TARGET_SIGSEGV);
5323 return 0;
5324 }
5325
5326 long do_rt_sigreturn(CPUM68KState *env)
5327 {
5328 struct target_rt_sigframe *frame;
5329 abi_ulong frame_addr = env->aregs[7] - 4;
5330 target_sigset_t target_set;
5331 sigset_t set;
5332
5333 trace_user_do_rt_sigreturn(env, frame_addr);
5334 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5335 goto badframe;
5336
5337 target_to_host_sigset_internal(&set, &target_set);
5338 set_sigmask(&set);
5339
5340 /* restore registers */
5341
5342 if (target_rt_restore_ucontext(env, &frame->uc))
5343 goto badframe;
5344
5345 if (do_sigaltstack(frame_addr +
5346 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5347 0, get_sp_from_cpustate(env)) == -EFAULT)
5348 goto badframe;
5349
5350 unlock_user_struct(frame, frame_addr, 0);
5351 return -TARGET_QEMU_ESIGRETURN;
5352
5353 badframe:
5354 unlock_user_struct(frame, frame_addr, 0);
5355 force_sig(TARGET_SIGSEGV);
5356 return 0;
5357 }
5358
5359 #elif defined(TARGET_ALPHA)
5360
/* Guest view of the alpha struct sigcontext.  The layout must match the
   alpha Linux kernel ABI exactly: it is copied to/from guest memory by
   setup_sigcontext()/restore_sigcontext() below. */
struct target_sigcontext {
    abi_long sc_onstack;            /* non-zero if executing on sigaltstack */
    abi_long sc_mask;               /* first word of the blocked-signal mask */
    abi_long sc_pc;
    abi_long sc_ps;
    abi_long sc_regs[32];           /* integer registers; [31] stored as 0 */
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];         /* FP registers; [31] stored as 0 */
    abi_ulong sc_fpcr;              /* FP control register */
    abi_ulong sc_fp_control;
    abi_ulong sc_reserved1;
    abi_ulong sc_reserved2;
    abi_ulong sc_ssize;
    abi_ulong sc_sbase;
    abi_ulong sc_traparg_a0;        /* trap args — left zero by QEMU (FIXME) */
    abi_ulong sc_traparg_a1;
    abi_ulong sc_traparg_a2;
    abi_ulong sc_fp_trap_pc;
    abi_ulong sc_fp_trigger_sum;
    abi_ulong sc_fp_trigger_inst;
};
5382
/* Guest ucontext as passed to alpha rt signal handlers. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    abi_ulong tuc_osf_sigmask;      /* legacy OSF-style mask (sig[0] only) */
    target_stack_t tuc_stack;       /* sigaltstack state */
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;    /* full blocked-signal set */
};
5391
/* Non-rt alpha signal frame: machine context plus sigreturn trampoline. */
struct target_sigframe {
    struct target_sigcontext sc;
    unsigned int retcode[3];        /* mov $30,$16; ldi $0,NR; callsys */
};
5396
/* rt alpha signal frame: siginfo, full ucontext, sigreturn trampoline. */
struct target_rt_sigframe {
    target_siginfo_t info;
    struct target_ucontext uc;
    unsigned int retcode[3];        /* mov $30,$16; ldi $0,NR; callsys */
};
5402
5403 #define INSN_MOV_R30_R16 0x47fe0410
5404 #define INSN_LDI_R0 0x201f0000
5405 #define INSN_CALLSYS 0x00000083
5406
5407 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5408 abi_ulong frame_addr, target_sigset_t *set)
5409 {
5410 int i;
5411
5412 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5413 __put_user(set->sig[0], &sc->sc_mask);
5414 __put_user(env->pc, &sc->sc_pc);
5415 __put_user(8, &sc->sc_ps);
5416
5417 for (i = 0; i < 31; ++i) {
5418 __put_user(env->ir[i], &sc->sc_regs[i]);
5419 }
5420 __put_user(0, &sc->sc_regs[31]);
5421
5422 for (i = 0; i < 31; ++i) {
5423 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5424 }
5425 __put_user(0, &sc->sc_fpregs[31]);
5426 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5427
5428 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5429 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5430 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5431 }
5432
5433 static void restore_sigcontext(CPUAlphaState *env,
5434 struct target_sigcontext *sc)
5435 {
5436 uint64_t fpcr;
5437 int i;
5438
5439 __get_user(env->pc, &sc->sc_pc);
5440
5441 for (i = 0; i < 31; ++i) {
5442 __get_user(env->ir[i], &sc->sc_regs[i]);
5443 }
5444 for (i = 0; i < 31; ++i) {
5445 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5446 }
5447
5448 __get_user(fpcr, &sc->sc_fpcr);
5449 cpu_alpha_store_fpcr(env, fpcr);
5450 }
5451
5452 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5453 CPUAlphaState *env,
5454 unsigned long framesize)
5455 {
5456 abi_ulong sp = env->ir[IR_SP];
5457
5458 /* This is the X/Open sanctioned signal stack switching. */
5459 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5460 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5461 }
5462 return (sp - framesize) & -32;
5463 }
5464
5465 static void setup_frame(int sig, struct target_sigaction *ka,
5466 target_sigset_t *set, CPUAlphaState *env)
5467 {
5468 abi_ulong frame_addr, r26;
5469 struct target_sigframe *frame;
5470 int err = 0;
5471
5472 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5473 trace_user_setup_frame(env, frame_addr);
5474 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5475 goto give_sigsegv;
5476 }
5477
5478 setup_sigcontext(&frame->sc, env, frame_addr, set);
5479
5480 if (ka->sa_restorer) {
5481 r26 = ka->sa_restorer;
5482 } else {
5483 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5484 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5485 &frame->retcode[1]);
5486 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5487 /* imb() */
5488 r26 = frame_addr;
5489 }
5490
5491 unlock_user_struct(frame, frame_addr, 1);
5492
5493 if (err) {
5494 give_sigsegv:
5495 if (sig == TARGET_SIGSEGV) {
5496 ka->_sa_handler = TARGET_SIG_DFL;
5497 }
5498 force_sig(TARGET_SIGSEGV);
5499 }
5500
5501 env->ir[IR_RA] = r26;
5502 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5503 env->ir[IR_A0] = sig;
5504 env->ir[IR_A1] = 0;
5505 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5506 env->ir[IR_SP] = frame_addr;
5507 }
5508
5509 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5510 target_siginfo_t *info,
5511 target_sigset_t *set, CPUAlphaState *env)
5512 {
5513 abi_ulong frame_addr, r26;
5514 struct target_rt_sigframe *frame;
5515 int i, err = 0;
5516
5517 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5518 trace_user_setup_rt_frame(env, frame_addr);
5519 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5520 goto give_sigsegv;
5521 }
5522
5523 tswap_siginfo(&frame->info, info);
5524
5525 __put_user(0, &frame->uc.tuc_flags);
5526 __put_user(0, &frame->uc.tuc_link);
5527 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5528 __put_user(target_sigaltstack_used.ss_sp,
5529 &frame->uc.tuc_stack.ss_sp);
5530 __put_user(sas_ss_flags(env->ir[IR_SP]),
5531 &frame->uc.tuc_stack.ss_flags);
5532 __put_user(target_sigaltstack_used.ss_size,
5533 &frame->uc.tuc_stack.ss_size);
5534 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5535 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5536 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5537 }
5538
5539 if (ka->sa_restorer) {
5540 r26 = ka->sa_restorer;
5541 } else {
5542 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5543 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5544 &frame->retcode[1]);
5545 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5546 /* imb(); */
5547 r26 = frame_addr;
5548 }
5549
5550 if (err) {
5551 give_sigsegv:
5552 if (sig == TARGET_SIGSEGV) {
5553 ka->_sa_handler = TARGET_SIG_DFL;
5554 }
5555 force_sig(TARGET_SIGSEGV);
5556 }
5557
5558 env->ir[IR_RA] = r26;
5559 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5560 env->ir[IR_A0] = sig;
5561 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5562 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5563 env->ir[IR_SP] = frame_addr;
5564 }
5565
5566 long do_sigreturn(CPUAlphaState *env)
5567 {
5568 struct target_sigcontext *sc;
5569 abi_ulong sc_addr = env->ir[IR_A0];
5570 target_sigset_t target_set;
5571 sigset_t set;
5572
5573 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5574 goto badframe;
5575 }
5576
5577 target_sigemptyset(&target_set);
5578 __get_user(target_set.sig[0], &sc->sc_mask);
5579
5580 target_to_host_sigset_internal(&set, &target_set);
5581 set_sigmask(&set);
5582
5583 restore_sigcontext(env, sc);
5584 unlock_user_struct(sc, sc_addr, 0);
5585 return -TARGET_QEMU_ESIGRETURN;
5586
5587 badframe:
5588 force_sig(TARGET_SIGSEGV);
5589 }
5590
5591 long do_rt_sigreturn(CPUAlphaState *env)
5592 {
5593 abi_ulong frame_addr = env->ir[IR_A0];
5594 struct target_rt_sigframe *frame;
5595 sigset_t set;
5596
5597 trace_user_do_rt_sigreturn(env, frame_addr);
5598 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5599 goto badframe;
5600 }
5601 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5602 set_sigmask(&set);
5603
5604 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5605 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5606 uc.tuc_stack),
5607 0, env->ir[IR_SP]) == -EFAULT) {
5608 goto badframe;
5609 }
5610
5611 unlock_user_struct(frame, frame_addr, 0);
5612 return -TARGET_QEMU_ESIGRETURN;
5613
5614
5615 badframe:
5616 unlock_user_struct(frame, frame_addr, 0);
5617 force_sig(TARGET_SIGSEGV);
5618 }
5619
5620 #elif defined(TARGET_TILEGX)
5621
/* Guest view of the tilegx struct sigcontext; the layout must match the
   guest kernel ABI, as it is copied to/from guest memory below. */
struct target_sigcontext {
    union {
        /* General-purpose registers. */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp;           /* Aliases gregs[TREG_TP]. */
            abi_ulong sp;           /* Aliases gregs[TREG_SP]. */
            abi_ulong lr;           /* Aliases gregs[TREG_LR]. */
        };
    };
    abi_ulong pc;                   /* Program counter. */
    abi_ulong ics;                  /* In Interrupt Critical Section? */
    abi_ulong faultnum;             /* Fault number. */
    abi_ulong pad[5];
};
5638
/* Guest ucontext passed to tilegx rt signal handlers. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;       /* sigaltstack state */
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;    /* mask last for extensibility */
};
5646
/* rt signal frame pushed on the guest stack by setup_rt_frame(). */
struct target_rt_sigframe {
    unsigned char save_area[16];    /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];           /* moveli r10,139 ; swint1 trampoline */
};
5653
5654 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5655 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
5656
5657
5658 static void setup_sigcontext(struct target_sigcontext *sc,
5659 CPUArchState *env, int signo)
5660 {
5661 int i;
5662
5663 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5664 __put_user(env->regs[i], &sc->gregs[i]);
5665 }
5666
5667 __put_user(env->pc, &sc->pc);
5668 __put_user(0, &sc->ics);
5669 __put_user(signo, &sc->faultnum);
5670 }
5671
5672 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5673 {
5674 int i;
5675
5676 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5677 __get_user(env->regs[i], &sc->gregs[i]);
5678 }
5679
5680 __get_user(env->pc, &sc->pc);
5681 }
5682
5683 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5684 size_t frame_size)
5685 {
5686 unsigned long sp = env->regs[TILEGX_R_SP];
5687
5688 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5689 return -1UL;
5690 }
5691
5692 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
5693 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5694 }
5695
5696 sp -= frame_size;
5697 sp &= -16UL;
5698 return sp;
5699 }
5700
5701 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5702 target_siginfo_t *info,
5703 target_sigset_t *set, CPUArchState *env)
5704 {
5705 abi_ulong frame_addr;
5706 struct target_rt_sigframe *frame;
5707 unsigned long restorer;
5708
5709 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5710 trace_user_setup_rt_frame(env, frame_addr);
5711 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5712 goto give_sigsegv;
5713 }
5714
5715 /* Always write at least the signal number for the stack backtracer. */
5716 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5717 /* At sigreturn time, restore the callee-save registers too. */
5718 tswap_siginfo(&frame->info, info);
5719 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
5720 } else {
5721 __put_user(info->si_signo, &frame->info.si_signo);
5722 }
5723
5724 /* Create the ucontext. */
5725 __put_user(0, &frame->uc.tuc_flags);
5726 __put_user(0, &frame->uc.tuc_link);
5727 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5728 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5729 &frame->uc.tuc_stack.ss_flags);
5730 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5731 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5732
5733 if (ka->sa_flags & TARGET_SA_RESTORER) {
5734 restorer = (unsigned long) ka->sa_restorer;
5735 } else {
5736 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5737 __put_user(INSN_SWINT1, &frame->retcode[1]);
5738 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5739 }
5740 env->pc = (unsigned long) ka->_sa_handler;
5741 env->regs[TILEGX_R_SP] = (unsigned long) frame;
5742 env->regs[TILEGX_R_LR] = restorer;
5743 env->regs[0] = (unsigned long) sig;
5744 env->regs[1] = (unsigned long) &frame->info;
5745 env->regs[2] = (unsigned long) &frame->uc;
5746 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
5747
5748 unlock_user_struct(frame, frame_addr, 1);
5749 return;
5750
5751 give_sigsegv:
5752 if (sig == TARGET_SIGSEGV) {
5753 ka->_sa_handler = TARGET_SIG_DFL;
5754 }
5755 force_sig(TARGET_SIGSEGV /* , current */);
5756 }
5757
5758 long do_rt_sigreturn(CPUTLGState *env)
5759 {
5760 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5761 struct target_rt_sigframe *frame;
5762 sigset_t set;
5763
5764 trace_user_do_rt_sigreturn(env, frame_addr);
5765 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5766 goto badframe;
5767 }
5768 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5769 set_sigmask(&set);
5770
5771 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5772 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5773 uc.tuc_stack),
5774 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5775 goto badframe;
5776 }
5777
5778 unlock_user_struct(frame, frame_addr, 0);
5779 return -TARGET_QEMU_ESIGRETURN;
5780
5781
5782 badframe:
5783 unlock_user_struct(frame, frame_addr, 0);
5784 force_sig(TARGET_SIGSEGV);
5785 }
5786
5787 #else
5788
/* Fallback for targets with no signal-frame implementation: report the
   gap on stderr; the guest handler is never actually entered. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}
5794
/* Fallback rt variant for targets with no signal-frame implementation. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
5801
/* Fallback sigreturn: unimplemented for this target. */
long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
5807
/* Fallback rt sigreturn: unimplemented for this target. */
long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
5813
5814 #endif
5815
/* Deliver one pending emulated signal to the guest: dequeue it, decide
 * between default/ignore/handler disposition, and for a real handler
 * update the guest's blocked mask and build the signal frame on the
 * guest stack.  Called from process_pending_signals() with all host
 * signals blocked, so the sigtab cannot change underneath us.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    struct sigqueue *q;
    TaskState *ts = cpu->opaque;
    struct emulated_sigtable *k = &ts->sigtab[sig - 1];

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal: pop the head of this signal's queue and clear
     * the pending flag once the queue is empty */
    q = k->first;
    k->first = q->next;
    if (!k->first)
        k->pending = 0;

    /* Give an attached gdb the chance to intercept the signal; a
     * return value of 0 means gdb consumed it. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (sig == TARGET_SIGSEGV && sigismember(&ts->signal_mask, SIGSEGV)) {
        /* Guest has blocked SIGSEGV but we got one anyway. Assume this
         * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
         * because it got a real MMU fault), and treat as if default handler.
         */
        handler = TARGET_SIG_DFL;
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signal. The other are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            force_sig(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler; if we interrupted a sigsuspend,
         * extend the temporary sigsuspend mask rather than the normal
         * one so it is restored correctly afterwards */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: the handler runs once, then reverts to default */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
    /* release the queue entry unless it is the statically embedded one */
    if (q != &k->info)
        free_sigqueue(cpu_env, q);
}
5911
/* Scan the emulated signal table and deliver every deliverable pending
 * signal.  Host signals are blocked while scanning; unblocking at the
 * end may immediately raise another host signal, which sets
 * signal_pending again and re-enters the loop.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        /* Block all host signals so the sigtab cannot change while we
         * walk it. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* Re-evaluate the mask each iteration: delivering a signal
             * below changes ts->signal_mask / in_sigsuspend. */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            /* SIGSEGV is delivered even when blocked: see the forced-
             * SIGSEGV handling in handle_pending_signal(). */
            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig])
                 || sig == TARGET_SIGSEGV)) {
                handle_pending_signal(cpu_env, sig);
                /* Restart scan from the beginning */
                sig = 1;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS stay unblocked at the host level so real MMU
         * faults can always be caught. */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}