[mirror_qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
33 };
34
35 static struct target_sigaction sigact_table[TARGET_NSIG];
36
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
39
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
82 };
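/* Host signals with no explicit entry above are left as 0 here and are
 * turned into an identity mapping by signal_init(), so the realtime
 * signals between __SIGRTMIN and __SIGRTMAX (other than the two swapped
 * endpoints) keep their host numbers.
 */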
83 static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
110
111 static inline void target_sigemptyset(target_sigset_t *set)
112 {
113 memset(set, 0, sizeof(*set));
114 }
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
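/* A worked example of the indexing used above, assuming
 * TARGET_NSIG_BPW == 32: target signal 35 becomes bit (35 - 1) % 32 == 2
 * of word (35 - 1) / 32 == 1, i.e. mask 0x4 in set->sig[1]. With 64-bit
 * abi_ulong words the same signal is bit 34 of set->sig[0].
 */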
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
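/* Block all host signals for the calling thread and report whether a
 * guest signal was already pending. A non-zero return value means we
 * raced with signal delivery and the caller should back out and restart,
 * typically by returning -TARGET_ERESTARTSYS as do_sigprocmask() below
 * does.
 */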
194 int block_signals(void)
195 {
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
198
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
202 */
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
205
206 return atomic_xchg(&ts->signal_pending, 1);
207 }
208
209 /* Wrapper for sigprocmask function
210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
211 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
212 * a signal was already pending and the syscall must be restarted, or
213 * 0 on success.
214 * If set is NULL, this is guaranteed not to fail.
215 */
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
217 {
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
219
220 if (oldset) {
221 *oldset = ts->signal_mask;
222 }
223
224 if (set) {
225 int i;
226
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
229 }
230
231 switch (how) {
232 case SIG_BLOCK:
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
234 break;
235 case SIG_UNBLOCK:
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
239 }
240 }
241 break;
242 case SIG_SETMASK:
243 ts->signal_mask = *set;
244 break;
245 default:
246 g_assert_not_reached();
247 }
248
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
252 }
253 return 0;
254 }
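/* A minimal sketch of a caller, e.g. the sigprocmask syscall emulation;
 * the guest-side variables and error plumbing here are illustrative only:
 *
 *     sigset_t set, oldset;
 *     target_to_host_sigset(&set, &target_set);   // guest -> host layout
 *     ret = do_sigprocmask(SIG_BLOCK, &set, &oldset);
 *     if (ret == -TARGET_ERESTARTSYS) {
 *         return ret;                             // restart the syscall
 *     }
 *     host_to_target_sigset(&target_oldset, &oldset);
 */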
255
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
257 !defined(TARGET_NIOS2)
258 /* Just set the guest's signal mask to the specified value; the
259 * caller is assumed to have called block_signals() already.
260 */
261 static void set_sigmask(const sigset_t *set)
262 {
263 TaskState *ts = (TaskState *)thread_cpu->opaque;
264
265 ts->signal_mask = *set;
266 }
267 #endif
268
269 /* siginfo conversion */
270
271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
272 const siginfo_t *info)
273 {
274 int sig = host_to_target_signal(info->si_signo);
275 int si_code = info->si_code;
276 int si_type;
277 tinfo->si_signo = sig;
278 tinfo->si_errno = 0;
279 tinfo->si_code = info->si_code;
280
281 /* This memset serves two purposes:
282 * (1) ensure we don't leak random junk to the guest later
283 * (2) placate false positives from gcc about fields
284 * being used uninitialized if it chooses to inline both this
285 * function and tswap_siginfo() into host_to_target_siginfo().
286 */
287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
288
289 /* This is awkward, because we have to use a combination of
290 * the si_code and si_signo to figure out which of the union's
291 * members are valid. (Within the host kernel it is always possible
292 * to tell, but the kernel carefully avoids giving userspace the
293 * high 16 bits of si_code, so we don't have the information to
294 * do this the easy way...) We therefore make our best guess,
295 * bearing in mind that a guest can spoof most of the si_codes
296 * via rt_sigqueueinfo() if it likes.
297 *
298 * Once we have made our guess, we record it in the top 16 bits of
299 * the si_code, so that tswap_siginfo() later can use it.
300 * tswap_siginfo() will strip these top bits out before writing
301 * si_code to the guest (sign-extending the lower bits).
302 */
303
304 switch (si_code) {
305 case SI_USER:
306 case SI_TKILL:
307 case SI_KERNEL:
308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
309 * These are the only unspoofable si_code values.
310 */
311 tinfo->_sifields._kill._pid = info->si_pid;
312 tinfo->_sifields._kill._uid = info->si_uid;
313 si_type = QEMU_SI_KILL;
314 break;
315 default:
316 /* Everything else is spoofable. Make best guess based on signal */
317 switch (sig) {
318 case TARGET_SIGCHLD:
319 tinfo->_sifields._sigchld._pid = info->si_pid;
320 tinfo->_sifields._sigchld._uid = info->si_uid;
321 tinfo->_sifields._sigchld._status
322 = host_to_target_waitstatus(info->si_status);
323 tinfo->_sifields._sigchld._utime = info->si_utime;
324 tinfo->_sifields._sigchld._stime = info->si_stime;
325 si_type = QEMU_SI_CHLD;
326 break;
327 case TARGET_SIGIO:
328 tinfo->_sifields._sigpoll._band = info->si_band;
329 tinfo->_sifields._sigpoll._fd = info->si_fd;
330 si_type = QEMU_SI_POLL;
331 break;
332 default:
333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
334 tinfo->_sifields._rt._pid = info->si_pid;
335 tinfo->_sifields._rt._uid = info->si_uid;
336 /* XXX: potential problem if 64 bit */
337 tinfo->_sifields._rt._sigval.sival_ptr
338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
339 si_type = QEMU_SI_RT;
340 break;
341 }
342 break;
343 }
344
345 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
346 }
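/* Concretely: deposit32(si_code, 16, 16, si_type) keeps the original
 * si_code in bits [15:0] and stores the QEMU_SI_* guess in bits [31:16].
 * tswap_siginfo() below recovers the type with extract32(si_code, 16, 16)
 * and the (sign-extended) code actually written to the guest with
 * sextract32(si_code, 0, 16).
 */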
347
348 static void tswap_siginfo(target_siginfo_t *tinfo,
349 const target_siginfo_t *info)
350 {
351 int si_type = extract32(info->si_code, 16, 16);
352 int si_code = sextract32(info->si_code, 0, 16);
353
354 __put_user(info->si_signo, &tinfo->si_signo);
355 __put_user(info->si_errno, &tinfo->si_errno);
356 __put_user(si_code, &tinfo->si_code);
357
358 /* We can use our internal marker of which fields in the structure
359 * are valid, rather than duplicating the guesswork of
360 * host_to_target_siginfo_noswap() here.
361 */
362 switch (si_type) {
363 case QEMU_SI_KILL:
364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
366 break;
367 case QEMU_SI_TIMER:
368 __put_user(info->_sifields._timer._timer1,
369 &tinfo->_sifields._timer._timer1);
370 __put_user(info->_sifields._timer._timer2,
371 &tinfo->_sifields._timer._timer2);
372 break;
373 case QEMU_SI_POLL:
374 __put_user(info->_sifields._sigpoll._band,
375 &tinfo->_sifields._sigpoll._band);
376 __put_user(info->_sifields._sigpoll._fd,
377 &tinfo->_sifields._sigpoll._fd);
378 break;
379 case QEMU_SI_FAULT:
380 __put_user(info->_sifields._sigfault._addr,
381 &tinfo->_sifields._sigfault._addr);
382 break;
383 case QEMU_SI_CHLD:
384 __put_user(info->_sifields._sigchld._pid,
385 &tinfo->_sifields._sigchld._pid);
386 __put_user(info->_sifields._sigchld._uid,
387 &tinfo->_sifields._sigchld._uid);
388 __put_user(info->_sifields._sigchld._status,
389 &tinfo->_sifields._sigchld._status);
390 __put_user(info->_sifields._sigchld._utime,
391 &tinfo->_sifields._sigchld._utime);
392 __put_user(info->_sifields._sigchld._stime,
393 &tinfo->_sifields._sigchld._stime);
394 break;
395 case QEMU_SI_RT:
396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
398 __put_user(info->_sifields._rt._sigval.sival_ptr,
399 &tinfo->_sifields._rt._sigval.sival_ptr);
400 break;
401 default:
402 g_assert_not_reached();
403 }
404 }
405
406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
407 {
408 target_siginfo_t tgt_tmp;
409 host_to_target_siginfo_noswap(&tgt_tmp, info);
410 tswap_siginfo(tinfo, &tgt_tmp);
411 }
412
413 /* XXX: we assume that only POSIX RT signals are used. */
414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
416 {
417 /* This conversion is used only for the rt_sigqueueinfo syscall,
418 * and so we know that the _rt fields are the valid ones.
419 */
420 abi_ulong sival_ptr;
421
422 __get_user(info->si_signo, &tinfo->si_signo);
423 __get_user(info->si_errno, &tinfo->si_errno);
424 __get_user(info->si_code, &tinfo->si_code);
425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
428 info->si_value.sival_ptr = (void *)(long)sival_ptr;
429 }
430
431 static int fatal_signal (int sig)
432 {
433 switch (sig) {
434 case TARGET_SIGCHLD:
435 case TARGET_SIGURG:
436 case TARGET_SIGWINCH:
437 /* Ignored by default. */
438 return 0;
439 case TARGET_SIGCONT:
440 case TARGET_SIGSTOP:
441 case TARGET_SIGTSTP:
442 case TARGET_SIGTTIN:
443 case TARGET_SIGTTOU:
444 /* Job control signals. */
445 return 0;
446 default:
447 return 1;
448 }
449 }
450
451 /* returns 1 if the given signal should dump core when not handled */
452 static int core_dump_signal(int sig)
453 {
454 switch (sig) {
455 case TARGET_SIGABRT:
456 case TARGET_SIGFPE:
457 case TARGET_SIGILL:
458 case TARGET_SIGQUIT:
459 case TARGET_SIGSEGV:
460 case TARGET_SIGTRAP:
461 case TARGET_SIGBUS:
462 return (1);
463 default:
464 return (0);
465 }
466 }
467
468 void signal_init(void)
469 {
470 TaskState *ts = (TaskState *)thread_cpu->opaque;
471 struct sigaction act;
472 struct sigaction oact;
473 int i, j;
474 int host_sig;
475
476 /* generate signal conversion tables */
477 for(i = 1; i < _NSIG; i++) {
478 if (host_to_target_signal_table[i] == 0)
479 host_to_target_signal_table[i] = i;
480 }
481 for(i = 1; i < _NSIG; i++) {
482 j = host_to_target_signal_table[i];
483 target_to_host_signal_table[j] = i;
484 }
485
486 /* Set the signal mask from the host mask. */
487 sigprocmask(0, 0, &ts->signal_mask);
488
489 /* Set all host signal handlers. ALL signals are blocked while the
490 handlers run, to serialize them. */
491 memset(sigact_table, 0, sizeof(sigact_table));
492
493 sigfillset(&act.sa_mask);
494 act.sa_flags = SA_SIGINFO;
495 act.sa_sigaction = host_signal_handler;
496 for(i = 1; i <= TARGET_NSIG; i++) {
497 host_sig = target_to_host_signal(i);
498 sigaction(host_sig, NULL, &oact);
499 if (oact.sa_sigaction == (void *)SIG_IGN) {
500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
501 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
503 }
504 /* If there's already a handler installed then something has
505 gone horribly wrong, so don't even try to handle that case. */
506 /* Install some handlers for our own use. We need at least
507 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
508 trap all signals because it affects syscall interrupt
509 behavior. But do trap all default-fatal signals. */
510 if (fatal_signal (i))
511 sigaction(host_sig, &act, NULL);
512 }
513 }
514
515 #ifndef TARGET_UNICORE32
516 /* Force a synchronously taken signal. The kernel force_sig() function
517 * also forces the signal to "not blocked, not ignored", but for QEMU
518 * that work is done in process_pending_signals().
519 */
520 static void force_sig(int sig)
521 {
522 CPUState *cpu = thread_cpu;
523 CPUArchState *env = cpu->env_ptr;
524 target_siginfo_t info;
525
526 info.si_signo = sig;
527 info.si_errno = 0;
528 info.si_code = TARGET_SI_KERNEL;
529 info._sifields._kill._pid = 0;
530 info._sifields._kill._uid = 0;
531 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
532 }
533
534 /* Force a SIGSEGV if we couldn't write to memory trying to set
535 * up the signal frame. oldsig is the signal we were trying to handle
536 * at the point of failure.
537 */
538 static void force_sigsegv(int oldsig)
539 {
540 if (oldsig == SIGSEGV) {
541 /* Make sure we don't try to deliver the signal again; this will
542 * end up with handle_pending_signal() calling dump_core_and_abort().
543 */
544 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
545 }
546 force_sig(TARGET_SIGSEGV);
547 }
548 #endif
549
550 /* abort execution with signal */
551 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
552 {
553 CPUState *cpu = thread_cpu;
554 CPUArchState *env = cpu->env_ptr;
555 TaskState *ts = (TaskState *)cpu->opaque;
556 int host_sig, core_dumped = 0;
557 struct sigaction act;
558
559 host_sig = target_to_host_signal(target_sig);
560 trace_user_force_sig(env, target_sig, host_sig);
561 gdb_signalled(env, target_sig);
562
563 /* dump core if supported by target binary format */
564 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
565 stop_all_tasks();
566 core_dumped =
567 ((*ts->bprm->core_dump)(target_sig, env) == 0);
568 }
569 if (core_dumped) {
570 /* we have already dumped the core of the target process, so we
571 * don't want a coredump of qemu itself */
572 struct rlimit nodump;
573 getrlimit(RLIMIT_CORE, &nodump);
574 nodump.rlim_cur=0;
575 setrlimit(RLIMIT_CORE, &nodump);
576 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
577 target_sig, strsignal(host_sig), "core dumped" );
578 }
579
580 /* The proper exit code for dying from an uncaught signal is
581 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
582 * a negative value. To get the proper exit code we need to
583 * actually die from an uncaught signal. Here the default signal
584 * handler is installed, we send ourselves the signal and wait for
585 * it to arrive. */
586 sigfillset(&act.sa_mask);
587 act.sa_handler = SIG_DFL;
588 act.sa_flags = 0;
589 sigaction(host_sig, &act, NULL);
590
591 /* For some reason raise(host_sig) doesn't send the signal when
592 * statically linked on x86-64. */
593 kill(getpid(), host_sig);
594
595 /* Make sure the signal isn't masked (just reuse the mask inside
596 of act) */
597 sigdelset(&act.sa_mask, host_sig);
598 sigsuspend(&act.sa_mask);
599
600 /* unreachable */
601 abort();
602 }
603
604 /* queue a signal so that it will be sent to the virtual CPU as soon
605 as possible */
606 int queue_signal(CPUArchState *env, int sig, int si_type,
607 target_siginfo_t *info)
608 {
609 CPUState *cpu = ENV_GET_CPU(env);
610 TaskState *ts = cpu->opaque;
611
612 trace_user_queue_signal(env, sig);
613
614 info->si_code = deposit32(info->si_code, 16, 16, si_type);
615
616 ts->sync_signal.info = *info;
617 ts->sync_signal.pending = sig;
618 /* signal that a new signal is pending */
619 atomic_set(&ts->signal_pending, 1);
620 return 1; /* indicates that the signal was queued */
621 }
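/* A typical caller is a target cpu_loop() that has detected a synchronous
 * fault; in outline (fault_addr and the si_code chosen are illustrative):
 *
 *     target_siginfo_t info = { 0 };
 *     info.si_signo = TARGET_SIGSEGV;
 *     info.si_errno = 0;
 *     info.si_code = TARGET_SEGV_MAPERR;
 *     info._sifields._sigfault._addr = fault_addr;
 *     queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
 */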
622
623 #ifndef HAVE_SAFE_SYSCALL
624 static inline void rewind_if_in_safe_syscall(void *puc)
625 {
626 /* Default version: never rewind */
627 }
628 #endif
629
630 static void host_signal_handler(int host_signum, siginfo_t *info,
631 void *puc)
632 {
633 CPUArchState *env = thread_cpu->env_ptr;
634 CPUState *cpu = ENV_GET_CPU(env);
635 TaskState *ts = cpu->opaque;
636
637 int sig;
638 target_siginfo_t tinfo;
639 ucontext_t *uc = puc;
640 struct emulated_sigtable *k;
641
642 /* the CPU emulator uses some host signals to detect exceptions;
643 we forward those signals to it */
644 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
645 && info->si_code > 0) {
646 if (cpu_signal_handler(host_signum, info, puc))
647 return;
648 }
649
650 /* get target signal number */
651 sig = host_to_target_signal(host_signum);
652 if (sig < 1 || sig > TARGET_NSIG)
653 return;
654 trace_user_host_signal(env, host_signum, sig);
655
656 rewind_if_in_safe_syscall(puc);
657
658 host_to_target_siginfo_noswap(&tinfo, info);
659 k = &ts->sigtab[sig - 1];
660 k->info = tinfo;
661 k->pending = sig;
662 ts->signal_pending = 1;
663
664 /* Block host signals until the target signal handler is entered. We
665 * can't block SIGSEGV or SIGBUS while we're executing guest
666 * code in case the guest code provokes one in the window between
667 * now and it getting out to the main loop. Signals will be
668 * unblocked again in process_pending_signals().
669 *
670 * WARNING: we cannot use sigfillset() here because the uc_sigmask
671 * field is a kernel sigset_t, which is much smaller than the
672 * libc sigset_t which sigfillset() operates on. Using sigfillset()
673 * would write 0xff bytes off the end of the structure and trash
674 * data on the struct.
675 * We can't use sizeof(uc->uc_sigmask) either, because the libc
676 * headers define the struct field with the wrong (too large) type.
677 */
678 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
679 sigdelset(&uc->uc_sigmask, SIGSEGV);
680 sigdelset(&uc->uc_sigmask, SIGBUS);
681
682 /* interrupt the virtual CPU as soon as possible */
683 cpu_exit(thread_cpu);
684 }
685
686 /* do_sigaltstack() returns target values and errnos. */
687 /* compare linux/kernel/signal.c:do_sigaltstack() */
688 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
689 {
690 int ret;
691 struct target_sigaltstack oss;
692
693 /* XXX: test errors */
694 if(uoss_addr)
695 {
696 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
697 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
698 __put_user(sas_ss_flags(sp), &oss.ss_flags);
699 }
700
701 if(uss_addr)
702 {
703 struct target_sigaltstack *uss;
704 struct target_sigaltstack ss;
705 size_t minstacksize = TARGET_MINSIGSTKSZ;
706
707 #if defined(TARGET_PPC64)
708 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
709 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
710 if (get_ppc64_abi(image) > 1) {
711 minstacksize = 4096;
712 }
713 #endif
714
715 ret = -TARGET_EFAULT;
716 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
717 goto out;
718 }
719 __get_user(ss.ss_sp, &uss->ss_sp);
720 __get_user(ss.ss_size, &uss->ss_size);
721 __get_user(ss.ss_flags, &uss->ss_flags);
722 unlock_user_struct(uss, uss_addr, 0);
723
724 ret = -TARGET_EPERM;
725 if (on_sig_stack(sp))
726 goto out;
727
728 ret = -TARGET_EINVAL;
729 if (ss.ss_flags != TARGET_SS_DISABLE
730 && ss.ss_flags != TARGET_SS_ONSTACK
731 && ss.ss_flags != 0)
732 goto out;
733
734 if (ss.ss_flags == TARGET_SS_DISABLE) {
735 ss.ss_size = 0;
736 ss.ss_sp = 0;
737 } else {
738 ret = -TARGET_ENOMEM;
739 if (ss.ss_size < minstacksize) {
740 goto out;
741 }
742 }
743
744 target_sigaltstack_used.ss_sp = ss.ss_sp;
745 target_sigaltstack_used.ss_size = ss.ss_size;
746 }
747
748 if (uoss_addr) {
749 ret = -TARGET_EFAULT;
750 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
751 goto out;
752 }
753
754 ret = 0;
755 out:
756 return ret;
757 }
758
759 /* do_sigaction() returns target values and host errnos */
760 int do_sigaction(int sig, const struct target_sigaction *act,
761 struct target_sigaction *oact)
762 {
763 struct target_sigaction *k;
764 struct sigaction act1;
765 int host_sig;
766 int ret = 0;
767
768 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
769 return -TARGET_EINVAL;
770 }
771
772 if (block_signals()) {
773 return -TARGET_ERESTARTSYS;
774 }
775
776 k = &sigact_table[sig - 1];
777 if (oact) {
778 __put_user(k->_sa_handler, &oact->_sa_handler);
779 __put_user(k->sa_flags, &oact->sa_flags);
780 #if !defined(TARGET_MIPS)
781 __put_user(k->sa_restorer, &oact->sa_restorer);
782 #endif
783 /* Not swapped. */
784 oact->sa_mask = k->sa_mask;
785 }
786 if (act) {
787 /* FIXME: This is not threadsafe. */
788 __get_user(k->_sa_handler, &act->_sa_handler);
789 __get_user(k->sa_flags, &act->sa_flags);
790 #if !defined(TARGET_MIPS)
791 __get_user(k->sa_restorer, &act->sa_restorer);
792 #endif
793 /* To be swapped in target_to_host_sigset. */
794 k->sa_mask = act->sa_mask;
795
796 /* we update the host linux signal state */
797 host_sig = target_to_host_signal(sig);
798 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
799 sigfillset(&act1.sa_mask);
800 act1.sa_flags = SA_SIGINFO;
801 if (k->sa_flags & TARGET_SA_RESTART)
802 act1.sa_flags |= SA_RESTART;
803 /* NOTE: it is important to update the host kernel signal
804 ignore state to avoid getting unexpectedly interrupted
805 syscalls */
806 if (k->_sa_handler == TARGET_SIG_IGN) {
807 act1.sa_sigaction = (void *)SIG_IGN;
808 } else if (k->_sa_handler == TARGET_SIG_DFL) {
809 if (fatal_signal (sig))
810 act1.sa_sigaction = host_signal_handler;
811 else
812 act1.sa_sigaction = (void *)SIG_DFL;
813 } else {
814 act1.sa_sigaction = host_signal_handler;
815 }
816 ret = sigaction(host_sig, &act1, NULL);
817 }
818 }
819 return ret;
820 }
821
822 #if defined(TARGET_I386)
823 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
824
825 struct target_fpreg {
826 uint16_t significand[4];
827 uint16_t exponent;
828 };
829
830 struct target_fpxreg {
831 uint16_t significand[4];
832 uint16_t exponent;
833 uint16_t padding[3];
834 };
835
836 struct target_xmmreg {
837 uint32_t element[4];
838 };
839
840 struct target_fpstate_32 {
841 /* Regular FPU environment */
842 uint32_t cw;
843 uint32_t sw;
844 uint32_t tag;
845 uint32_t ipoff;
846 uint32_t cssel;
847 uint32_t dataoff;
848 uint32_t datasel;
849 struct target_fpreg st[8];
850 uint16_t status;
851 uint16_t magic; /* 0xffff = regular FPU data only */
852
853 /* FXSR FPU environment */
854 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
855 uint32_t mxcsr;
856 uint32_t reserved;
857 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
858 struct target_xmmreg xmm[8];
859 uint32_t padding[56];
860 };
861
862 struct target_fpstate_64 {
863 /* FXSAVE format */
864 uint16_t cw;
865 uint16_t sw;
866 uint16_t twd;
867 uint16_t fop;
868 uint64_t rip;
869 uint64_t rdp;
870 uint32_t mxcsr;
871 uint32_t mxcsr_mask;
872 uint32_t st_space[32];
873 uint32_t xmm_space[64];
874 uint32_t reserved[24];
875 };
876
877 #ifndef TARGET_X86_64
878 # define target_fpstate target_fpstate_32
879 #else
880 # define target_fpstate target_fpstate_64
881 #endif
882
883 struct target_sigcontext_32 {
884 uint16_t gs, __gsh;
885 uint16_t fs, __fsh;
886 uint16_t es, __esh;
887 uint16_t ds, __dsh;
888 uint32_t edi;
889 uint32_t esi;
890 uint32_t ebp;
891 uint32_t esp;
892 uint32_t ebx;
893 uint32_t edx;
894 uint32_t ecx;
895 uint32_t eax;
896 uint32_t trapno;
897 uint32_t err;
898 uint32_t eip;
899 uint16_t cs, __csh;
900 uint32_t eflags;
901 uint32_t esp_at_signal;
902 uint16_t ss, __ssh;
903 uint32_t fpstate; /* pointer */
904 uint32_t oldmask;
905 uint32_t cr2;
906 };
907
908 struct target_sigcontext_64 {
909 uint64_t r8;
910 uint64_t r9;
911 uint64_t r10;
912 uint64_t r11;
913 uint64_t r12;
914 uint64_t r13;
915 uint64_t r14;
916 uint64_t r15;
917
918 uint64_t rdi;
919 uint64_t rsi;
920 uint64_t rbp;
921 uint64_t rbx;
922 uint64_t rdx;
923 uint64_t rax;
924 uint64_t rcx;
925 uint64_t rsp;
926 uint64_t rip;
927
928 uint64_t eflags;
929
930 uint16_t cs;
931 uint16_t gs;
932 uint16_t fs;
933 uint16_t ss;
934
935 uint64_t err;
936 uint64_t trapno;
937 uint64_t oldmask;
938 uint64_t cr2;
939
940 uint64_t fpstate; /* pointer */
941 uint64_t padding[8];
942 };
943
944 #ifndef TARGET_X86_64
945 # define target_sigcontext target_sigcontext_32
946 #else
947 # define target_sigcontext target_sigcontext_64
948 #endif
949
950 /* see Linux/include/uapi/asm-generic/ucontext.h */
951 struct target_ucontext {
952 abi_ulong tuc_flags;
953 abi_ulong tuc_link;
954 target_stack_t tuc_stack;
955 struct target_sigcontext tuc_mcontext;
956 target_sigset_t tuc_sigmask; /* mask last for extensibility */
957 };
958
959 #ifndef TARGET_X86_64
960 struct sigframe {
961 abi_ulong pretcode;
962 int sig;
963 struct target_sigcontext sc;
964 struct target_fpstate fpstate;
965 abi_ulong extramask[TARGET_NSIG_WORDS-1];
966 char retcode[8];
967 };
968
969 struct rt_sigframe {
970 abi_ulong pretcode;
971 int sig;
972 abi_ulong pinfo;
973 abi_ulong puc;
974 struct target_siginfo info;
975 struct target_ucontext uc;
976 struct target_fpstate fpstate;
977 char retcode[8];
978 };
979
980 #else
981
982 struct rt_sigframe {
983 abi_ulong pretcode;
984 struct target_ucontext uc;
985 struct target_siginfo info;
986 struct target_fpstate fpstate;
987 };
988
989 #endif
990
991 /*
992 * Set up a signal frame.
993 */
994
995 /* XXX: save x87 state */
996 static void setup_sigcontext(struct target_sigcontext *sc,
997 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
998 abi_ulong fpstate_addr)
999 {
1000 CPUState *cs = CPU(x86_env_get_cpu(env));
1001 #ifndef TARGET_X86_64
1002 uint16_t magic;
1003
1004 /* already locked in setup_frame() */
1005 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
1006 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
1007 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
1008 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
1009 __put_user(env->regs[R_EDI], &sc->edi);
1010 __put_user(env->regs[R_ESI], &sc->esi);
1011 __put_user(env->regs[R_EBP], &sc->ebp);
1012 __put_user(env->regs[R_ESP], &sc->esp);
1013 __put_user(env->regs[R_EBX], &sc->ebx);
1014 __put_user(env->regs[R_EDX], &sc->edx);
1015 __put_user(env->regs[R_ECX], &sc->ecx);
1016 __put_user(env->regs[R_EAX], &sc->eax);
1017 __put_user(cs->exception_index, &sc->trapno);
1018 __put_user(env->error_code, &sc->err);
1019 __put_user(env->eip, &sc->eip);
1020 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1021 __put_user(env->eflags, &sc->eflags);
1022 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1023 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1024
1025 cpu_x86_fsave(env, fpstate_addr, 1);
1026 fpstate->status = fpstate->sw;
1027 magic = 0xffff;
1028 __put_user(magic, &fpstate->magic);
1029 __put_user(fpstate_addr, &sc->fpstate);
1030
1031 /* non-iBCS2 extensions.. */
1032 __put_user(mask, &sc->oldmask);
1033 __put_user(env->cr[2], &sc->cr2);
1034 #else
1035 __put_user(env->regs[R_EDI], &sc->rdi);
1036 __put_user(env->regs[R_ESI], &sc->rsi);
1037 __put_user(env->regs[R_EBP], &sc->rbp);
1038 __put_user(env->regs[R_ESP], &sc->rsp);
1039 __put_user(env->regs[R_EBX], &sc->rbx);
1040 __put_user(env->regs[R_EDX], &sc->rdx);
1041 __put_user(env->regs[R_ECX], &sc->rcx);
1042 __put_user(env->regs[R_EAX], &sc->rax);
1043
1044 __put_user(env->regs[8], &sc->r8);
1045 __put_user(env->regs[9], &sc->r9);
1046 __put_user(env->regs[10], &sc->r10);
1047 __put_user(env->regs[11], &sc->r11);
1048 __put_user(env->regs[12], &sc->r12);
1049 __put_user(env->regs[13], &sc->r13);
1050 __put_user(env->regs[14], &sc->r14);
1051 __put_user(env->regs[15], &sc->r15);
1052
1053 __put_user(cs->exception_index, &sc->trapno);
1054 __put_user(env->error_code, &sc->err);
1055 __put_user(env->eip, &sc->rip);
1056
1057 __put_user(env->eflags, &sc->eflags);
1058 __put_user(env->segs[R_CS].selector, &sc->cs);
1059 __put_user((uint16_t)0, &sc->gs);
1060 __put_user((uint16_t)0, &sc->fs);
1061 __put_user(env->segs[R_SS].selector, &sc->ss);
1062
1063 __put_user(mask, &sc->oldmask);
1064 __put_user(env->cr[2], &sc->cr2);
1065
1066 /* fpstate_addr must be 16 byte aligned for fxsave */
1067 assert(!(fpstate_addr & 0xf));
1068
1069 cpu_x86_fxsave(env, fpstate_addr);
1070 __put_user(fpstate_addr, &sc->fpstate);
1071 #endif
1072 }
1073
1074 /*
1075 * Determine which stack to use.
1076 */
1077
1078 static inline abi_ulong
1079 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1080 {
1081 unsigned long esp;
1082
1083 /* Default to using normal stack */
1084 esp = env->regs[R_ESP];
1085 #ifdef TARGET_X86_64
1086 esp -= 128; /* this is the redzone */
1087 #endif
1088
1089 /* This is the X/Open sanctioned signal stack switching. */
1090 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1091 if (sas_ss_flags(esp) == 0) {
1092 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1093 }
1094 } else {
1095 #ifndef TARGET_X86_64
1096 /* This is the legacy signal stack switching. */
1097 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1098 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1099 ka->sa_restorer) {
1100 esp = (unsigned long) ka->sa_restorer;
1101 }
1102 #endif
1103 }
1104
1105 #ifndef TARGET_X86_64
1106 return (esp - frame_size) & -8ul;
1107 #else
1108 return ((esp - frame_size) & (~15ul)) - 8;
1109 #endif
1110 }
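/* A sketch of the arithmetic above: on 32-bit the frame is simply rounded
 * down to an 8-byte boundary. On x86-64 the result is rounded down to 16
 * bytes and then 8 is subtracted, leaving %rsp % 16 == 8 at handler entry,
 * which is the psABI state right after a return address has been pushed;
 * e.g. esp == 0x7ffff0 with a 0x5c8-byte frame gives 0x7ffa28, rounded to
 * 0x7ffa20, minus 8 == 0x7ffa18.
 */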
1111
1112 #ifndef TARGET_X86_64
1113 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1114 static void setup_frame(int sig, struct target_sigaction *ka,
1115 target_sigset_t *set, CPUX86State *env)
1116 {
1117 abi_ulong frame_addr;
1118 struct sigframe *frame;
1119 int i;
1120
1121 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1122 trace_user_setup_frame(env, frame_addr);
1123
1124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1125 goto give_sigsegv;
1126
1127 __put_user(sig, &frame->sig);
1128
1129 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1130 frame_addr + offsetof(struct sigframe, fpstate));
1131
1132 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1133 __put_user(set->sig[i], &frame->extramask[i - 1]);
1134 }
1135
1136 /* Set up to return from userspace. If provided, use a stub
1137 already in userspace. */
1138 if (ka->sa_flags & TARGET_SA_RESTORER) {
1139 __put_user(ka->sa_restorer, &frame->pretcode);
1140 } else {
1141 uint16_t val16;
1142 abi_ulong retcode_addr;
1143 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1144 __put_user(retcode_addr, &frame->pretcode);
1145 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1146 val16 = 0xb858;
1147 __put_user(val16, (uint16_t *)(frame->retcode+0));
1148 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1149 val16 = 0x80cd;
1150 __put_user(val16, (uint16_t *)(frame->retcode+6));
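        /* The little-endian bytes written above are, in order:
         *   0x58                 popl %eax
         *   0xb8 <imm32>         movl $TARGET_NR_sigreturn, %eax
         *   0xcd 0x80            int  $0x80
         * so returning from the handler goes straight into sigreturn.
         */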
1151 }
1152
1153 /* Set up registers for signal handler */
1154 env->regs[R_ESP] = frame_addr;
1155 env->eip = ka->_sa_handler;
1156
1157 cpu_x86_load_seg(env, R_DS, __USER_DS);
1158 cpu_x86_load_seg(env, R_ES, __USER_DS);
1159 cpu_x86_load_seg(env, R_SS, __USER_DS);
1160 cpu_x86_load_seg(env, R_CS, __USER_CS);
1161 env->eflags &= ~TF_MASK;
1162
1163 unlock_user_struct(frame, frame_addr, 1);
1164
1165 return;
1166
1167 give_sigsegv:
1168 force_sigsegv(sig);
1169 }
1170 #endif
1171
1172 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1173 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1174 target_siginfo_t *info,
1175 target_sigset_t *set, CPUX86State *env)
1176 {
1177 abi_ulong frame_addr;
1178 #ifndef TARGET_X86_64
1179 abi_ulong addr;
1180 #endif
1181 struct rt_sigframe *frame;
1182 int i;
1183
1184 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1185 trace_user_setup_rt_frame(env, frame_addr);
1186
1187 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1188 goto give_sigsegv;
1189
1190 /* These fields are only in rt_sigframe on 32 bit */
1191 #ifndef TARGET_X86_64
1192 __put_user(sig, &frame->sig);
1193 addr = frame_addr + offsetof(struct rt_sigframe, info);
1194 __put_user(addr, &frame->pinfo);
1195 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1196 __put_user(addr, &frame->puc);
1197 #endif
1198 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1199 tswap_siginfo(&frame->info, info);
1200 }
1201
1202 /* Create the ucontext. */
1203 __put_user(0, &frame->uc.tuc_flags);
1204 __put_user(0, &frame->uc.tuc_link);
1205 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1206 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1207 &frame->uc.tuc_stack.ss_flags);
1208 __put_user(target_sigaltstack_used.ss_size,
1209 &frame->uc.tuc_stack.ss_size);
1210 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1211 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1212
1213 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1214 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1215 }
1216
1217 /* Set up to return from userspace. If provided, use a stub
1218 already in userspace. */
1219 #ifndef TARGET_X86_64
1220 if (ka->sa_flags & TARGET_SA_RESTORER) {
1221 __put_user(ka->sa_restorer, &frame->pretcode);
1222 } else {
1223 uint16_t val16;
1224 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1225 __put_user(addr, &frame->pretcode);
1226 /* This is movl $,%eax ; int $0x80 */
1227 __put_user(0xb8, (char *)(frame->retcode+0));
1228 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1229 val16 = 0x80cd;
1230 __put_user(val16, (uint16_t *)(frame->retcode+5));
1231 }
1232 #else
1233 /* XXX: Would be slightly better to return -EFAULT here if test fails
1234 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1235 __put_user(ka->sa_restorer, &frame->pretcode);
1236 #endif
1237
1238 /* Set up registers for signal handler */
1239 env->regs[R_ESP] = frame_addr;
1240 env->eip = ka->_sa_handler;
1241
1242 #ifndef TARGET_X86_64
1243 env->regs[R_EAX] = sig;
1244 env->regs[R_EDX] = (unsigned long)&frame->info;
1245 env->regs[R_ECX] = (unsigned long)&frame->uc;
1246 #else
1247 env->regs[R_EAX] = 0;
1248 env->regs[R_EDI] = sig;
1249 env->regs[R_ESI] = (unsigned long)&frame->info;
1250 env->regs[R_EDX] = (unsigned long)&frame->uc;
1251 #endif
1252
1253 cpu_x86_load_seg(env, R_DS, __USER_DS);
1254 cpu_x86_load_seg(env, R_ES, __USER_DS);
1255 cpu_x86_load_seg(env, R_CS, __USER_CS);
1256 cpu_x86_load_seg(env, R_SS, __USER_DS);
1257 env->eflags &= ~TF_MASK;
1258
1259 unlock_user_struct(frame, frame_addr, 1);
1260
1261 return;
1262
1263 give_sigsegv:
1264 force_sigsegv(sig);
1265 }
1266
1267 static int
1268 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1269 {
1270 unsigned int err = 0;
1271 abi_ulong fpstate_addr;
1272 unsigned int tmpflags;
1273
1274 #ifndef TARGET_X86_64
1275 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1276 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1277 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1278 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1279
1280 env->regs[R_EDI] = tswapl(sc->edi);
1281 env->regs[R_ESI] = tswapl(sc->esi);
1282 env->regs[R_EBP] = tswapl(sc->ebp);
1283 env->regs[R_ESP] = tswapl(sc->esp);
1284 env->regs[R_EBX] = tswapl(sc->ebx);
1285 env->regs[R_EDX] = tswapl(sc->edx);
1286 env->regs[R_ECX] = tswapl(sc->ecx);
1287 env->regs[R_EAX] = tswapl(sc->eax);
1288
1289 env->eip = tswapl(sc->eip);
1290 #else
1291 env->regs[8] = tswapl(sc->r8);
1292 env->regs[9] = tswapl(sc->r9);
1293 env->regs[10] = tswapl(sc->r10);
1294 env->regs[11] = tswapl(sc->r11);
1295 env->regs[12] = tswapl(sc->r12);
1296 env->regs[13] = tswapl(sc->r13);
1297 env->regs[14] = tswapl(sc->r14);
1298 env->regs[15] = tswapl(sc->r15);
1299
1300 env->regs[R_EDI] = tswapl(sc->rdi);
1301 env->regs[R_ESI] = tswapl(sc->rsi);
1302 env->regs[R_EBP] = tswapl(sc->rbp);
1303 env->regs[R_EBX] = tswapl(sc->rbx);
1304 env->regs[R_EDX] = tswapl(sc->rdx);
1305 env->regs[R_EAX] = tswapl(sc->rax);
1306 env->regs[R_ECX] = tswapl(sc->rcx);
1307 env->regs[R_ESP] = tswapl(sc->rsp);
1308
1309 env->eip = tswapl(sc->rip);
1310 #endif
1311
1312 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1313 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1314
1315 tmpflags = tswapl(sc->eflags);
1316 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1317 // regs->orig_eax = -1; /* disable syscall checks */
1318
1319 fpstate_addr = tswapl(sc->fpstate);
1320 if (fpstate_addr != 0) {
1321 if (!access_ok(VERIFY_READ, fpstate_addr,
1322 sizeof(struct target_fpstate)))
1323 goto badframe;
1324 #ifndef TARGET_X86_64
1325 cpu_x86_frstor(env, fpstate_addr, 1);
1326 #else
1327 cpu_x86_fxrstor(env, fpstate_addr);
1328 #endif
1329 }
1330
1331 return err;
1332 badframe:
1333 return 1;
1334 }
1335
1336 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1337 #ifndef TARGET_X86_64
1338 long do_sigreturn(CPUX86State *env)
1339 {
1340 struct sigframe *frame;
1341 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1342 target_sigset_t target_set;
1343 sigset_t set;
1344 int i;
1345
1346 trace_user_do_sigreturn(env, frame_addr);
1347 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1348 goto badframe;
1349 /* set blocked signals */
1350 __get_user(target_set.sig[0], &frame->sc.oldmask);
1351 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1352 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1353 }
1354
1355 target_to_host_sigset_internal(&set, &target_set);
1356 set_sigmask(&set);
1357
1358 /* restore registers */
1359 if (restore_sigcontext(env, &frame->sc))
1360 goto badframe;
1361 unlock_user_struct(frame, frame_addr, 0);
1362 return -TARGET_QEMU_ESIGRETURN;
1363
1364 badframe:
1365 unlock_user_struct(frame, frame_addr, 0);
1366 force_sig(TARGET_SIGSEGV);
1367 return -TARGET_QEMU_ESIGRETURN;
1368 }
1369 #endif
1370
1371 long do_rt_sigreturn(CPUX86State *env)
1372 {
1373 abi_ulong frame_addr;
1374 struct rt_sigframe *frame;
1375 sigset_t set;
1376
1377 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1378 trace_user_do_rt_sigreturn(env, frame_addr);
1379 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1380 goto badframe;
1381 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1382 set_sigmask(&set);
1383
1384 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1385 goto badframe;
1386 }
1387
1388 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1389 get_sp_from_cpustate(env)) == -EFAULT) {
1390 goto badframe;
1391 }
1392
1393 unlock_user_struct(frame, frame_addr, 0);
1394 return -TARGET_QEMU_ESIGRETURN;
1395
1396 badframe:
1397 unlock_user_struct(frame, frame_addr, 0);
1398 force_sig(TARGET_SIGSEGV);
1399 return -TARGET_QEMU_ESIGRETURN;
1400 }
1401
1402 #elif defined(TARGET_AARCH64)
1403
1404 struct target_sigcontext {
1405 uint64_t fault_address;
1406 /* AArch64 registers */
1407 uint64_t regs[31];
1408 uint64_t sp;
1409 uint64_t pc;
1410 uint64_t pstate;
1411 /* 4K reserved for FP/SIMD state and future expansion */
1412 char __reserved[4096] __attribute__((__aligned__(16)));
1413 };
1414
1415 struct target_ucontext {
1416 abi_ulong tuc_flags;
1417 abi_ulong tuc_link;
1418 target_stack_t tuc_stack;
1419 target_sigset_t tuc_sigmask;
1420 /* glibc uses a 1024-bit sigset_t */
1421 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1422 /* last for future expansion */
1423 struct target_sigcontext tuc_mcontext;
1424 };
1425
1426 /*
1427 * Header to be used at the beginning of structures extending the user
1428 * context. Such structures must be placed after the rt_sigframe on the stack
1429 * and be 16-byte aligned. The last structure must be a dummy one with the
1430 * magic and size set to 0.
1431 */
1432 struct target_aarch64_ctx {
1433 uint32_t magic;
1434 uint32_t size;
1435 };
1436
1437 #define TARGET_FPSIMD_MAGIC 0x46508001
1438
1439 struct target_fpsimd_context {
1440 struct target_aarch64_ctx head;
1441 uint32_t fpsr;
1442 uint32_t fpcr;
1443 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1444 };
1445
1446 /*
1447 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1448 * user space as it will change with the addition of new context. User space
1449 * should check the magic/size information.
1450 */
1451 struct target_aux_context {
1452 struct target_fpsimd_context fpsimd;
1453 /* additional context to be added before "end" */
1454 struct target_aarch64_ctx end;
1455 };
1456
1457 struct target_rt_sigframe {
1458 struct target_siginfo info;
1459 struct target_ucontext uc;
1460 uint64_t fp;
1461 uint64_t lr;
1462 uint32_t tramp[2];
1463 };
1464
1465 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1466 CPUARMState *env, target_sigset_t *set)
1467 {
1468 int i;
1469 struct target_aux_context *aux =
1470 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1471
1472 /* set up the stack frame for unwinding */
1473 __put_user(env->xregs[29], &sf->fp);
1474 __put_user(env->xregs[30], &sf->lr);
1475
1476 for (i = 0; i < 31; i++) {
1477 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1478 }
1479 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1480 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1481 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1482
1483 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1484
1485 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1486 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1487 }
1488
1489 for (i = 0; i < 32; i++) {
1490 #ifdef TARGET_WORDS_BIGENDIAN
1491 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1492 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1493 #else
1494 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1495 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1496 #endif
1497 }
1498 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1499 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1500 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1501 __put_user(sizeof(struct target_fpsimd_context),
1502 &aux->fpsimd.head.size);
1503
1504 /* set the "end" magic */
1505 __put_user(0, &aux->end.magic);
1506 __put_user(0, &aux->end.size);
1507
1508 return 0;
1509 }
1510
1511 static int target_restore_sigframe(CPUARMState *env,
1512 struct target_rt_sigframe *sf)
1513 {
1514 sigset_t set;
1515 int i;
1516 struct target_aux_context *aux =
1517 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1518 uint32_t magic, size, fpsr, fpcr;
1519 uint64_t pstate;
1520
1521 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1522 set_sigmask(&set);
1523
1524 for (i = 0; i < 31; i++) {
1525 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1526 }
1527
1528 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1529 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1530 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1531 pstate_write(env, pstate);
1532
1533 __get_user(magic, &aux->fpsimd.head.magic);
1534 __get_user(size, &aux->fpsimd.head.size);
1535
1536 if (magic != TARGET_FPSIMD_MAGIC
1537 || size != sizeof(struct target_fpsimd_context)) {
1538 return 1;
1539 }
1540
1541 for (i = 0; i < 32; i++) {
1542 #ifdef TARGET_WORDS_BIGENDIAN
1543 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1544 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1545 #else
1546 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1547 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1548 #endif
1549 }
1550 __get_user(fpsr, &aux->fpsimd.fpsr);
1551 vfp_set_fpsr(env, fpsr);
1552 __get_user(fpcr, &aux->fpsimd.fpcr);
1553 vfp_set_fpcr(env, fpcr);
1554
1555 return 0;
1556 }
1557
1558 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1559 {
1560 abi_ulong sp;
1561
1562 sp = env->xregs[31];
1563
1564 /*
1565 * This is the X/Open sanctioned signal stack switching.
1566 */
1567 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1568 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1569 }
1570
1571 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1572
1573 return sp;
1574 }
1575
1576 static void target_setup_frame(int usig, struct target_sigaction *ka,
1577 target_siginfo_t *info, target_sigset_t *set,
1578 CPUARMState *env)
1579 {
1580 struct target_rt_sigframe *frame;
1581 abi_ulong frame_addr, return_addr;
1582
1583 frame_addr = get_sigframe(ka, env);
1584 trace_user_setup_frame(env, frame_addr);
1585 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1586 goto give_sigsegv;
1587 }
1588
1589 __put_user(0, &frame->uc.tuc_flags);
1590 __put_user(0, &frame->uc.tuc_link);
1591
1592 __put_user(target_sigaltstack_used.ss_sp,
1593 &frame->uc.tuc_stack.ss_sp);
1594 __put_user(sas_ss_flags(env->xregs[31]),
1595 &frame->uc.tuc_stack.ss_flags);
1596 __put_user(target_sigaltstack_used.ss_size,
1597 &frame->uc.tuc_stack.ss_size);
1598 target_setup_sigframe(frame, env, set);
1599 if (ka->sa_flags & TARGET_SA_RESTORER) {
1600 return_addr = ka->sa_restorer;
1601 } else {
1602 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1603 __put_user(0xd2801168, &frame->tramp[0]);
1604 __put_user(0xd4000001, &frame->tramp[1]);
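        /* 0xd2801168 is "movz x8, #139", 139 being __NR_rt_sigreturn in
         * the generic syscall ABI that AArch64 uses, and 0xd4000001 is
         * "svc #0".
         */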
1605 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1606 }
1607 env->xregs[0] = usig;
1608 env->xregs[31] = frame_addr;
1609 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1610 env->pc = ka->_sa_handler;
1611 env->xregs[30] = return_addr;
1612 if (info) {
1613 tswap_siginfo(&frame->info, info);
1614 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1615 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1616 }
1617
1618 unlock_user_struct(frame, frame_addr, 1);
1619 return;
1620
1621 give_sigsegv:
1622 unlock_user_struct(frame, frame_addr, 1);
1623 force_sigsegv(usig);
1624 }
1625
1626 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1627 target_siginfo_t *info, target_sigset_t *set,
1628 CPUARMState *env)
1629 {
1630 target_setup_frame(sig, ka, info, set, env);
1631 }
1632
1633 static void setup_frame(int sig, struct target_sigaction *ka,
1634 target_sigset_t *set, CPUARMState *env)
1635 {
1636 target_setup_frame(sig, ka, 0, set, env);
1637 }
1638
1639 long do_rt_sigreturn(CPUARMState *env)
1640 {
1641 struct target_rt_sigframe *frame = NULL;
1642 abi_ulong frame_addr = env->xregs[31];
1643
1644 trace_user_do_rt_sigreturn(env, frame_addr);
1645 if (frame_addr & 15) {
1646 goto badframe;
1647 }
1648
1649 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1650 goto badframe;
1651 }
1652
1653 if (target_restore_sigframe(env, frame)) {
1654 goto badframe;
1655 }
1656
1657 if (do_sigaltstack(frame_addr +
1658 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1659 0, get_sp_from_cpustate(env)) == -EFAULT) {
1660 goto badframe;
1661 }
1662
1663 unlock_user_struct(frame, frame_addr, 0);
1664 return -TARGET_QEMU_ESIGRETURN;
1665
1666 badframe:
1667 unlock_user_struct(frame, frame_addr, 0);
1668 force_sig(TARGET_SIGSEGV);
1669 return -TARGET_QEMU_ESIGRETURN;
1670 }
1671
1672 long do_sigreturn(CPUARMState *env)
1673 {
1674 return do_rt_sigreturn(env);
1675 }
1676
1677 #elif defined(TARGET_ARM)
1678
1679 struct target_sigcontext {
1680 abi_ulong trap_no;
1681 abi_ulong error_code;
1682 abi_ulong oldmask;
1683 abi_ulong arm_r0;
1684 abi_ulong arm_r1;
1685 abi_ulong arm_r2;
1686 abi_ulong arm_r3;
1687 abi_ulong arm_r4;
1688 abi_ulong arm_r5;
1689 abi_ulong arm_r6;
1690 abi_ulong arm_r7;
1691 abi_ulong arm_r8;
1692 abi_ulong arm_r9;
1693 abi_ulong arm_r10;
1694 abi_ulong arm_fp;
1695 abi_ulong arm_ip;
1696 abi_ulong arm_sp;
1697 abi_ulong arm_lr;
1698 abi_ulong arm_pc;
1699 abi_ulong arm_cpsr;
1700 abi_ulong fault_address;
1701 };
1702
1703 struct target_ucontext_v1 {
1704 abi_ulong tuc_flags;
1705 abi_ulong tuc_link;
1706 target_stack_t tuc_stack;
1707 struct target_sigcontext tuc_mcontext;
1708 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1709 };
1710
1711 struct target_ucontext_v2 {
1712 abi_ulong tuc_flags;
1713 abi_ulong tuc_link;
1714 target_stack_t tuc_stack;
1715 struct target_sigcontext tuc_mcontext;
1716 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1717 char __unused[128 - sizeof(target_sigset_t)];
1718 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1719 };
1720
1721 struct target_user_vfp {
1722 uint64_t fpregs[32];
1723 abi_ulong fpscr;
1724 };
1725
1726 struct target_user_vfp_exc {
1727 abi_ulong fpexc;
1728 abi_ulong fpinst;
1729 abi_ulong fpinst2;
1730 };
1731
1732 struct target_vfp_sigframe {
1733 abi_ulong magic;
1734 abi_ulong size;
1735 struct target_user_vfp ufp;
1736 struct target_user_vfp_exc ufp_exc;
1737 } __attribute__((__aligned__(8)));
1738
1739 struct target_iwmmxt_sigframe {
1740 abi_ulong magic;
1741 abi_ulong size;
1742 uint64_t regs[16];
1743 /* Note that not all the coprocessor control registers are stored here */
1744 uint32_t wcssf;
1745 uint32_t wcasf;
1746 uint32_t wcgr0;
1747 uint32_t wcgr1;
1748 uint32_t wcgr2;
1749 uint32_t wcgr3;
1750 } __attribute__((__aligned__(8)));
1751
1752 #define TARGET_VFP_MAGIC 0x56465001
1753 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1754
1755 struct sigframe_v1
1756 {
1757 struct target_sigcontext sc;
1758 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1759 abi_ulong retcode;
1760 };
1761
1762 struct sigframe_v2
1763 {
1764 struct target_ucontext_v2 uc;
1765 abi_ulong retcode;
1766 };
1767
1768 struct rt_sigframe_v1
1769 {
1770 abi_ulong pinfo;
1771 abi_ulong puc;
1772 struct target_siginfo info;
1773 struct target_ucontext_v1 uc;
1774 abi_ulong retcode;
1775 };
1776
1777 struct rt_sigframe_v2
1778 {
1779 struct target_siginfo info;
1780 struct target_ucontext_v2 uc;
1781 abi_ulong retcode;
1782 };
1783
1784 #define TARGET_CONFIG_CPU_32 1
1785
1786 /*
1787 * For ARM syscalls, we encode the syscall number into the instruction.
1788 */
1789 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1790 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1791
1792 /*
1793 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1794 * need two 16-bit instructions.
1795 */
1796 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1797 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1798
1799 static const abi_ulong retcodes[4] = {
1800 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1801 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1802 };
1803
1804
1805 static inline int valid_user_regs(CPUARMState *regs)
1806 {
1807 return 1;
1808 }
1809
1810 static void
1811 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1812 CPUARMState *env, abi_ulong mask)
1813 {
1814 __put_user(env->regs[0], &sc->arm_r0);
1815 __put_user(env->regs[1], &sc->arm_r1);
1816 __put_user(env->regs[2], &sc->arm_r2);
1817 __put_user(env->regs[3], &sc->arm_r3);
1818 __put_user(env->regs[4], &sc->arm_r4);
1819 __put_user(env->regs[5], &sc->arm_r5);
1820 __put_user(env->regs[6], &sc->arm_r6);
1821 __put_user(env->regs[7], &sc->arm_r7);
1822 __put_user(env->regs[8], &sc->arm_r8);
1823 __put_user(env->regs[9], &sc->arm_r9);
1824 __put_user(env->regs[10], &sc->arm_r10);
1825 __put_user(env->regs[11], &sc->arm_fp);
1826 __put_user(env->regs[12], &sc->arm_ip);
1827 __put_user(env->regs[13], &sc->arm_sp);
1828 __put_user(env->regs[14], &sc->arm_lr);
1829 __put_user(env->regs[15], &sc->arm_pc);
1830 #ifdef TARGET_CONFIG_CPU_32
1831 __put_user(cpsr_read(env), &sc->arm_cpsr);
1832 #endif
1833
1834 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1835 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1836 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1837 __put_user(mask, &sc->oldmask);
1838 }
1839
1840 static inline abi_ulong
1841 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1842 {
1843 unsigned long sp = regs->regs[13];
1844
1845 /*
1846 * This is the X/Open sanctioned signal stack switching.
1847 */
1848 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1849 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1850 }
1851 /*
1852 * ATPCS B01 mandates 8-byte alignment
1853 */
1854 return (sp - framesize) & ~7;
1855 }
1856
1857 static void
1858 setup_return(CPUARMState *env, struct target_sigaction *ka,
1859 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1860 {
1861 abi_ulong handler = ka->_sa_handler;
1862 abi_ulong retcode;
1863 int thumb = handler & 1;
1864 uint32_t cpsr = cpsr_read(env);
1865
1866 cpsr &= ~CPSR_IT;
1867 if (thumb) {
1868 cpsr |= CPSR_T;
1869 } else {
1870 cpsr &= ~CPSR_T;
1871 }
1872
1873 if (ka->sa_flags & TARGET_SA_RESTORER) {
1874 retcode = ka->sa_restorer;
1875 } else {
1876 unsigned int idx = thumb;
1877
1878 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1879 idx += 2;
1880 }
1881
1882 __put_user(retcodes[idx], rc);
1883
1884 retcode = rc_addr + thumb;
1885 }
1886
1887 env->regs[0] = usig;
1888 env->regs[13] = frame_addr;
1889 env->regs[14] = retcode;
1890 env->regs[15] = handler & (thumb ? ~1 : ~3);
1891 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1892 }
1893
1894 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1895 {
1896 int i;
1897 struct target_vfp_sigframe *vfpframe;
1898 vfpframe = (struct target_vfp_sigframe *)regspace;
1899 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1900 __put_user(sizeof(*vfpframe), &vfpframe->size);
1901 for (i = 0; i < 32; i++) {
1902 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1903 }
1904 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1905 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1906 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1907 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1908 return (abi_ulong*)(vfpframe+1);
1909 }
1910
1911 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1912 CPUARMState *env)
1913 {
1914 int i;
1915 struct target_iwmmxt_sigframe *iwmmxtframe;
1916 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1917 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1918 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1919 for (i = 0; i < 16; i++) {
1920 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1921 }
1922 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1923 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1924 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1925 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1926 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1927 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1928 return (abi_ulong*)(iwmmxtframe+1);
1929 }
1930
1931 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1932 target_sigset_t *set, CPUARMState *env)
1933 {
1934 struct target_sigaltstack stack;
1935 int i;
1936 abi_ulong *regspace;
1937
1938 /* Clear all the bits of the ucontext we don't use. */
1939 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1940
1941 memset(&stack, 0, sizeof(stack));
1942 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1943 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1944 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1945 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1946
1947 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1948 /* Save coprocessor signal frame. */
1949 regspace = uc->tuc_regspace;
1950 if (arm_feature(env, ARM_FEATURE_VFP)) {
1951 regspace = setup_sigframe_v2_vfp(regspace, env);
1952 }
1953 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1954 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1955 }
1956
1957 /* Write terminating magic word */
1958 __put_user(0, regspace);
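/* A zero word here marks the end of the coprocessor frames for the
 * sigreturn path, which checks each block's magic/size header. */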
1959
1960 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1961 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1962 }
1963 }
1964
1965 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1966 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1967 target_sigset_t *set, CPUARMState *regs)
1968 {
1969 struct sigframe_v1 *frame;
1970 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1971 int i;
1972
1973 trace_user_setup_frame(regs, frame_addr);
1974 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1975 goto sigsegv;
1976 }
1977
1978 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1979
1980 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1981 __put_user(set->sig[i], &frame->extramask[i - 1]);
1982 }
1983
1984 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1985 frame_addr + offsetof(struct sigframe_v1, retcode));
1986
1987 unlock_user_struct(frame, frame_addr, 1);
1988 return;
1989 sigsegv:
1990 force_sigsegv(usig);
1991 }
1992
1993 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1994 target_sigset_t *set, CPUARMState *regs)
1995 {
1996 struct sigframe_v2 *frame;
1997 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1998
1999 trace_user_setup_frame(regs, frame_addr);
2000 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2001 goto sigsegv;
2002 }
2003
2004 setup_sigframe_v2(&frame->uc, set, regs);
2005
2006 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2007 frame_addr + offsetof(struct sigframe_v2, retcode));
2008
2009 unlock_user_struct(frame, frame_addr, 1);
2010 return;
2011 sigsegv:
2012 force_sigsegv(usig);
2013 }
2014
2015 static void setup_frame(int usig, struct target_sigaction *ka,
2016 target_sigset_t *set, CPUARMState *regs)
2017 {
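/* get_osversion() packs the emulated kernel version as
 * (major << 16) | (minor << 8) | micro, so 0x020612 is 2.6.18, which
 * QEMU treats as the cutover to the ucontext-based "v2" frame layout;
 * the sigreturn paths below use the same threshold. */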
2018 if (get_osversion() >= 0x020612) {
2019 setup_frame_v2(usig, ka, set, regs);
2020 } else {
2021 setup_frame_v1(usig, ka, set, regs);
2022 }
2023 }
2024
2025 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
2026 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
2027 target_siginfo_t *info,
2028 target_sigset_t *set, CPUARMState *env)
2029 {
2030 struct rt_sigframe_v1 *frame;
2031 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2032 struct target_sigaltstack stack;
2033 int i;
2034 abi_ulong info_addr, uc_addr;
2035
2036 trace_user_setup_rt_frame(env, frame_addr);
2037 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2038 goto sigsegv;
2039 }
2040
2041 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
2042 __put_user(info_addr, &frame->pinfo);
2043 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
2044 __put_user(uc_addr, &frame->puc);
2045 tswap_siginfo(&frame->info, info);
2046
2047 /* Clear all the bits of the ucontext we don't use. */
2048 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
2049
2050 memset(&stack, 0, sizeof(stack));
2051 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
2052 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
2053 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
2054 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
2055
2056 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
2057 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2058 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
2059 }
2060
2061 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2062 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
2063
2064 env->regs[1] = info_addr;
2065 env->regs[2] = uc_addr;
2066
2067 unlock_user_struct(frame, frame_addr, 1);
2068 return;
2069 sigsegv:
2070 force_sigsegv(usig);
2071 }
2072
2073 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2074 target_siginfo_t *info,
2075 target_sigset_t *set, CPUARMState *env)
2076 {
2077 struct rt_sigframe_v2 *frame;
2078 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2079 abi_ulong info_addr, uc_addr;
2080
2081 trace_user_setup_rt_frame(env, frame_addr);
2082 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2083 goto sigsegv;
2084 }
2085
2086 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2087 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2088 tswap_siginfo(&frame->info, info);
2089
2090 setup_sigframe_v2(&frame->uc, set, env);
2091
2092 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2093 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2094
2095 env->regs[1] = info_addr;
2096 env->regs[2] = uc_addr;
2097
2098 unlock_user_struct(frame, frame_addr, 1);
2099 return;
2100 sigsegv:
2101 force_sigsegv(usig);
2102 }
2103
2104 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2105 target_siginfo_t *info,
2106 target_sigset_t *set, CPUARMState *env)
2107 {
2108 if (get_osversion() >= 0x020612) {
2109 setup_rt_frame_v2(usig, ka, info, set, env);
2110 } else {
2111 setup_rt_frame_v1(usig, ka, info, set, env);
2112 }
2113 }
2114
2115 static int
2116 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
2117 {
2118 int err = 0;
2119 uint32_t cpsr;
2120
2121 __get_user(env->regs[0], &sc->arm_r0);
2122 __get_user(env->regs[1], &sc->arm_r1);
2123 __get_user(env->regs[2], &sc->arm_r2);
2124 __get_user(env->regs[3], &sc->arm_r3);
2125 __get_user(env->regs[4], &sc->arm_r4);
2126 __get_user(env->regs[5], &sc->arm_r5);
2127 __get_user(env->regs[6], &sc->arm_r6);
2128 __get_user(env->regs[7], &sc->arm_r7);
2129 __get_user(env->regs[8], &sc->arm_r8);
2130 __get_user(env->regs[9], &sc->arm_r9);
2131 __get_user(env->regs[10], &sc->arm_r10);
2132 __get_user(env->regs[11], &sc->arm_fp);
2133 __get_user(env->regs[12], &sc->arm_ip);
2134 __get_user(env->regs[13], &sc->arm_sp);
2135 __get_user(env->regs[14], &sc->arm_lr);
2136 __get_user(env->regs[15], &sc->arm_pc);
2137 #ifdef TARGET_CONFIG_CPU_32
2138 __get_user(cpsr, &sc->arm_cpsr);
2139 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
2140 #endif
2141
2142 err |= !valid_user_regs(env);
2143
2144 return err;
2145 }
2146
2147 static long do_sigreturn_v1(CPUARMState *env)
2148 {
2149 abi_ulong frame_addr;
2150 struct sigframe_v1 *frame = NULL;
2151 target_sigset_t set;
2152 sigset_t host_set;
2153 int i;
2154
2155 /*
2156 * Since we stacked the signal on a 64-bit boundary,
2157 * 'sp' should be word aligned here. If it's
2158 * not, then the user is trying to mess with us.
2159 */
2160 frame_addr = env->regs[13];
2161 trace_user_do_sigreturn(env, frame_addr);
2162 if (frame_addr & 7) {
2163 goto badframe;
2164 }
2165
2166 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2167 goto badframe;
2168 }
2169
2170 __get_user(set.sig[0], &frame->sc.oldmask);
2171 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2172 __get_user(set.sig[i], &frame->extramask[i - 1]);
2173 }
2174
2175 target_to_host_sigset_internal(&host_set, &set);
2176 set_sigmask(&host_set);
2177
2178 if (restore_sigcontext(env, &frame->sc)) {
2179 goto badframe;
2180 }
2181
2182 #if 0
2183 /* Send SIGTRAP if we're single-stepping */
2184 if (ptrace_cancel_bpt(current))
2185 send_sig(SIGTRAP, current, 1);
2186 #endif
2187 unlock_user_struct(frame, frame_addr, 0);
2188 return -TARGET_QEMU_ESIGRETURN;
2189
2190 badframe:
2191 force_sig(TARGET_SIGSEGV);
2192 return -TARGET_QEMU_ESIGRETURN;
2193 }
2194
2195 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
2196 {
2197 int i;
2198 abi_ulong magic, sz;
2199 uint32_t fpscr, fpexc;
2200 struct target_vfp_sigframe *vfpframe;
2201 vfpframe = (struct target_vfp_sigframe *)regspace;
2202
2203 __get_user(magic, &vfpframe->magic);
2204 __get_user(sz, &vfpframe->size);
2205 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
2206 return 0;
2207 }
2208 for (i = 0; i < 32; i++) {
2209 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
2210 }
2211 __get_user(fpscr, &vfpframe->ufp.fpscr);
2212 vfp_set_fpscr(env, fpscr);
2213 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2214 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2215 * and the exception flag is cleared
2216 */
2217 fpexc |= (1 << 30);
2218 fpexc &= ~((1 << 31) | (1 << 28));
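/* FPEXC bit 30 is EN, bit 31 is EX and bit 28 is FP2V. */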
2219 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2220 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2221 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2222 return (abi_ulong*)(vfpframe + 1);
2223 }
2224
2225 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2226 abi_ulong *regspace)
2227 {
2228 int i;
2229 abi_ulong magic, sz;
2230 struct target_iwmmxt_sigframe *iwmmxtframe;
2231 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2232
2233 __get_user(magic, &iwmmxtframe->magic);
2234 __get_user(sz, &iwmmxtframe->size);
2235 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2236 return 0;
2237 }
2238 for (i = 0; i < 16; i++) {
2239 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2240 }
2241 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2242 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2243 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2244 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2245 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2246 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2247 return (abi_ulong*)(iwmmxtframe + 1);
2248 }
2249
2250 static int do_sigframe_return_v2(CPUARMState *env,
2251 target_ulong context_addr,
2252 struct target_ucontext_v2 *uc)
2253 {
2254 sigset_t host_set;
2255 abi_ulong *regspace;
2256
2257 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2258 set_sigmask(&host_set);
2259
2260 if (restore_sigcontext(env, &uc->tuc_mcontext))
2261 return 1;
2262
2263 /* Restore coprocessor signal frame */
2264 regspace = uc->tuc_regspace;
2265 if (arm_feature(env, ARM_FEATURE_VFP)) {
2266 regspace = restore_sigframe_v2_vfp(env, regspace);
2267 if (!regspace) {
2268 return 1;
2269 }
2270 }
2271 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2272 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2273 if (!regspace) {
2274 return 1;
2275 }
2276 }
2277
2278 if (do_sigaltstack(context_addr
2279 + offsetof(struct target_ucontext_v2, tuc_stack),
2280 0, get_sp_from_cpustate(env)) == -EFAULT) {
2281 return 1;
2282 }
2283
2284 #if 0
2285 /* Send SIGTRAP if we're single-stepping */
2286 if (ptrace_cancel_bpt(current))
2287 send_sig(SIGTRAP, current, 1);
2288 #endif
2289
2290 return 0;
2291 }
2292
2293 static long do_sigreturn_v2(CPUARMState *env)
2294 {
2295 abi_ulong frame_addr;
2296 struct sigframe_v2 *frame = NULL;
2297
2298 /*
2299 * Since we stacked the signal on a 64-bit boundary,
2300 * 'sp' should be word aligned here. If it's
2301 * not, then the user is trying to mess with us.
2302 */
2303 frame_addr = env->regs[13];
2304 trace_user_do_sigreturn(env, frame_addr);
2305 if (frame_addr & 7) {
2306 goto badframe;
2307 }
2308
2309 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2310 goto badframe;
2311 }
2312
2313 if (do_sigframe_return_v2(env,
2314 frame_addr
2315 + offsetof(struct sigframe_v2, uc),
2316 &frame->uc)) {
2317 goto badframe;
2318 }
2319
2320 unlock_user_struct(frame, frame_addr, 0);
2321 return -TARGET_QEMU_ESIGRETURN;
2322
2323 badframe:
2324 unlock_user_struct(frame, frame_addr, 0);
2325 force_sig(TARGET_SIGSEGV);
2326 return -TARGET_QEMU_ESIGRETURN;
2327 }
2328
2329 long do_sigreturn(CPUARMState *env)
2330 {
2331 if (get_osversion() >= 0x020612) {
2332 return do_sigreturn_v2(env);
2333 } else {
2334 return do_sigreturn_v1(env);
2335 }
2336 }
2337
2338 static long do_rt_sigreturn_v1(CPUARMState *env)
2339 {
2340 abi_ulong frame_addr;
2341 struct rt_sigframe_v1 *frame = NULL;
2342 sigset_t host_set;
2343
2344 /*
2345 * Since we stacked the signal on a 64-bit boundary,
2346 * 'sp' should be word aligned here. If it's
2347 * not, then the user is trying to mess with us.
2348 */
2349 frame_addr = env->regs[13];
2350 trace_user_do_rt_sigreturn(env, frame_addr);
2351 if (frame_addr & 7) {
2352 goto badframe;
2353 }
2354
2355 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2356 goto badframe;
2357 }
2358
2359 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2360 set_sigmask(&host_set);
2361
2362 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2363 goto badframe;
2364 }
2365
2366 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2367 goto badframe;
2368
2369 #if 0
2370 /* Send SIGTRAP if we're single-stepping */
2371 if (ptrace_cancel_bpt(current))
2372 send_sig(SIGTRAP, current, 1);
2373 #endif
2374 unlock_user_struct(frame, frame_addr, 0);
2375 return -TARGET_QEMU_ESIGRETURN;
2376
2377 badframe:
2378 unlock_user_struct(frame, frame_addr, 0);
2379 force_sig(TARGET_SIGSEGV);
2380 return -TARGET_QEMU_ESIGRETURN;
2381 }
2382
2383 static long do_rt_sigreturn_v2(CPUARMState *env)
2384 {
2385 abi_ulong frame_addr;
2386 struct rt_sigframe_v2 *frame = NULL;
2387
2388 /*
2389 * Since we stacked the signal on a 64-bit boundary,
2390 * 'sp' should be word aligned here. If it's
2391 * not, then the user is trying to mess with us.
2392 */
2393 frame_addr = env->regs[13];
2394 trace_user_do_rt_sigreturn(env, frame_addr);
2395 if (frame_addr & 7) {
2396 goto badframe;
2397 }
2398
2399 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2400 goto badframe;
2401 }
2402
2403 if (do_sigframe_return_v2(env,
2404 frame_addr
2405 + offsetof(struct rt_sigframe_v2, uc),
2406 &frame->uc)) {
2407 goto badframe;
2408 }
2409
2410 unlock_user_struct(frame, frame_addr, 0);
2411 return -TARGET_QEMU_ESIGRETURN;
2412
2413 badframe:
2414 unlock_user_struct(frame, frame_addr, 0);
2415 force_sig(TARGET_SIGSEGV);
2416 return -TARGET_QEMU_ESIGRETURN;
2417 }
2418
2419 long do_rt_sigreturn(CPUARMState *env)
2420 {
2421 if (get_osversion() >= 0x020612) {
2422 return do_rt_sigreturn_v2(env);
2423 } else {
2424 return do_rt_sigreturn_v1(env);
2425 }
2426 }
2427
2428 #elif defined(TARGET_SPARC)
2429
2430 #define __SUNOS_MAXWIN 31
2431
2432 /* This is what SunOS does, so shall I. */
2433 struct target_sigcontext {
2434 abi_ulong sigc_onstack; /* state to restore */
2435
2436 abi_ulong sigc_mask; /* sigmask to restore */
2437 abi_ulong sigc_sp; /* stack pointer */
2438 abi_ulong sigc_pc; /* program counter */
2439 abi_ulong sigc_npc; /* next program counter */
2440 abi_ulong sigc_psr; /* for condition codes etc */
2441 abi_ulong sigc_g1; /* User uses these two registers */
2442 abi_ulong sigc_o0; /* within the trampoline code. */
2443
2444 /* Now comes information regarding the user's window set
2445 * at the time of the signal.
2446 */
2447 abi_ulong sigc_oswins; /* outstanding windows */
2448
2449 /* stack ptrs for each regwin buf */
2450 char *sigc_spbuf[__SUNOS_MAXWIN];
2451
2452 /* Windows to restore after signal */
2453 struct {
2454 abi_ulong locals[8];
2455 abi_ulong ins[8];
2456 } sigc_wbuf[__SUNOS_MAXWIN];
2457 };
2458 /* A Sparc stack frame */
2459 struct sparc_stackf {
2460 abi_ulong locals[8];
2461 abi_ulong ins[8];
2462 /* It's simpler to treat fp and callers_pc as elements of ins[]
2463 * since we never need to access them ourselves.
2464 */
2465 char *structptr;
2466 abi_ulong xargs[6];
2467 abi_ulong xxargs[1];
2468 };
2469
2470 typedef struct {
2471 struct {
2472 abi_ulong psr;
2473 abi_ulong pc;
2474 abi_ulong npc;
2475 abi_ulong y;
2476 abi_ulong u_regs[16]; /* globals and ins */
2477 } si_regs;
2478 int si_mask;
2479 } __siginfo_t;
2480
2481 typedef struct {
2482 abi_ulong si_float_regs[32];
2483 unsigned long si_fsr;
2484 unsigned long si_fpqdepth;
2485 struct {
2486 unsigned long *insn_addr;
2487 unsigned long insn;
2488 } si_fpqueue [16];
2489 } qemu_siginfo_fpu_t;
2490
2491
2492 struct target_signal_frame {
2493 struct sparc_stackf ss;
2494 __siginfo_t info;
2495 abi_ulong fpu_save;
2496 abi_ulong insns[2] __attribute__ ((aligned (8)));
2497 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2498 abi_ulong extra_size; /* Should be 0 */
2499 qemu_siginfo_fpu_t fpu_state;
2500 };
2501 struct target_rt_signal_frame {
2502 struct sparc_stackf ss;
2503 siginfo_t info;
2504 abi_ulong regs[20];
2505 sigset_t mask;
2506 abi_ulong fpu_save;
2507 unsigned int insns[2];
2508 stack_t stack;
2509 unsigned int extra_size; /* Should be 0 */
2510 qemu_siginfo_fpu_t fpu_state;
2511 };
2512
2513 #define UREG_O0 16
2514 #define UREG_O6 22
2515 #define UREG_I0 0
2516 #define UREG_I1 1
2517 #define UREG_I2 2
2518 #define UREG_I3 3
2519 #define UREG_I4 4
2520 #define UREG_I5 5
2521 #define UREG_I6 6
2522 #define UREG_I7 7
2523 #define UREG_L0 8
2524 #define UREG_FP UREG_I6
2525 #define UREG_SP UREG_O6
2526
2527 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2528 CPUSPARCState *env,
2529 unsigned long framesize)
2530 {
2531 abi_ulong sp;
2532
2533 sp = env->regwptr[UREG_FP];
2534
2535 /* This is the X/Open sanctioned signal stack switching. */
2536 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2537 if (!on_sig_stack(sp)
2538 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2539 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2540 }
2541 }
2542 return sp - framesize;
2543 }
2544
2545 static int
2546 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2547 {
2548 int err = 0, i;
2549
2550 __put_user(env->psr, &si->si_regs.psr);
2551 __put_user(env->pc, &si->si_regs.pc);
2552 __put_user(env->npc, &si->si_regs.npc);
2553 __put_user(env->y, &si->si_regs.y);
2554 for (i=0; i < 8; i++) {
2555 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2556 }
2557 for (i=0; i < 8; i++) {
2558 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2559 }
2560 __put_user(mask, &si->si_mask);
2561 return err;
2562 }
2563
2564 #if 0
2565 static int
2566 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2567 CPUSPARCState *env, unsigned long mask)
2568 {
2569 int err = 0;
2570
2571 __put_user(mask, &sc->sigc_mask);
2572 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2573 __put_user(env->pc, &sc->sigc_pc);
2574 __put_user(env->npc, &sc->sigc_npc);
2575 __put_user(env->psr, &sc->sigc_psr);
2576 __put_user(env->gregs[1], &sc->sigc_g1);
2577 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2578
2579 return err;
2580 }
2581 #endif
2582 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
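/* i.e. the signal frame size rounded up to a multiple of 8 bytes. */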
2583
2584 static void setup_frame(int sig, struct target_sigaction *ka,
2585 target_sigset_t *set, CPUSPARCState *env)
2586 {
2587 abi_ulong sf_addr;
2588 struct target_signal_frame *sf;
2589 int sigframe_size, err, i;
2590
2591 /* 1. Make sure everything is clean */
2592 //synchronize_user_stack();
2593
2594 sigframe_size = NF_ALIGNEDSZ;
2595 sf_addr = get_sigframe(ka, env, sigframe_size);
2596 trace_user_setup_frame(env, sf_addr);
2597
2598 sf = lock_user(VERIFY_WRITE, sf_addr,
2599 sizeof(struct target_signal_frame), 0);
2600 if (!sf) {
2601 goto sigsegv;
2602 }
2603 #if 0
2604 if (invalid_frame_pointer(sf, sigframe_size))
2605 goto sigill_and_return;
2606 #endif
2607 /* 2. Save the current process state */
2608 err = setup___siginfo(&sf->info, env, set->sig[0]);
2609 __put_user(0, &sf->extra_size);
2610
2611 //save_fpu_state(regs, &sf->fpu_state);
2612 //__put_user(&sf->fpu_state, &sf->fpu_save);
2613
2614 __put_user(set->sig[0], &sf->info.si_mask);
2615 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2616 __put_user(set->sig[i + 1], &sf->extramask[i]);
2617 }
2618
2619 for (i = 0; i < 8; i++) {
2620 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2621 }
2622 for (i = 0; i < 8; i++) {
2623 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2624 }
2625 if (err)
2626 goto sigsegv;
2627
2628 /* 3. signal handler back-trampoline and parameters */
2629 env->regwptr[UREG_FP] = sf_addr;
2630 env->regwptr[UREG_I0] = sig;
2631 env->regwptr[UREG_I1] = sf_addr +
2632 offsetof(struct target_signal_frame, info);
2633 env->regwptr[UREG_I2] = sf_addr +
2634 offsetof(struct target_signal_frame, info);
2635
2636 /* 4. signal handler */
2637 env->pc = ka->_sa_handler;
2638 env->npc = (env->pc + 4);
2639 /* 5. return to kernel instructions */
2640 if (ka->sa_restorer) {
2641 env->regwptr[UREG_I7] = ka->sa_restorer;
2642 } else {
2643 uint32_t val32;
2644
2645 env->regwptr[UREG_I7] = sf_addr +
2646 offsetof(struct target_signal_frame, insns) - 2 * 4;
2647
2648 /* mov __NR_sigreturn, %g1 */
2649 val32 = 0x821020d8;
2650 __put_user(val32, &sf->insns[0]);
2651
2652 /* t 0x10 */
2653 val32 = 0x91d02010;
2654 __put_user(val32, &sf->insns[1]);
2655 if (err)
2656 goto sigsegv;
2657
2658 /* Flush instruction space. */
2659 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2660 // tb_flush(env);
2661 }
2662 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2663 return;
2664 #if 0
2665 sigill_and_return:
2666 force_sig(TARGET_SIGILL);
2667 #endif
2668 sigsegv:
2669 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2670 force_sigsegv(sig);
2671 }
2672
2673 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2674 target_siginfo_t *info,
2675 target_sigset_t *set, CPUSPARCState *env)
2676 {
2677 fprintf(stderr, "setup_rt_frame: not implemented\n");
2678 }
2679
2680 long do_sigreturn(CPUSPARCState *env)
2681 {
2682 abi_ulong sf_addr;
2683 struct target_signal_frame *sf;
2684 uint32_t up_psr, pc, npc;
2685 target_sigset_t set;
2686 sigset_t host_set;
2687 int err=0, i;
2688
2689 sf_addr = env->regwptr[UREG_FP];
2690 trace_user_do_sigreturn(env, sf_addr);
2691 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2692 goto segv_and_exit;
2693 }
2694
2695 /* 1. Make sure we are not getting garbage from the user */
2696
2697 if (sf_addr & 3)
2698 goto segv_and_exit;
2699
2700 __get_user(pc, &sf->info.si_regs.pc);
2701 __get_user(npc, &sf->info.si_regs.npc);
2702
2703 if ((pc | npc) & 3) {
2704 goto segv_and_exit;
2705 }
2706
2707 /* 2. Restore the state */
2708 __get_user(up_psr, &sf->info.si_regs.psr);
2709
2710 /* User can only change condition codes and FPU enabling in %psr. */
2711 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2712 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2713
2714 env->pc = pc;
2715 env->npc = npc;
2716 __get_user(env->y, &sf->info.si_regs.y);
2717 for (i=0; i < 8; i++) {
2718 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2719 }
2720 for (i=0; i < 8; i++) {
2721 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2722 }
2723
2724 /* FIXME: implement FPU save/restore:
2725 * __get_user(fpu_save, &sf->fpu_save);
2726 * if (fpu_save)
2727 * err |= restore_fpu_state(env, fpu_save);
2728 */
2729
2730 /* This is pretty much atomic; no amount of locking would prevent
2731 * the races which exist anyway.
2732 */
2733 __get_user(set.sig[0], &sf->info.si_mask);
2734 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2735 __get_user(set.sig[i], &sf->extramask[i - 1]);
2736 }
2737
2738 target_to_host_sigset_internal(&host_set, &set);
2739 set_sigmask(&host_set);
2740
2741 if (err) {
2742 goto segv_and_exit;
2743 }
2744 unlock_user_struct(sf, sf_addr, 0);
2745 return -TARGET_QEMU_ESIGRETURN;
2746
2747 segv_and_exit:
2748 unlock_user_struct(sf, sf_addr, 0);
2749 force_sig(TARGET_SIGSEGV);
2750 return -TARGET_QEMU_ESIGRETURN;
2751 }
2752
2753 long do_rt_sigreturn(CPUSPARCState *env)
2754 {
2755 trace_user_do_rt_sigreturn(env, 0);
2756 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2757 return -TARGET_ENOSYS;
2758 }
2759
2760 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2761 #define MC_TSTATE 0
2762 #define MC_PC 1
2763 #define MC_NPC 2
2764 #define MC_Y 3
2765 #define MC_G1 4
2766 #define MC_G2 5
2767 #define MC_G3 6
2768 #define MC_G4 7
2769 #define MC_G5 8
2770 #define MC_G6 9
2771 #define MC_G7 10
2772 #define MC_O0 11
2773 #define MC_O1 12
2774 #define MC_O2 13
2775 #define MC_O3 14
2776 #define MC_O4 15
2777 #define MC_O5 16
2778 #define MC_O6 17
2779 #define MC_O7 18
2780 #define MC_NGREG 19
2781
2782 typedef abi_ulong target_mc_greg_t;
2783 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2784
2785 struct target_mc_fq {
2786 abi_ulong *mcfq_addr;
2787 uint32_t mcfq_insn;
2788 };
2789
2790 struct target_mc_fpu {
2791 union {
2792 uint32_t sregs[32];
2793 uint64_t dregs[32];
2794 //uint128_t qregs[16];
2795 } mcfpu_fregs;
2796 abi_ulong mcfpu_fsr;
2797 abi_ulong mcfpu_fprs;
2798 abi_ulong mcfpu_gsr;
2799 struct target_mc_fq *mcfpu_fq;
2800 unsigned char mcfpu_qcnt;
2801 unsigned char mcfpu_qentsz;
2802 unsigned char mcfpu_enab;
2803 };
2804 typedef struct target_mc_fpu target_mc_fpu_t;
2805
2806 typedef struct {
2807 target_mc_gregset_t mc_gregs;
2808 target_mc_greg_t mc_fp;
2809 target_mc_greg_t mc_i7;
2810 target_mc_fpu_t mc_fpregs;
2811 } target_mcontext_t;
2812
2813 struct target_ucontext {
2814 struct target_ucontext *tuc_link;
2815 abi_ulong tuc_flags;
2816 target_sigset_t tuc_sigmask;
2817 target_mcontext_t tuc_mcontext;
2818 };
2819
2820 /* A V9 register window */
2821 struct target_reg_window {
2822 abi_ulong locals[8];
2823 abi_ulong ins[8];
2824 };
2825
2826 #define TARGET_STACK_BIAS 2047
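/* SPARC V9 ABI: %sp and %fp are biased by 2047, so the register window
 * save area actually lives at %sp + TARGET_STACK_BIAS. */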
2827
2828 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2829 void sparc64_set_context(CPUSPARCState *env)
2830 {
2831 abi_ulong ucp_addr;
2832 struct target_ucontext *ucp;
2833 target_mc_gregset_t *grp;
2834 abi_ulong pc, npc, tstate;
2835 abi_ulong fp, i7, w_addr;
2836 unsigned int i;
2837
2838 ucp_addr = env->regwptr[UREG_I0];
2839 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2840 goto do_sigsegv;
2841 }
2842 grp = &ucp->tuc_mcontext.mc_gregs;
2843 __get_user(pc, &((*grp)[MC_PC]));
2844 __get_user(npc, &((*grp)[MC_NPC]));
2845 if ((pc | npc) & 3) {
2846 goto do_sigsegv;
2847 }
2848 if (env->regwptr[UREG_I1]) {
2849 target_sigset_t target_set;
2850 sigset_t set;
2851
2852 if (TARGET_NSIG_WORDS == 1) {
2853 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2854 } else {
2855 abi_ulong *src, *dst;
2856 src = ucp->tuc_sigmask.sig;
2857 dst = target_set.sig;
2858 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2859 __get_user(*dst, src);
2860 }
2861 }
2862 target_to_host_sigset_internal(&set, &target_set);
2863 set_sigmask(&set);
2864 }
2865 env->pc = pc;
2866 env->npc = npc;
2867 __get_user(env->y, &((*grp)[MC_Y]));
2868 __get_user(tstate, &((*grp)[MC_TSTATE]));
2869 env->asi = (tstate >> 24) & 0xff;
2870 cpu_put_ccr(env, tstate >> 32);
2871 cpu_put_cwp64(env, tstate & 0x1f);
2872 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2873 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2874 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2875 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2876 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2877 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2878 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2879 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2880 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2881 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2882 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2883 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2884 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2885 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2886 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2887
2888 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2889 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2890
2891 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2892 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2893 abi_ulong) != 0) {
2894 goto do_sigsegv;
2895 }
2896 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2897 abi_ulong) != 0) {
2898 goto do_sigsegv;
2899 }
2900 /* FIXME this does not match how the kernel handles the FPU in
2901 * its sparc64_set_context implementation. In particular the FPU
2902 * is only restored if fenab is non-zero in:
2903 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2904 */
2905 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2906 {
2907 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2908 for (i = 0; i < 64; i++, src++) {
2909 if (i & 1) {
2910 __get_user(env->fpr[i/2].l.lower, src);
2911 } else {
2912 __get_user(env->fpr[i/2].l.upper, src);
2913 }
2914 }
2915 }
2916 __get_user(env->fsr,
2917 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2918 __get_user(env->gsr,
2919 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2920 unlock_user_struct(ucp, ucp_addr, 0);
2921 return;
2922 do_sigsegv:
2923 unlock_user_struct(ucp, ucp_addr, 0);
2924 force_sig(TARGET_SIGSEGV);
2925 }
2926
2927 void sparc64_get_context(CPUSPARCState *env)
2928 {
2929 abi_ulong ucp_addr;
2930 struct target_ucontext *ucp;
2931 target_mc_gregset_t *grp;
2932 target_mcontext_t *mcp;
2933 abi_ulong fp, i7, w_addr;
2934 int err;
2935 unsigned int i;
2936 target_sigset_t target_set;
2937 sigset_t set;
2938
2939 ucp_addr = env->regwptr[UREG_I0];
2940 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2941 goto do_sigsegv;
2942 }
2943
2944 mcp = &ucp->tuc_mcontext;
2945 grp = &mcp->mc_gregs;
2946
2947 /* Skip over the trap instruction, first. */
2948 env->pc = env->npc;
2949 env->npc += 4;
2950
2951 /* If we're only reading the signal mask then do_sigprocmask()
2952 * is guaranteed not to fail, which is important because we don't
2953 * have any way to signal a failure or restart this operation since
2954 * this is not a normal syscall.
2955 */
2956 err = do_sigprocmask(0, NULL, &set);
2957 assert(err == 0);
2958 host_to_target_sigset_internal(&target_set, &set);
2959 if (TARGET_NSIG_WORDS == 1) {
2960 __put_user(target_set.sig[0],
2961 (abi_ulong *)&ucp->tuc_sigmask);
2962 } else {
2963 abi_ulong *src, *dst;
2964 src = target_set.sig;
2965 dst = ucp->tuc_sigmask.sig;
2966 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2967 __put_user(*src, dst);
2968 }
2969 if (err)
2970 goto do_sigsegv;
2971 }
2972
2973 /* XXX: tstate must be saved properly */
2974 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2975 __put_user(env->pc, &((*grp)[MC_PC]));
2976 __put_user(env->npc, &((*grp)[MC_NPC]));
2977 __put_user(env->y, &((*grp)[MC_Y]));
2978 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2979 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2980 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2981 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2982 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2983 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2984 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2985 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2986 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2987 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2988 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2989 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2990 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2991 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2992 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2993
2994 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2995 fp = i7 = 0;
2996 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2997 abi_ulong) != 0) {
2998 goto do_sigsegv;
2999 }
3000 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3001 abi_ulong) != 0) {
3002 goto do_sigsegv;
3003 }
3004 __put_user(fp, &(mcp->mc_fp));
3005 __put_user(i7, &(mcp->mc_i7));
3006
3007 {
3008 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3009 for (i = 0; i < 64; i++, dst++) {
3010 if (i & 1) {
3011 __put_user(env->fpr[i/2].l.lower, dst);
3012 } else {
3013 __put_user(env->fpr[i/2].l.upper, dst);
3014 }
3015 }
3016 }
3017 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3018 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3019 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3020
3021 if (err)
3022 goto do_sigsegv;
3023 unlock_user_struct(ucp, ucp_addr, 1);
3024 return;
3025 do_sigsegv:
3026 unlock_user_struct(ucp, ucp_addr, 1);
3027 force_sig(TARGET_SIGSEGV);
3028 }
3029 #endif
3030 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3031
3032 # if defined(TARGET_ABI_MIPSO32)
3033 struct target_sigcontext {
3034 uint32_t sc_regmask; /* Unused */
3035 uint32_t sc_status;
3036 uint64_t sc_pc;
3037 uint64_t sc_regs[32];
3038 uint64_t sc_fpregs[32];
3039 uint32_t sc_ownedfp; /* Unused */
3040 uint32_t sc_fpc_csr;
3041 uint32_t sc_fpc_eir; /* Unused */
3042 uint32_t sc_used_math;
3043 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
3044 uint32_t pad0;
3045 uint64_t sc_mdhi;
3046 uint64_t sc_mdlo;
3047 target_ulong sc_hi1; /* Was sc_cause */
3048 target_ulong sc_lo1; /* Was sc_badvaddr */
3049 target_ulong sc_hi2; /* Was sc_sigset[4] */
3050 target_ulong sc_lo2;
3051 target_ulong sc_hi3;
3052 target_ulong sc_lo3;
3053 };
3054 # else /* N32 || N64 */
3055 struct target_sigcontext {
3056 uint64_t sc_regs[32];
3057 uint64_t sc_fpregs[32];
3058 uint64_t sc_mdhi;
3059 uint64_t sc_hi1;
3060 uint64_t sc_hi2;
3061 uint64_t sc_hi3;
3062 uint64_t sc_mdlo;
3063 uint64_t sc_lo1;
3064 uint64_t sc_lo2;
3065 uint64_t sc_lo3;
3066 uint64_t sc_pc;
3067 uint32_t sc_fpc_csr;
3068 uint32_t sc_used_math;
3069 uint32_t sc_dsp;
3070 uint32_t sc_reserved;
3071 };
3072 # endif /* O32 */
3073
3074 struct sigframe {
3075 uint32_t sf_ass[4]; /* argument save space for o32 */
3076 uint32_t sf_code[2]; /* signal trampoline */
3077 struct target_sigcontext sf_sc;
3078 target_sigset_t sf_mask;
3079 };
3080
3081 struct target_ucontext {
3082 target_ulong tuc_flags;
3083 target_ulong tuc_link;
3084 target_stack_t tuc_stack;
3085 target_ulong pad0;
3086 struct target_sigcontext tuc_mcontext;
3087 target_sigset_t tuc_sigmask;
3088 };
3089
3090 struct target_rt_sigframe {
3091 uint32_t rs_ass[4]; /* argument save space for o32 */
3092 uint32_t rs_code[2]; /* signal trampoline */
3093 struct target_siginfo rs_info;
3094 struct target_ucontext rs_uc;
3095 };
3096
3097 /* Install trampoline to jump back from signal handler */
3098 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
3099 {
3100 int err = 0;
3101
3102 /*
3103 * Set up the return code ...
3104 *
3105 * li v0, __NR__foo_sigreturn
3106 * syscall
3107 */
3108
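/* 0x24020000 is "addiu $v0, $zero, <nr>" (the li above) and 0x0000000c
 * is "syscall". */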
3109 __put_user(0x24020000 + syscall, tramp + 0);
3110 __put_user(0x0000000c , tramp + 1);
3111 return err;
3112 }
3113
3114 static inline void setup_sigcontext(CPUMIPSState *regs,
3115 struct target_sigcontext *sc)
3116 {
3117 int i;
3118
3119 __put_user(exception_resume_pc(regs), &sc->sc_pc);
3120 regs->hflags &= ~MIPS_HFLAG_BMASK;
3121
3122 __put_user(0, &sc->sc_regs[0]);
3123 for (i = 1; i < 32; ++i) {
3124 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3125 }
3126
3127 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3128 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3129
3130 /* Rather than checking for dsp existence, always copy. The storage
3131 would just be garbage otherwise. */
3132 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
3133 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
3134 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
3135 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
3136 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
3137 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
3138 {
3139 uint32_t dsp = cpu_rddsp(0x3ff, regs);
3140 __put_user(dsp, &sc->sc_dsp);
3141 }
3142
3143 __put_user(1, &sc->sc_used_math);
3144
3145 for (i = 0; i < 32; ++i) {
3146 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3147 }
3148 }
3149
3150 static inline void
3151 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
3152 {
3153 int i;
3154
3155 __get_user(regs->CP0_EPC, &sc->sc_pc);
3156
3157 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3158 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3159
3160 for (i = 1; i < 32; ++i) {
3161 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3162 }
3163
3164 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
3165 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
3166 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
3167 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
3168 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
3169 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
3170 {
3171 uint32_t dsp;
3172 __get_user(dsp, &sc->sc_dsp);
3173 cpu_wrdsp(dsp, 0x3ff, regs);
3174 }
3175
3176 for (i = 0; i < 32; ++i) {
3177 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3178 }
3179 }
3180
3181 /*
3182 * Determine which stack to use..
3183 */
3184 static inline abi_ulong
3185 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3186 {
3187 unsigned long sp;
3188
3189 /* Default to using normal stack */
3190 sp = regs->active_tc.gpr[29];
3191
3192 /*
3193 * FPU emulator may have its own trampoline active just
3194 * above the user stack, 16 bytes before the next lowest
3195 * 16-byte boundary. Try to avoid trashing it.
3196 */
3197 sp -= 32;
3198
3199 /* This is the X/Open sanctioned signal stack switching. */
3200 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3201 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3202 }
3203
3204 return (sp - frame_size) & ~7;
3205 }
3206
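/* On MIPS16/microMIPS-capable CPUs bit 0 of a code address selects the
 * compressed ISA mode; mirror it into hflags and strip it from the PC. */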
3207 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3208 {
3209 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3210 env->hflags &= ~MIPS_HFLAG_M16;
3211 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3212 env->active_tc.PC &= ~(target_ulong) 1;
3213 }
3214 }
3215
3216 # if defined(TARGET_ABI_MIPSO32)
3217 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
3218 static void setup_frame(int sig, struct target_sigaction * ka,
3219 target_sigset_t *set, CPUMIPSState *regs)
3220 {
3221 struct sigframe *frame;
3222 abi_ulong frame_addr;
3223 int i;
3224
3225 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3226 trace_user_setup_frame(regs, frame_addr);
3227 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3228 goto give_sigsegv;
3229 }
3230
3231 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3232
3233 setup_sigcontext(regs, &frame->sf_sc);
3234
3235 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3236 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3237 }
3238
3239 /*
3240 * Arguments to signal handler:
3241 *
3242 * a0 = signal number
3243 * a1 = 0 (should be cause)
3244 * a2 = pointer to struct sigcontext
3245 *
3246 * $25 and PC point to the signal handler, $29 points to the
3247 * struct sigframe.
3248 */
3249 regs->active_tc.gpr[ 4] = sig;
3250 regs->active_tc.gpr[ 5] = 0;
3251 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3252 regs->active_tc.gpr[29] = frame_addr;
3253 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3254 /* The original kernel code sets CP0_EPC to the handler
3255 * since it returns to userland using eret;
3256 * we cannot do this here, so we must set PC directly */
3257 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3258 mips_set_hflags_isa_mode_from_pc(regs);
3259 unlock_user_struct(frame, frame_addr, 1);
3260 return;
3261
3262 give_sigsegv:
3263 force_sigsegv(sig);
3264 }
3265
3266 long do_sigreturn(CPUMIPSState *regs)
3267 {
3268 struct sigframe *frame;
3269 abi_ulong frame_addr;
3270 sigset_t blocked;
3271 target_sigset_t target_set;
3272 int i;
3273
3274 frame_addr = regs->active_tc.gpr[29];
3275 trace_user_do_sigreturn(regs, frame_addr);
3276 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3277 goto badframe;
3278
3279 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3280 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3281 }
3282
3283 target_to_host_sigset_internal(&blocked, &target_set);
3284 set_sigmask(&blocked);
3285
3286 restore_sigcontext(regs, &frame->sf_sc);
3287
3288 #if 0
3289 /*
3290 * Don't let your children do this ...
3291 */
3292 __asm__ __volatile__(
3293 "move\t$29, %0\n\t"
3294 "j\tsyscall_exit"
3295 :/* no outputs */
3296 :"r" (&regs));
3297 /* Unreached */
3298 #endif
3299
3300 regs->active_tc.PC = regs->CP0_EPC;
3301 mips_set_hflags_isa_mode_from_pc(regs);
3302 /* I am not sure this is right, but it seems to work;
3303 * maybe a problem with nested signals ? */
3304 regs->CP0_EPC = 0;
3305 return -TARGET_QEMU_ESIGRETURN;
3306
3307 badframe:
3308 force_sig(TARGET_SIGSEGV);
3309 return -TARGET_QEMU_ESIGRETURN;
3310 }
3311 # endif /* O32 */
3312
3313 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3314 target_siginfo_t *info,
3315 target_sigset_t *set, CPUMIPSState *env)
3316 {
3317 struct target_rt_sigframe *frame;
3318 abi_ulong frame_addr;
3319 int i;
3320
3321 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3322 trace_user_setup_rt_frame(env, frame_addr);
3323 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3324 goto give_sigsegv;
3325 }
3326
3327 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3328
3329 tswap_siginfo(&frame->rs_info, info);
3330
3331 __put_user(0, &frame->rs_uc.tuc_flags);
3332 __put_user(0, &frame->rs_uc.tuc_link);
3333 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3334 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3335 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3336 &frame->rs_uc.tuc_stack.ss_flags);
3337
3338 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3339
3340 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3341 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3342 }
3343
3344 /*
3345 * Arguments to signal handler:
3346 *
3347 * a0 = signal number
3348 * a1 = pointer to siginfo_t
3349 * a2 = pointer to struct ucontext
3350 *
3351 * $25 and PC point to the signal handler, $29 points to the
3352 * struct sigframe.
3353 */
3354 env->active_tc.gpr[ 4] = sig;
3355 env->active_tc.gpr[ 5] = frame_addr
3356 + offsetof(struct target_rt_sigframe, rs_info);
3357 env->active_tc.gpr[ 6] = frame_addr
3358 + offsetof(struct target_rt_sigframe, rs_uc);
3359 env->active_tc.gpr[29] = frame_addr;
3360 env->active_tc.gpr[31] = frame_addr
3361 + offsetof(struct target_rt_sigframe, rs_code);
3362 /* The original kernel code sets CP0_EPC to the handler
3363 * since it returns to userland using eret;
3364 * we cannot do this here, so we must set PC directly */
3365 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3366 mips_set_hflags_isa_mode_from_pc(env);
3367 unlock_user_struct(frame, frame_addr, 1);
3368 return;
3369
3370 give_sigsegv:
3371 unlock_user_struct(frame, frame_addr, 1);
3372 force_sigsegv(sig);
3373 }
3374
3375 long do_rt_sigreturn(CPUMIPSState *env)
3376 {
3377 struct target_rt_sigframe *frame;
3378 abi_ulong frame_addr;
3379 sigset_t blocked;
3380
3381 frame_addr = env->active_tc.gpr[29];
3382 trace_user_do_rt_sigreturn(env, frame_addr);
3383 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3384 goto badframe;
3385 }
3386
3387 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3388 set_sigmask(&blocked);
3389
3390 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3391
3392 if (do_sigaltstack(frame_addr +
3393 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3394 0, get_sp_from_cpustate(env)) == -EFAULT)
3395 goto badframe;
3396
3397 env->active_tc.PC = env->CP0_EPC;
3398 mips_set_hflags_isa_mode_from_pc(env);
3399 /* I am not sure this is right, but it seems to work;
3400 * maybe a problem with nested signals ? */
3401 env->CP0_EPC = 0;
3402 return -TARGET_QEMU_ESIGRETURN;
3403
3404 badframe:
3405 force_sig(TARGET_SIGSEGV);
3406 return -TARGET_QEMU_ESIGRETURN;
3407 }
3408
3409 #elif defined(TARGET_SH4)
3410
3411 /*
3412 * code and data structures from linux kernel:
3413 * include/asm-sh/sigcontext.h
3414 * arch/sh/kernel/signal.c
3415 */
3416
3417 struct target_sigcontext {
3418 target_ulong oldmask;
3419
3420 /* CPU registers */
3421 target_ulong sc_gregs[16];
3422 target_ulong sc_pc;
3423 target_ulong sc_pr;
3424 target_ulong sc_sr;
3425 target_ulong sc_gbr;
3426 target_ulong sc_mach;
3427 target_ulong sc_macl;
3428
3429 /* FPU registers */
3430 target_ulong sc_fpregs[16];
3431 target_ulong sc_xfpregs[16];
3432 unsigned int sc_fpscr;
3433 unsigned int sc_fpul;
3434 unsigned int sc_ownedfp;
3435 };
3436
3437 struct target_sigframe
3438 {
3439 struct target_sigcontext sc;
3440 target_ulong extramask[TARGET_NSIG_WORDS-1];
3441 uint16_t retcode[3];
3442 };
3443
3444
3445 struct target_ucontext {
3446 target_ulong tuc_flags;
3447 struct target_ucontext *tuc_link;
3448 target_stack_t tuc_stack;
3449 struct target_sigcontext tuc_mcontext;
3450 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3451 };
3452
3453 struct target_rt_sigframe
3454 {
3455 struct target_siginfo info;
3456 struct target_ucontext uc;
3457 uint16_t retcode[3];
3458 };
3459
3460
3461 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3462 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
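/* The retcode[] trampolines below use these as: MOVW(2) loads the syscall
 * number placed in retcode[2] into r3, then TRAP_NOARG traps into the
 * kernel. */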
3463
3464 static abi_ulong get_sigframe(struct target_sigaction *ka,
3465 unsigned long sp, size_t frame_size)
3466 {
3467 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3468 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3469 }
3470
3471 return (sp - frame_size) & -8ul;
3472 }
3473
3474 static void setup_sigcontext(struct target_sigcontext *sc,
3475 CPUSH4State *regs, unsigned long mask)
3476 {
3477 int i;
3478
3479 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3480 COPY(gregs[0]); COPY(gregs[1]);
3481 COPY(gregs[2]); COPY(gregs[3]);
3482 COPY(gregs[4]); COPY(gregs[5]);
3483 COPY(gregs[6]); COPY(gregs[7]);
3484 COPY(gregs[8]); COPY(gregs[9]);
3485 COPY(gregs[10]); COPY(gregs[11]);
3486 COPY(gregs[12]); COPY(gregs[13]);
3487 COPY(gregs[14]); COPY(gregs[15]);
3488 COPY(gbr); COPY(mach);
3489 COPY(macl); COPY(pr);
3490 COPY(sr); COPY(pc);
3491 #undef COPY
3492
3493 for (i=0; i<16; i++) {
3494 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3495 }
3496 __put_user(regs->fpscr, &sc->sc_fpscr);
3497 __put_user(regs->fpul, &sc->sc_fpul);
3498
3499 /* non-iBCS2 extensions.. */
3500 __put_user(mask, &sc->oldmask);
3501 }
3502
3503 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3504 {
3505 int i;
3506
3507 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3508 COPY(gregs[0]); COPY(gregs[1]);
3509 COPY(gregs[2]); COPY(gregs[3]);
3510 COPY(gregs[4]); COPY(gregs[5]);
3511 COPY(gregs[6]); COPY(gregs[7]);
3512 COPY(gregs[8]); COPY(gregs[9]);
3513 COPY(gregs[10]); COPY(gregs[11]);
3514 COPY(gregs[12]); COPY(gregs[13]);
3515 COPY(gregs[14]); COPY(gregs[15]);
3516 COPY(gbr); COPY(mach);
3517 COPY(macl); COPY(pr);
3518 COPY(sr); COPY(pc);
3519 #undef COPY
3520
3521 for (i=0; i<16; i++) {
3522 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3523 }
3524 __get_user(regs->fpscr, &sc->sc_fpscr);
3525 __get_user(regs->fpul, &sc->sc_fpul);
3526
3527 regs->tra = -1; /* disable syscall checks */
3528 }
3529
3530 static void setup_frame(int sig, struct target_sigaction *ka,
3531 target_sigset_t *set, CPUSH4State *regs)
3532 {
3533 struct target_sigframe *frame;
3534 abi_ulong frame_addr;
3535 int i;
3536
3537 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3538 trace_user_setup_frame(regs, frame_addr);
3539 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3540 goto give_sigsegv;
3541 }
3542
3543 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3544
3545 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3546 __put_user(set->sig[i + 1], &frame->extramask[i]);
3547 }
3548
3549 /* Set up to return from userspace. If provided, use a stub
3550 already in userspace. */
3551 if (ka->sa_flags & TARGET_SA_RESTORER) {
3552 regs->pr = (unsigned long) ka->sa_restorer;
3553 } else {
3554 /* Generate return code (system call to sigreturn) */
3555 abi_ulong retcode_addr = frame_addr +
3556 offsetof(struct target_sigframe, retcode);
3557 __put_user(MOVW(2), &frame->retcode[0]);
3558 __put_user(TRAP_NOARG, &frame->retcode[1]);
3559 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3560 regs->pr = (unsigned long) retcode_addr;
3561 }
3562
3563 /* Set up registers for signal handler */
3564 regs->gregs[15] = frame_addr;
3565 regs->gregs[4] = sig; /* Arg for signal handler */
3566 regs->gregs[5] = 0;
3567 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3568 regs->pc = (unsigned long) ka->_sa_handler;
3569
3570 unlock_user_struct(frame, frame_addr, 1);
3571 return;
3572
3573 give_sigsegv:
3574 unlock_user_struct(frame, frame_addr, 1);
3575 force_sigsegv(sig);
3576 }
3577
3578 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3579 target_siginfo_t *info,
3580 target_sigset_t *set, CPUSH4State *regs)
3581 {
3582 struct target_rt_sigframe *frame;
3583 abi_ulong frame_addr;
3584 int i;
3585
3586 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3587 trace_user_setup_rt_frame(regs, frame_addr);
3588 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3589 goto give_sigsegv;
3590 }
3591
3592 tswap_siginfo(&frame->info, info);
3593
3594 /* Create the ucontext. */
3595 __put_user(0, &frame->uc.tuc_flags);
3596 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3597 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3598 &frame->uc.tuc_stack.ss_sp);
3599 __put_user(sas_ss_flags(regs->gregs[15]),
3600 &frame->uc.tuc_stack.ss_flags);
3601 __put_user(target_sigaltstack_used.ss_size,
3602 &frame->uc.tuc_stack.ss_size);
3603 setup_sigcontext(&frame->uc.tuc_mcontext,
3604 regs, set->sig[0]);
3605 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3606 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3607 }
3608
3609 /* Set up to return from userspace. If provided, use a stub
3610 already in userspace. */
3611 if (ka->sa_flags & TARGET_SA_RESTORER) {
3612 regs->pr = (unsigned long) ka->sa_restorer;
3613 } else {
3614 /* Generate return code (system call to sigreturn) */
3615 abi_ulong retcode_addr = frame_addr +
3616 offsetof(struct target_rt_sigframe, retcode);
3617 __put_user(MOVW(2), &frame->retcode[0]);
3618 __put_user(TRAP_NOARG, &frame->retcode[1]);
3619 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3620 regs->pr = (unsigned long) retcode_addr;
3621 }
3622
3623 /* Set up registers for signal handler */
3624 regs->gregs[15] = frame_addr;
3625 regs->gregs[4] = sig; /* Arg for signal handler */
3626 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3627 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3628 regs->pc = (unsigned long) ka->_sa_handler;
3629
3630 unlock_user_struct(frame, frame_addr, 1);
3631 return;
3632
3633 give_sigsegv:
3634 unlock_user_struct(frame, frame_addr, 1);
3635 force_sigsegv(sig);
3636 }
3637
3638 long do_sigreturn(CPUSH4State *regs)
3639 {
3640 struct target_sigframe *frame;
3641 abi_ulong frame_addr;
3642 sigset_t blocked;
3643 target_sigset_t target_set;
3644 int i;
3645 int err = 0;
3646
3647 frame_addr = regs->gregs[15];
3648 trace_user_do_sigreturn(regs, frame_addr);
3649 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3650 goto badframe;
3651 }
3652
3653 __get_user(target_set.sig[0], &frame->sc.oldmask);
3654 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3655 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3656 }
3657
3658 if (err)
3659 goto badframe;
3660
3661 target_to_host_sigset_internal(&blocked, &target_set);
3662 set_sigmask(&blocked);
3663
3664 restore_sigcontext(regs, &frame->sc);
3665
3666 unlock_user_struct(frame, frame_addr, 0);
3667 return -TARGET_QEMU_ESIGRETURN;
3668
3669 badframe:
3670 unlock_user_struct(frame, frame_addr, 0);
3671 force_sig(TARGET_SIGSEGV);
3672 return -TARGET_QEMU_ESIGRETURN;
3673 }
3674
3675 long do_rt_sigreturn(CPUSH4State *regs)
3676 {
3677 struct target_rt_sigframe *frame;
3678 abi_ulong frame_addr;
3679 sigset_t blocked;
3680
3681 frame_addr = regs->gregs[15];
3682 trace_user_do_rt_sigreturn(regs, frame_addr);
3683 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3684 goto badframe;
3685 }
3686
3687 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3688 set_sigmask(&blocked);
3689
3690 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3691
3692 if (do_sigaltstack(frame_addr +
3693 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3694 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3695 goto badframe;
3696 }
3697
3698 unlock_user_struct(frame, frame_addr, 0);
3699 return -TARGET_QEMU_ESIGRETURN;
3700
3701 badframe:
3702 unlock_user_struct(frame, frame_addr, 0);
3703 force_sig(TARGET_SIGSEGV);
3704 return -TARGET_QEMU_ESIGRETURN;
3705 }
3706 #elif defined(TARGET_MICROBLAZE)
3707
3708 struct target_sigcontext {
3709 struct target_pt_regs regs; /* needs to be first */
3710 uint32_t oldmask;
3711 };
3712
3713 struct target_stack_t {
3714 abi_ulong ss_sp;
3715 int ss_flags;
3716 unsigned int ss_size;
3717 };
3718
3719 struct target_ucontext {
3720 abi_ulong tuc_flags;
3721 abi_ulong tuc_link;
3722 struct target_stack_t tuc_stack;
3723 struct target_sigcontext tuc_mcontext;
3724 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3725 };
3726
3727 /* Signal frames. */
3728 struct target_signal_frame {
3729 struct target_ucontext uc;
3730 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3731 uint32_t tramp[2];
3732 };
3733
3734 struct rt_signal_frame {
3735 siginfo_t info;
3736 struct ucontext uc;
3737 uint32_t tramp[2];
3738 };
3739
3740 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3741 {
3742 __put_user(env->regs[0], &sc->regs.r0);
3743 __put_user(env->regs[1], &sc->regs.r1);
3744 __put_user(env->regs[2], &sc->regs.r2);
3745 __put_user(env->regs[3], &sc->regs.r3);
3746 __put_user(env->regs[4], &sc->regs.r4);
3747 __put_user(env->regs[5], &sc->regs.r5);
3748 __put_user(env->regs[6], &sc->regs.r6);
3749 __put_user(env->regs[7], &sc->regs.r7);
3750 __put_user(env->regs[8], &sc->regs.r8);
3751 __put_user(env->regs[9], &sc->regs.r9);
3752 __put_user(env->regs[10], &sc->regs.r10);
3753 __put_user(env->regs[11], &sc->regs.r11);
3754 __put_user(env->regs[12], &sc->regs.r12);
3755 __put_user(env->regs[13], &sc->regs.r13);
3756 __put_user(env->regs[14], &sc->regs.r14);
3757 __put_user(env->regs[15], &sc->regs.r15);
3758 __put_user(env->regs[16], &sc->regs.r16);
3759 __put_user(env->regs[17], &sc->regs.r17);
3760 __put_user(env->regs[18], &sc->regs.r18);
3761 __put_user(env->regs[19], &sc->regs.r19);
3762 __put_user(env->regs[20], &sc->regs.r20);
3763 __put_user(env->regs[21], &sc->regs.r21);
3764 __put_user(env->regs[22], &sc->regs.r22);
3765 __put_user(env->regs[23], &sc->regs.r23);
3766 __put_user(env->regs[24], &sc->regs.r24);
3767 __put_user(env->regs[25], &sc->regs.r25);
3768 __put_user(env->regs[26], &sc->regs.r26);
3769 __put_user(env->regs[27], &sc->regs.r27);
3770 __put_user(env->regs[28], &sc->regs.r28);
3771 __put_user(env->regs[29], &sc->regs.r29);
3772 __put_user(env->regs[30], &sc->regs.r30);
3773 __put_user(env->regs[31], &sc->regs.r31);
3774 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3775 }
3776
3777 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3778 {
3779 __get_user(env->regs[0], &sc->regs.r0);
3780 __get_user(env->regs[1], &sc->regs.r1);
3781 __get_user(env->regs[2], &sc->regs.r2);
3782 __get_user(env->regs[3], &sc->regs.r3);
3783 __get_user(env->regs[4], &sc->regs.r4);
3784 __get_user(env->regs[5], &sc->regs.r5);
3785 __get_user(env->regs[6], &sc->regs.r6);
3786 __get_user(env->regs[7], &sc->regs.r7);
3787 __get_user(env->regs[8], &sc->regs.r8);
3788 __get_user(env->regs[9], &sc->regs.r9);
3789 __get_user(env->regs[10], &sc->regs.r10);
3790 __get_user(env->regs[11], &sc->regs.r11);
3791 __get_user(env->regs[12], &sc->regs.r12);
3792 __get_user(env->regs[13], &sc->regs.r13);
3793 __get_user(env->regs[14], &sc->regs.r14);
3794 __get_user(env->regs[15], &sc->regs.r15);
3795 __get_user(env->regs[16], &sc->regs.r16);
3796 __get_user(env->regs[17], &sc->regs.r17);
3797 __get_user(env->regs[18], &sc->regs.r18);
3798 __get_user(env->regs[19], &sc->regs.r19);
3799 __get_user(env->regs[20], &sc->regs.r20);
3800 __get_user(env->regs[21], &sc->regs.r21);
3801 __get_user(env->regs[22], &sc->regs.r22);
3802 __get_user(env->regs[23], &sc->regs.r23);
3803 __get_user(env->regs[24], &sc->regs.r24);
3804 __get_user(env->regs[25], &sc->regs.r25);
3805 __get_user(env->regs[26], &sc->regs.r26);
3806 __get_user(env->regs[27], &sc->regs.r27);
3807 __get_user(env->regs[28], &sc->regs.r28);
3808 __get_user(env->regs[29], &sc->regs.r29);
3809 __get_user(env->regs[30], &sc->regs.r30);
3810 __get_user(env->regs[31], &sc->regs.r31);
3811 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3812 }
3813
3814 static abi_ulong get_sigframe(struct target_sigaction *ka,
3815 CPUMBState *env, int frame_size)
3816 {
3817 abi_ulong sp = env->regs[1];
3818
3819 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3820 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3821 }
3822
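/* Masking with -8UL clears the low three bits, so the frame address is
   rounded down to an 8-byte boundary. */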
3823 return ((sp - frame_size) & -8UL);
3824 }
3825
3826 static void setup_frame(int sig, struct target_sigaction *ka,
3827 target_sigset_t *set, CPUMBState *env)
3828 {
3829 struct target_signal_frame *frame;
3830 abi_ulong frame_addr;
3831 int i;
3832
3833 frame_addr = get_sigframe(ka, env, sizeof *frame);
3834 trace_user_setup_frame(env, frame_addr);
3835 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3836 goto badframe;
3837
3838 /* Save the mask. */
3839 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3840
3841 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3842 __put_user(set->sig[i], &frame->extramask[i - 1]);
3843 }
3844
3845 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3846
3847 /* Set up to return from userspace. If provided, use a stub
3848 already in userspace. */
3849 /* minus 8 is offset to cater for "rtsd r15,8" offset */
3850 if (ka->sa_flags & TARGET_SA_RESTORER) {
3851 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3852 } else {
3853 uint32_t t;
3854 /* Note, these encodings are _big endian_! */
3855 /* addi r12, r0, __NR_sigreturn */
3856 t = 0x31800000UL | TARGET_NR_sigreturn;
3857 __put_user(t, frame->tramp + 0);
3858 /* brki r14, 0x8 */
3859 t = 0xb9cc0008UL;
3860 __put_user(t, frame->tramp + 1);
3861
3862 /* Return from sighandler will jump to the tramp.
3863 Negative 8 offset because return is rtsd r15, 8 */
3864 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3865 - 8;
3866 }
3867
3868 /* Set up registers for signal handler */
3869 env->regs[1] = frame_addr;
3870 /* Signal handler args: */
3871 env->regs[5] = sig; /* Arg 0: signum */
3872 env->regs[6] = 0;
3873 /* arg 1: sigcontext */
3874 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3875
3876 /* Offset of 4 to handle microblaze rtid r14, 0 */
3877 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3878
3879 unlock_user_struct(frame, frame_addr, 1);
3880 return;
3881 badframe:
3882 force_sigsegv(sig);
3883 }
3884
3885 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3886 target_siginfo_t *info,
3887 target_sigset_t *set, CPUMBState *env)
3888 {
3889 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3890 }
3891
3892 long do_sigreturn(CPUMBState *env)
3893 {
3894 struct target_signal_frame *frame;
3895 abi_ulong frame_addr;
3896 target_sigset_t target_set;
3897 sigset_t set;
3898 int i;
3899
3900 frame_addr = env->regs[R_SP];
3901 trace_user_do_sigreturn(env, frame_addr);
3902 /* Make sure the guest isn't playing games. */
3903 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3904 goto badframe;
3905
3906 /* Restore blocked signals */
3907 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3908 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3909 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3910 }
3911 target_to_host_sigset_internal(&set, &target_set);
3912 set_sigmask(&set);
3913
3914 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3915 /* We got here through a sigreturn syscall, our path back is via an
3916 rtb insn so set up r14 for that. */
3917 env->regs[14] = env->sregs[SR_PC];
3918
3919 unlock_user_struct(frame, frame_addr, 0);
3920 return -TARGET_QEMU_ESIGRETURN;
3921 badframe:
3922 force_sig(TARGET_SIGSEGV);
3923 return -TARGET_QEMU_ESIGRETURN;
3924 }
3925
3926 long do_rt_sigreturn(CPUMBState *env)
3927 {
3928 trace_user_do_rt_sigreturn(env, 0);
3929 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3930 return -TARGET_ENOSYS;
3931 }
3932
3933 #elif defined(TARGET_CRIS)
3934
3935 struct target_sigcontext {
3936 struct target_pt_regs regs; /* needs to be first */
3937 uint32_t oldmask;
3938 uint32_t usp; /* usp before stacking this gunk on it */
3939 };
3940
3941 /* Signal frames. */
3942 struct target_signal_frame {
3943 struct target_sigcontext sc;
3944 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3945 uint16_t retcode[4]; /* Trampoline code. */
3946 };
3947
3948 struct rt_signal_frame {
3949 siginfo_t *pinfo;
3950 void *puc;
3951 siginfo_t info;
3952 struct ucontext uc;
3953 uint16_t retcode[4]; /* Trampoline code. */
3954 };
3955
3956 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3957 {
3958 __put_user(env->regs[0], &sc->regs.r0);
3959 __put_user(env->regs[1], &sc->regs.r1);
3960 __put_user(env->regs[2], &sc->regs.r2);
3961 __put_user(env->regs[3], &sc->regs.r3);
3962 __put_user(env->regs[4], &sc->regs.r4);
3963 __put_user(env->regs[5], &sc->regs.r5);
3964 __put_user(env->regs[6], &sc->regs.r6);
3965 __put_user(env->regs[7], &sc->regs.r7);
3966 __put_user(env->regs[8], &sc->regs.r8);
3967 __put_user(env->regs[9], &sc->regs.r9);
3968 __put_user(env->regs[10], &sc->regs.r10);
3969 __put_user(env->regs[11], &sc->regs.r11);
3970 __put_user(env->regs[12], &sc->regs.r12);
3971 __put_user(env->regs[13], &sc->regs.r13);
3972 __put_user(env->regs[14], &sc->usp);
3973 __put_user(env->regs[15], &sc->regs.acr);
3974 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3975 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3976 __put_user(env->pc, &sc->regs.erp);
3977 }
3978
3979 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3980 {
3981 __get_user(env->regs[0], &sc->regs.r0);
3982 __get_user(env->regs[1], &sc->regs.r1);
3983 __get_user(env->regs[2], &sc->regs.r2);
3984 __get_user(env->regs[3], &sc->regs.r3);
3985 __get_user(env->regs[4], &sc->regs.r4);
3986 __get_user(env->regs[5], &sc->regs.r5);
3987 __get_user(env->regs[6], &sc->regs.r6);
3988 __get_user(env->regs[7], &sc->regs.r7);
3989 __get_user(env->regs[8], &sc->regs.r8);
3990 __get_user(env->regs[9], &sc->regs.r9);
3991 __get_user(env->regs[10], &sc->regs.r10);
3992 __get_user(env->regs[11], &sc->regs.r11);
3993 __get_user(env->regs[12], &sc->regs.r12);
3994 __get_user(env->regs[13], &sc->regs.r13);
3995 __get_user(env->regs[14], &sc->usp);
3996 __get_user(env->regs[15], &sc->regs.acr);
3997 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3998 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3999 __get_user(env->pc, &sc->regs.erp);
4000 }
4001
4002 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4003 {
4004 abi_ulong sp;
4005 /* Align the stack downwards to 4. */
4006 sp = (env->regs[R_SP] & ~3);
4007 return sp - framesize;
4008 }
4009
4010 static void setup_frame(int sig, struct target_sigaction *ka,
4011 target_sigset_t *set, CPUCRISState *env)
4012 {
4013 struct target_signal_frame *frame;
4014 abi_ulong frame_addr;
4015 int i;
4016
4017 frame_addr = get_sigframe(env, sizeof *frame);
4018 trace_user_setup_frame(env, frame_addr);
4019 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4020 goto badframe;
4021
4022 /*
4023 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
4024 * use this trampoline anymore but it sets it up for GDB.
4025 * In QEMU, using the trampoline simplifies things a bit so we use it.
4026 *
4027 * This is movu.w __NR_sigreturn, r9; break 13;
4028 */
4029 __put_user(0x9c5f, frame->retcode+0);
4030 __put_user(TARGET_NR_sigreturn,
4031 frame->retcode + 1);
4032 __put_user(0xe93d, frame->retcode + 2);
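/* Rough decode, per the comment above: retcode[0] = 0x9c5f is the movu.w
   opcode targeting r9, retcode[1] carries the immediate TARGET_NR_sigreturn,
   and retcode[2] = 0xe93d is "break 13". */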
4033
4034 /* Save the mask. */
4035 __put_user(set->sig[0], &frame->sc.oldmask);
4036
4037 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4038 __put_user(set->sig[i], &frame->extramask[i - 1]);
4039 }
4040
4041 setup_sigcontext(&frame->sc, env);
4042
4043 /* Move the stack and setup the arguments for the handler. */
4044 env->regs[R_SP] = frame_addr;
4045 env->regs[10] = sig;
4046 env->pc = (unsigned long) ka->_sa_handler;
4047 /* Link SRP so the guest returns through the trampoline. */
4048 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
4049
4050 unlock_user_struct(frame, frame_addr, 1);
4051 return;
4052 badframe:
4053 force_sigsegv(sig);
4054 }
4055
4056 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4057 target_siginfo_t *info,
4058 target_sigset_t *set, CPUCRISState *env)
4059 {
4060 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
4061 }
4062
4063 long do_sigreturn(CPUCRISState *env)
4064 {
4065 struct target_signal_frame *frame;
4066 abi_ulong frame_addr;
4067 target_sigset_t target_set;
4068 sigset_t set;
4069 int i;
4070
4071 frame_addr = env->regs[R_SP];
4072 trace_user_do_sigreturn(env, frame_addr);
4073 /* Make sure the guest isn't playing games. */
4074 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
4075 goto badframe;
4076 }
4077
4078 /* Restore blocked signals */
4079 __get_user(target_set.sig[0], &frame->sc.oldmask);
4080 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4081 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4082 }
4083 target_to_host_sigset_internal(&set, &target_set);
4084 set_sigmask(&set);
4085
4086 restore_sigcontext(&frame->sc, env);
4087 unlock_user_struct(frame, frame_addr, 0);
4088 return -TARGET_QEMU_ESIGRETURN;
4089 badframe:
4090 force_sig(TARGET_SIGSEGV);
4091 return -TARGET_QEMU_ESIGRETURN;
4092 }
4093
4094 long do_rt_sigreturn(CPUCRISState *env)
4095 {
4096 trace_user_do_rt_sigreturn(env, 0);
4097 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
4098 return -TARGET_ENOSYS;
4099 }
4100
4101 #elif defined(TARGET_NIOS2)
4102
4103 #define MCONTEXT_VERSION 2
4104
4105 struct target_sigcontext {
4106 int version;
4107 unsigned long gregs[32];
4108 };
4109
4110 struct target_ucontext {
4111 abi_ulong tuc_flags;
4112 abi_ulong tuc_link;
4113 target_stack_t tuc_stack;
4114 struct target_sigcontext tuc_mcontext;
4115 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4116 };
4117
4118 struct target_rt_sigframe {
4119 struct target_siginfo info;
4120 struct target_ucontext uc;
4121 };
4122
4123 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4124 {
4125 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4126 #ifdef CONFIG_STACK_GROWSUP
4127 return target_sigaltstack_used.ss_sp;
4128 #else
4129 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4130 #endif
4131 }
4132 return sp;
4133 }
4134
4135 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
4136 {
4137 unsigned long *gregs = uc->tuc_mcontext.gregs;
4138
4139 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4140 __put_user(env->regs[1], &gregs[0]);
4141 __put_user(env->regs[2], &gregs[1]);
4142 __put_user(env->regs[3], &gregs[2]);
4143 __put_user(env->regs[4], &gregs[3]);
4144 __put_user(env->regs[5], &gregs[4]);
4145 __put_user(env->regs[6], &gregs[5]);
4146 __put_user(env->regs[7], &gregs[6]);
4147 __put_user(env->regs[8], &gregs[7]);
4148 __put_user(env->regs[9], &gregs[8]);
4149 __put_user(env->regs[10], &gregs[9]);
4150 __put_user(env->regs[11], &gregs[10]);
4151 __put_user(env->regs[12], &gregs[11]);
4152 __put_user(env->regs[13], &gregs[12]);
4153 __put_user(env->regs[14], &gregs[13]);
4154 __put_user(env->regs[15], &gregs[14]);
4155 __put_user(env->regs[16], &gregs[15]);
4156 __put_user(env->regs[17], &gregs[16]);
4157 __put_user(env->regs[18], &gregs[17]);
4158 __put_user(env->regs[19], &gregs[18]);
4159 __put_user(env->regs[20], &gregs[19]);
4160 __put_user(env->regs[21], &gregs[20]);
4161 __put_user(env->regs[22], &gregs[21]);
4162 __put_user(env->regs[23], &gregs[22]);
4163 __put_user(env->regs[R_RA], &gregs[23]);
4164 __put_user(env->regs[R_FP], &gregs[24]);
4165 __put_user(env->regs[R_GP], &gregs[25]);
4166 __put_user(env->regs[R_EA], &gregs[27]);
4167 __put_user(env->regs[R_SP], &gregs[28]);
4168
4169 return 0;
4170 }
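/* The gregs[] indices presumably follow the kernel's nios2 mcontext layout:
   entries 0-22 hold r1-r23, then ra, fp, gp, ea and sp. gregs[26] is not
   written here; the restore path below reads it into a scratch variable and
   discards it. */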
4171
4172 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
4173 int *pr2)
4174 {
4175 int temp;
4176 abi_ulong off, frame_addr = env->regs[R_SP];
4177 unsigned long *gregs = uc->tuc_mcontext.gregs;
4178 int err;
4179
4180 /* Always make any pending restarted system calls return -EINTR */
4181 /* current->restart_block.fn = do_no_restart_syscall; */
4182
4183 __get_user(temp, &uc->tuc_mcontext.version);
4184 if (temp != MCONTEXT_VERSION) {
4185 return 1;
4186 }
4187
4188 /* restore passed registers */
4189 __get_user(env->regs[1], &gregs[0]);
4190 __get_user(env->regs[2], &gregs[1]);
4191 __get_user(env->regs[3], &gregs[2]);
4192 __get_user(env->regs[4], &gregs[3]);
4193 __get_user(env->regs[5], &gregs[4]);
4194 __get_user(env->regs[6], &gregs[5]);
4195 __get_user(env->regs[7], &gregs[6]);
4196 __get_user(env->regs[8], &gregs[7]);
4197 __get_user(env->regs[9], &gregs[8]);
4198 __get_user(env->regs[10], &gregs[9]);
4199 __get_user(env->regs[11], &gregs[10]);
4200 __get_user(env->regs[12], &gregs[11]);
4201 __get_user(env->regs[13], &gregs[12]);
4202 __get_user(env->regs[14], &gregs[13]);
4203 __get_user(env->regs[15], &gregs[14]);
4204 __get_user(env->regs[16], &gregs[15]);
4205 __get_user(env->regs[17], &gregs[16]);
4206 __get_user(env->regs[18], &gregs[17]);
4207 __get_user(env->regs[19], &gregs[18]);
4208 __get_user(env->regs[20], &gregs[19]);
4209 __get_user(env->regs[21], &gregs[20]);
4210 __get_user(env->regs[22], &gregs[21]);
4211 __get_user(env->regs[23], &gregs[22]);
4212 /* gregs[23] is handled below */
4213 /* Verify: should this be settable? */
4214 __get_user(env->regs[R_FP], &gregs[24]);
4215 /* Verify: should this be settable? */
4216 __get_user(env->regs[R_GP], &gregs[25]);
4217 /* Not really necessary; no user-settable bits */
4218 __get_user(temp, &gregs[26]);
4219 __get_user(env->regs[R_EA], &gregs[27]);
4220
4221 __get_user(env->regs[R_RA], &gregs[23]);
4222 __get_user(env->regs[R_SP], &gregs[28]);
4223
4224 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
4225 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
4226 if (err == -EFAULT) {
4227 return 1;
4228 }
4229
4230 *pr2 = env->regs[2];
4231 return 0;
4232 }
4233
4234 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
4235 size_t frame_size)
4236 {
4237 unsigned long usp;
4238
4239 /* Default to using normal stack. */
4240 usp = env->regs[R_SP];
4241
4242 /* This is the X/Open sanctioned signal stack switching. */
4243 usp = sigsp(usp, ka);
4244
4245 /* Verify: should this be 32- or 64-bit aligned? */
4246 return (void *)((usp - frame_size) & -8UL);
4247 }
4248
4249 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4250 target_siginfo_t *info,
4251 target_sigset_t *set,
4252 CPUNios2State *env)
4253 {
4254 struct target_rt_sigframe *frame;
4255 int i, err = 0;
4256
4257 frame = get_sigframe(ka, env, sizeof(*frame));
4258
4259 if (ka->sa_flags & SA_SIGINFO) {
4260 tswap_siginfo(&frame->info, info);
4261 }
4262
4263 /* Create the ucontext. */
4264 __put_user(0, &frame->uc.tuc_flags);
4265 __put_user(0, &frame->uc.tuc_link);
4266 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4267 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4268 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4269 err |= rt_setup_ucontext(&frame->uc, env);
4270 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4271 __put_user((abi_ulong)set->sig[i],
4272 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4273 }
4274
4275 if (err) {
4276 goto give_sigsegv;
4277 }
4278
4279 /* Set up to return from userspace; jump to fixed address sigreturn
4280 trampoline on kuser page. */
4281 env->regs[R_RA] = (unsigned long) (0x1044);
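/* The constant 0x1044 is assumed to be the fixed sigreturn trampoline
   address on the kernel's nios2 kuser page (base 0x1000 plus a small
   offset); it is not derived from anything in this file. */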
4282
4283 /* Set up registers for signal handler */
4284 env->regs[R_SP] = (unsigned long) frame;
4285 env->regs[4] = (unsigned long) sig;
4286 env->regs[5] = (unsigned long) &frame->info;
4287 env->regs[6] = (unsigned long) &frame->uc;
4288 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4289 return;
4290
4291 give_sigsegv:
4292 if (sig == TARGET_SIGSEGV) {
4293 ka->_sa_handler = TARGET_SIG_DFL;
4294 }
4295 force_sigsegv(sig);
4296 return;
4297 }
4298
4299 long do_sigreturn(CPUNios2State *env)
4300 {
4301 trace_user_do_sigreturn(env, 0);
4302 fprintf(stderr, "do_sigreturn: not implemented\n");
4303 return -TARGET_ENOSYS;
4304 }
4305
4306 long do_rt_sigreturn(CPUNios2State *env)
4307 {
4308 /* Verify: can we follow the stack back? */
4309 abi_ulong frame_addr = env->regs[R_SP];
4310 struct target_rt_sigframe *frame;
4311 sigset_t set;
4312 int rval;
4313
4314 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4315 goto badframe;
4316 }
4317
4318 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4319 do_sigprocmask(SIG_SETMASK, &set, NULL);
4320
4321 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4322 goto badframe;
4323 }
4324
4325 unlock_user_struct(frame, frame_addr, 0);
4326 return rval;
4327
4328 badframe:
4329 unlock_user_struct(frame, frame_addr, 0);
4330 force_sig(TARGET_SIGSEGV);
4331 return 0;
4332 }
4333 /* TARGET_NIOS2 */
4334
4335 #elif defined(TARGET_OPENRISC)
4336
4337 struct target_sigcontext {
4338 struct target_pt_regs regs;
4339 abi_ulong oldmask;
4340 abi_ulong usp;
4341 };
4342
4343 struct target_ucontext {
4344 abi_ulong tuc_flags;
4345 abi_ulong tuc_link;
4346 target_stack_t tuc_stack;
4347 struct target_sigcontext tuc_mcontext;
4348 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4349 };
4350
4351 struct target_rt_sigframe {
4352 abi_ulong pinfo;
4353 uint64_t puc;
4354 struct target_siginfo info;
4355 struct target_sigcontext sc;
4356 struct target_ucontext uc;
4357 unsigned char retcode[16]; /* trampoline code */
4358 };
4359
4360 /* This is the asm-generic/ucontext.h version */
4361 #if 0
4362 static int restore_sigcontext(CPUOpenRISCState *regs,
4363 struct target_sigcontext *sc)
4364 {
4365 unsigned int err = 0;
4366 unsigned long old_usp;
4367
4368 /* Always make any pending restarted system call return -EINTR */
4369 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4370
4371 /* restore the regs from &sc->regs (same as sc, since regs is first)
4372 * (sc is already checked for VERIFY_READ since the sigframe was
4373 * checked in sys_sigreturn previously)
4374 */
4375
4376 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4377 goto badframe;
4378 }
4379
4380 /* make sure the U-flag is set so user-mode cannot fool us */
4381
4382 regs->sr &= ~SR_SM;
4383
4384 /* restore the old USP as it was before we stacked the sc etc.
4385 * (we cannot just pop the sigcontext since we aligned the sp and
4386 * stuff after pushing it)
4387 */
4388
4389 __get_user(old_usp, &sc->usp);
4390 phx_signal("old_usp 0x%lx", old_usp);
4391
4392 __PHX__ REALLY /* ??? */
4393 wrusp(old_usp);
4394 regs->gpr[1] = old_usp;
4395
4396 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4397 * after this completes, but we don't use that mechanism. maybe we can
4398 * use it now ?
4399 */
4400
4401 return err;
4402
4403 badframe:
4404 return 1;
4405 }
4406 #endif
4407
4408 /* Set up a signal frame. */
4409
4410 static void setup_sigcontext(struct target_sigcontext *sc,
4411 CPUOpenRISCState *regs,
4412 unsigned long mask)
4413 {
4414 unsigned long usp = regs->gpr[1];
4415
4416 /* copy the regs. they are first in sc so we can use sc directly */
4417
4418 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4419
4420 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4421 the signal handler. The frametype will be restored to its previous
4422 value in restore_sigcontext. */
4423 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4424
4425 /* then some other stuff */
4426 __put_user(mask, &sc->oldmask);
4427 __put_user(usp, &sc->usp);
4428 }
4429
4430 static inline unsigned long align_sigframe(unsigned long sp)
4431 {
4432 return sp & ~3UL;
4433 }
4434
4435 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4436 CPUOpenRISCState *regs,
4437 size_t frame_size)
4438 {
4439 unsigned long sp = regs->gpr[1];
4440 int onsigstack = on_sig_stack(sp);
4441
4442 /* redzone */
4443 /* This is the X/Open sanctioned signal stack switching. */
4444 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4445 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4446 }
4447
4448 sp = align_sigframe(sp - frame_size);
4449
4450 /*
4451 * If we are on the alternate signal stack and would overflow it, don't.
4452 * Return an always-bogus address instead so we will die with SIGSEGV.
4453 */
4454
4455 if (onsigstack && !likely(on_sig_stack(sp))) {
4456 return -1L;
4457 }
4458
4459 return sp;
4460 }
4461
4462 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4463 target_siginfo_t *info,
4464 target_sigset_t *set, CPUOpenRISCState *env)
4465 {
4466 int err = 0;
4467 abi_ulong frame_addr;
4468 unsigned long return_ip;
4469 struct target_rt_sigframe *frame;
4470 abi_ulong info_addr, uc_addr;
4471
4472 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4473 trace_user_setup_rt_frame(env, frame_addr);
4474 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4475 goto give_sigsegv;
4476 }
4477
4478 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4479 __put_user(info_addr, &frame->pinfo);
4480 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4481 __put_user(uc_addr, &frame->puc);
4482
4483 if (ka->sa_flags & SA_SIGINFO) {
4484 tswap_siginfo(&frame->info, info);
4485 }
4486
4487 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4488 __put_user(0, &frame->uc.tuc_flags);
4489 __put_user(0, &frame->uc.tuc_link);
4490 __put_user(target_sigaltstack_used.ss_sp,
4491 &frame->uc.tuc_stack.ss_sp);
4492 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4493 __put_user(target_sigaltstack_used.ss_size,
4494 &frame->uc.tuc_stack.ss_size);
4495 setup_sigcontext(&frame->sc, env, set->sig[0]);
4496
4497 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4498
4499 /* trampoline - the desired return ip is the retcode itself */
4500 return_ip = (unsigned long)&frame->retcode;
4501 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4502 __put_user(0xa960, (short *)(frame->retcode + 0));
4503 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4504 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4505 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
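/* The two words above are assumed to be "l.sys 1" (0x20000001) followed by
   "l.nop" (0x15000000) as filler after the syscall. */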
4506
4507 if (err) {
4508 goto give_sigsegv;
4509 }
4510
4511 /* TODO what is the current->exec_domain stuff and invmap ? */
4512
4513 /* Set up registers for signal handler */
4514 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4515 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4516 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4517 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
4518 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
4519
4520 /* actually move the usp to reflect the stacked frame */
4521 env->gpr[1] = (unsigned long)frame;
4522
4523 return;
4524
4525 give_sigsegv:
4526 unlock_user_struct(frame, frame_addr, 1);
4527 force_sigsegv(sig);
4528 }
4529
4530 long do_sigreturn(CPUOpenRISCState *env)
4531 {
4532 trace_user_do_sigreturn(env, 0);
4533 fprintf(stderr, "do_sigreturn: not implemented\n");
4534 return -TARGET_ENOSYS;
4535 }
4536
4537 long do_rt_sigreturn(CPUOpenRISCState *env)
4538 {
4539 trace_user_do_rt_sigreturn(env, 0);
4540 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4541 return -TARGET_ENOSYS;
4542 }
4543 /* TARGET_OPENRISC */
4544
4545 #elif defined(TARGET_S390X)
4546
4547 #define __NUM_GPRS 16
4548 #define __NUM_FPRS 16
4549 #define __NUM_ACRS 16
4550
4551 #define S390_SYSCALL_SIZE 2
4552 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4553
4554 #define _SIGCONTEXT_NSIG 64
4555 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4556 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4557 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4558 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4559 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
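/* The trampolines below are built as (S390_SYSCALL_OPCODE | syscall number),
   i.e. an "svc n" instruction: 0x0a is the s390 supervisor-call opcode. */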
4560
4561 typedef struct {
4562 target_psw_t psw;
4563 target_ulong gprs[__NUM_GPRS];
4564 unsigned int acrs[__NUM_ACRS];
4565 } target_s390_regs_common;
4566
4567 typedef struct {
4568 unsigned int fpc;
4569 double fprs[__NUM_FPRS];
4570 } target_s390_fp_regs;
4571
4572 typedef struct {
4573 target_s390_regs_common regs;
4574 target_s390_fp_regs fpregs;
4575 } target_sigregs;
4576
4577 struct target_sigcontext {
4578 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4579 target_sigregs *sregs;
4580 };
4581
4582 typedef struct {
4583 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4584 struct target_sigcontext sc;
4585 target_sigregs sregs;
4586 int signo;
4587 uint8_t retcode[S390_SYSCALL_SIZE];
4588 } sigframe;
4589
4590 struct target_ucontext {
4591 target_ulong tuc_flags;
4592 struct target_ucontext *tuc_link;
4593 target_stack_t tuc_stack;
4594 target_sigregs tuc_mcontext;
4595 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4596 };
4597
4598 typedef struct {
4599 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4600 uint8_t retcode[S390_SYSCALL_SIZE];
4601 struct target_siginfo info;
4602 struct target_ucontext uc;
4603 } rt_sigframe;
4604
4605 static inline abi_ulong
4606 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4607 {
4608 abi_ulong sp;
4609
4610 /* Default to using normal stack */
4611 sp = env->regs[15];
4612
4613 /* This is the X/Open sanctioned signal stack switching. */
4614 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4615 if (!sas_ss_flags(sp)) {
4616 sp = target_sigaltstack_used.ss_sp +
4617 target_sigaltstack_used.ss_size;
4618 }
4619 }
4620
4621 /* This is the legacy signal stack switching. */
4622 else if (/* FIXME !user_mode(regs) */ 0 &&
4623 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4624 ka->sa_restorer) {
4625 sp = (abi_ulong) ka->sa_restorer;
4626 }
4627
4628 return (sp - frame_size) & -8ul;
4629 }
4630
4631 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4632 {
4633 int i;
4634 //save_access_regs(current->thread.acrs); FIXME
4635
4636 /* Copy a 'clean' PSW mask to the user to avoid leaking
4637 information about whether PER is currently on. */
4638 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4639 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4640 for (i = 0; i < 16; i++) {
4641 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4642 }
4643 for (i = 0; i < 16; i++) {
4644 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4645 }
4646 /*
4647 * We have to store the fp registers to current->thread.fp_regs
4648 * to merge them with the emulated registers.
4649 */
4650 //save_fp_regs(&current->thread.fp_regs); FIXME
4651 for (i = 0; i < 16; i++) {
4652 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4653 }
4654 }
4655
4656 static void setup_frame(int sig, struct target_sigaction *ka,
4657 target_sigset_t *set, CPUS390XState *env)
4658 {
4659 sigframe *frame;
4660 abi_ulong frame_addr;
4661
4662 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4663 trace_user_setup_frame(env, frame_addr);
4664 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4665 goto give_sigsegv;
4666 }
4667
4668 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4669
4670 save_sigregs(env, &frame->sregs);
4671
4672 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4673 (abi_ulong *)&frame->sc.sregs);
4674
4675 /* Set up to return from userspace. If provided, use a stub
4676 already in userspace. */
4677 if (ka->sa_flags & TARGET_SA_RESTORER) {
4678 env->regs[14] = (unsigned long)
4679 ka->sa_restorer | PSW_ADDR_AMODE;
4680 } else {
4681 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4682 | PSW_ADDR_AMODE;
4683 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4684 (uint16_t *)(frame->retcode));
4685 }
4686
4687 /* Set up backchain. */
4688 __put_user(env->regs[15], (abi_ulong *) frame);
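/* Per the s390 ABI, the back chain slot at offset 0 of the new frame holds
   the caller's stack pointer, which is what the store of the old regs[15]
   above provides. */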
4689
4690 /* Set up registers for signal handler */
4691 env->regs[15] = frame_addr;
4692 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4693
4694 env->regs[2] = sig; //map_signal(sig);
4695 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4696
4697 /* We forgot to include these in the sigcontext.
4698 To avoid breaking binary compatibility, they are passed as args. */
4699 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4700 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4701
4702 /* Place signal number on stack to allow backtrace from handler. */
4703 __put_user(env->regs[2], &frame->signo);
4704 unlock_user_struct(frame, frame_addr, 1);
4705 return;
4706
4707 give_sigsegv:
4708 force_sigsegv(sig);
4709 }
4710
4711 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4712 target_siginfo_t *info,
4713 target_sigset_t *set, CPUS390XState *env)
4714 {
4715 int i;
4716 rt_sigframe *frame;
4717 abi_ulong frame_addr;
4718
4719 frame_addr = get_sigframe(ka, env, sizeof *frame);
4720 trace_user_setup_rt_frame(env, frame_addr);
4721 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4722 goto give_sigsegv;
4723 }
4724
4725 tswap_siginfo(&frame->info, info);
4726
4727 /* Create the ucontext. */
4728 __put_user(0, &frame->uc.tuc_flags);
4729 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4730 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4731 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4732 &frame->uc.tuc_stack.ss_flags);
4733 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4734 save_sigregs(env, &frame->uc.tuc_mcontext);
4735 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4736 __put_user((abi_ulong)set->sig[i],
4737 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4738 }
4739
4740 /* Set up to return from userspace. If provided, use a stub
4741 already in userspace. */
4742 if (ka->sa_flags & TARGET_SA_RESTORER) {
4743 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4744 } else {
4745 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4746 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4747 (uint16_t *)(frame->retcode));
4748 }
4749
4750 /* Set up backchain. */
4751 __put_user(env->regs[15], (abi_ulong *) frame);
4752
4753 /* Set up registers for signal handler */
4754 env->regs[15] = frame_addr;
4755 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4756
4757 env->regs[2] = sig; //map_signal(sig);
4758 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4759 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4760 return;
4761
4762 give_sigsegv:
4763 force_sigsegv(sig);
4764 }
4765
4766 static int
4767 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4768 {
4769 int err = 0;
4770 int i;
4771
4772 for (i = 0; i < 16; i++) {
4773 __get_user(env->regs[i], &sc->regs.gprs[i]);
4774 }
4775
4776 __get_user(env->psw.mask, &sc->regs.psw.mask);
4777 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4778 (unsigned long long)env->psw.addr);
4779 __get_user(env->psw.addr, &sc->regs.psw.addr);
4780 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4781
4782 for (i = 0; i < 16; i++) {
4783 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4784 }
4785 for (i = 0; i < 16; i++) {
4786 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4787 }
4788
4789 return err;
4790 }
4791
4792 long do_sigreturn(CPUS390XState *env)
4793 {
4794 sigframe *frame;
4795 abi_ulong frame_addr = env->regs[15];
4796 target_sigset_t target_set;
4797 sigset_t set;
4798
4799 trace_user_do_sigreturn(env, frame_addr);
4800 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4801 goto badframe;
4802 }
4803 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4804
4805 target_to_host_sigset_internal(&set, &target_set);
4806 set_sigmask(&set); /* ~_BLOCKABLE? */
4807
4808 if (restore_sigregs(env, &frame->sregs)) {
4809 goto badframe;
4810 }
4811
4812 unlock_user_struct(frame, frame_addr, 0);
4813 return -TARGET_QEMU_ESIGRETURN;
4814
4815 badframe:
4816 force_sig(TARGET_SIGSEGV);
4817 return -TARGET_QEMU_ESIGRETURN;
4818 }
4819
4820 long do_rt_sigreturn(CPUS390XState *env)
4821 {
4822 rt_sigframe *frame;
4823 abi_ulong frame_addr = env->regs[15];
4824 sigset_t set;
4825
4826 trace_user_do_rt_sigreturn(env, frame_addr);
4827 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4828 goto badframe;
4829 }
4830 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4831
4832 set_sigmask(&set); /* ~_BLOCKABLE? */
4833
4834 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4835 goto badframe;
4836 }
4837
4838 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4839 get_sp_from_cpustate(env)) == -EFAULT) {
4840 goto badframe;
4841 }
4842 unlock_user_struct(frame, frame_addr, 0);
4843 return -TARGET_QEMU_ESIGRETURN;
4844
4845 badframe:
4846 unlock_user_struct(frame, frame_addr, 0);
4847 force_sig(TARGET_SIGSEGV);
4848 return -TARGET_QEMU_ESIGRETURN;
4849 }
4850
4851 #elif defined(TARGET_PPC)
4852
4853 /* Size of dummy stack frame allocated when calling signal handler.
4854 See arch/powerpc/include/asm/ptrace.h. */
4855 #if defined(TARGET_PPC64)
4856 #define SIGNAL_FRAMESIZE 128
4857 #else
4858 #define SIGNAL_FRAMESIZE 64
4859 #endif
4860
4861 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4862 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4863 struct target_mcontext {
4864 target_ulong mc_gregs[48];
4865 /* Includes fpscr. */
4866 uint64_t mc_fregs[33];
4867 #if defined(TARGET_PPC64)
4868 /* Pointer to the vector regs */
4869 target_ulong v_regs;
4870 #else
4871 target_ulong mc_pad[2];
4872 #endif
4873 /* We need to handle Altivec and SPE at the same time, which no
4874 kernel needs to do. Fortunately, the kernel defines this bit to
4875 be Altivec-register-large all the time, rather than trying to
4876 twiddle it based on the specific platform. */
4877 union {
4878 /* SPE vector registers. One extra for SPEFSCR. */
4879 uint32_t spe[33];
4880 /* Altivec vector registers. The packing of VSCR and VRSAVE
4881 varies depending on whether we're PPC64 or not: PPC64 splits
4882 them apart; PPC32 stuffs them together.
4883 We also need to account for the VSX registers on PPC64
4884 */
4885 #if defined(TARGET_PPC64)
4886 #define QEMU_NVRREG (34 + 16)
4887 /* On ppc64, this mcontext structure is naturally *unaligned*,
4888 * or rather it is aligned on an 8-byte boundary but not on
4889 * a 16-byte one. This pad fixes it up. This is also why the
4890 * vector regs are referenced by the v_regs pointer above, so
4891 * any amount of padding can be added here.
4892 */
4893 target_ulong pad;
4894 #else
4895 /* On ppc32, we are already aligned to 16 bytes */
4896 #define QEMU_NVRREG 33
4897 #endif
4898 /* We cannot use ppc_avr_t here as we do *not* want the implied
4899 * 16-bytes alignment that would result from it. This would have
4900 * the effect of making the whole struct target_mcontext aligned
4901 * which breaks the layout of struct target_ucontext on ppc64.
4902 */
4903 uint64_t altivec[QEMU_NVRREG][2];
4904 #undef QEMU_NVRREG
4905 } mc_vregs;
4906 };
4907
4908 /* See arch/powerpc/include/asm/sigcontext.h. */
4909 struct target_sigcontext {
4910 target_ulong _unused[4];
4911 int32_t signal;
4912 #if defined(TARGET_PPC64)
4913 int32_t pad0;
4914 #endif
4915 target_ulong handler;
4916 target_ulong oldmask;
4917 target_ulong regs; /* struct pt_regs __user * */
4918 #if defined(TARGET_PPC64)
4919 struct target_mcontext mcontext;
4920 #endif
4921 };
4922
4923 /* Indices for target_mcontext.mc_gregs, below.
4924 See arch/powerpc/include/asm/ptrace.h for details. */
4925 enum {
4926 TARGET_PT_R0 = 0,
4927 TARGET_PT_R1 = 1,
4928 TARGET_PT_R2 = 2,
4929 TARGET_PT_R3 = 3,
4930 TARGET_PT_R4 = 4,
4931 TARGET_PT_R5 = 5,
4932 TARGET_PT_R6 = 6,
4933 TARGET_PT_R7 = 7,
4934 TARGET_PT_R8 = 8,
4935 TARGET_PT_R9 = 9,
4936 TARGET_PT_R10 = 10,
4937 TARGET_PT_R11 = 11,
4938 TARGET_PT_R12 = 12,
4939 TARGET_PT_R13 = 13,
4940 TARGET_PT_R14 = 14,
4941 TARGET_PT_R15 = 15,
4942 TARGET_PT_R16 = 16,
4943 TARGET_PT_R17 = 17,
4944 TARGET_PT_R18 = 18,
4945 TARGET_PT_R19 = 19,
4946 TARGET_PT_R20 = 20,
4947 TARGET_PT_R21 = 21,
4948 TARGET_PT_R22 = 22,
4949 TARGET_PT_R23 = 23,
4950 TARGET_PT_R24 = 24,
4951 TARGET_PT_R25 = 25,
4952 TARGET_PT_R26 = 26,
4953 TARGET_PT_R27 = 27,
4954 TARGET_PT_R28 = 28,
4955 TARGET_PT_R29 = 29,
4956 TARGET_PT_R30 = 30,
4957 TARGET_PT_R31 = 31,
4958 TARGET_PT_NIP = 32,
4959 TARGET_PT_MSR = 33,
4960 TARGET_PT_ORIG_R3 = 34,
4961 TARGET_PT_CTR = 35,
4962 TARGET_PT_LNK = 36,
4963 TARGET_PT_XER = 37,
4964 TARGET_PT_CCR = 38,
4965 /* Yes, there are two registers with #39. One is 64-bit only. */
4966 TARGET_PT_MQ = 39,
4967 TARGET_PT_SOFTE = 39,
4968 TARGET_PT_TRAP = 40,
4969 TARGET_PT_DAR = 41,
4970 TARGET_PT_DSISR = 42,
4971 TARGET_PT_RESULT = 43,
4972 TARGET_PT_REGS_COUNT = 44
4973 };
4974
4975
4976 struct target_ucontext {
4977 target_ulong tuc_flags;
4978 target_ulong tuc_link; /* struct ucontext __user * */
4979 struct target_sigaltstack tuc_stack;
4980 #if !defined(TARGET_PPC64)
4981 int32_t tuc_pad[7];
4982 target_ulong tuc_regs; /* struct mcontext __user *
4983 points to uc_mcontext field */
4984 #endif
4985 target_sigset_t tuc_sigmask;
4986 #if defined(TARGET_PPC64)
4987 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4988 struct target_sigcontext tuc_sigcontext;
4989 #else
4990 int32_t tuc_maskext[30];
4991 int32_t tuc_pad2[3];
4992 struct target_mcontext tuc_mcontext;
4993 #endif
4994 };
4995
4996 /* See arch/powerpc/kernel/signal_32.c. */
4997 struct target_sigframe {
4998 struct target_sigcontext sctx;
4999 struct target_mcontext mctx;
5000 int32_t abigap[56];
5001 };
5002
5003 #if defined(TARGET_PPC64)
5004
5005 #define TARGET_TRAMP_SIZE 6
5006
5007 struct target_rt_sigframe {
5008 /* sys_rt_sigreturn requires the ucontext be the first field */
5009 struct target_ucontext uc;
5010 target_ulong _unused[2];
5011 uint32_t trampoline[TARGET_TRAMP_SIZE];
5012 target_ulong pinfo; /* struct siginfo __user * */
5013 target_ulong puc; /* void __user * */
5014 struct target_siginfo info;
5015 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
5016 char abigap[288];
5017 } __attribute__((aligned(16)));
5018
5019 #else
5020
5021 struct target_rt_sigframe {
5022 struct target_siginfo info;
5023 struct target_ucontext uc;
5024 int32_t abigap[56];
5025 };
5026
5027 #endif
5028
5029 #if defined(TARGET_PPC64)
5030
5031 struct target_func_ptr {
5032 target_ulong entry;
5033 target_ulong toc;
5034 };
5035
5036 #endif
5037
5038 /* We use the mc_pad field for the signal return trampoline. */
5039 #define tramp mc_pad
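/* On 32-bit PPC the two mc_pad words provide just enough room for the
   two-instruction "li r0,NR; sc" trampoline written by encode_trampoline()
   below. */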
5040
5041 /* See arch/powerpc/kernel/signal.c. */
5042 static target_ulong get_sigframe(struct target_sigaction *ka,
5043 CPUPPCState *env,
5044 int frame_size)
5045 {
5046 target_ulong oldsp;
5047
5048 oldsp = env->gpr[1];
5049
5050 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5051 (sas_ss_flags(oldsp) == 0)) {
5052 oldsp = (target_sigaltstack_used.ss_sp
5053 + target_sigaltstack_used.ss_size);
5054 }
5055
5056 return (oldsp - frame_size) & ~0xFUL;
5057 }
5058
5059 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5060 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5061 #define PPC_VEC_HI 0
5062 #define PPC_VEC_LO 1
5063 #else
5064 #define PPC_VEC_HI 1
5065 #define PPC_VEC_LO 0
5066 #endif
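/* PPC_VEC_HI/PPC_VEC_LO select which 64-bit half of a host ppc_avr_t maps to
   the guest-visible half of the vector register, depending on whether host
   and target endianness agree. */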
5067
5068
5069 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
5070 {
5071 target_ulong msr = env->msr;
5072 int i;
5073 target_ulong ccr = 0;
5074
5075 /* In general, the kernel attempts to be intelligent about what it
5076 needs to save for Altivec/FP/SPE registers. We don't care that
5077 much, so we just go ahead and save everything. */
5078
5079 /* Save general registers. */
5080 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5081 __put_user(env->gpr[i], &frame->mc_gregs[i]);
5082 }
5083 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5084 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5085 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5086 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5087
5088 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5089 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
5090 }
5091 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5092
5093 /* Save Altivec registers if necessary. */
5094 if (env->insns_flags & PPC_ALTIVEC) {
5095 uint32_t *vrsave;
5096 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5097 ppc_avr_t *avr = &env->avr[i];
5098 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
5099
5100 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5101 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5102 }
5103 /* Set MSR_VR in the saved MSR value to indicate that
5104 frame->mc_vregs contains valid data. */
5105 msr |= MSR_VR;
5106 #if defined(TARGET_PPC64)
5107 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
5108 /* 64-bit needs to put a pointer to the vectors in the frame */
5109 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
5110 #else
5111 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
5112 #endif
5113 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
5114 }
5115
5116 /* Save VSX second halves */
5117 if (env->insns_flags2 & PPC2_VSX) {
5118 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5119 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5120 __put_user(env->vsr[i], &vsregs[i]);
5121 }
5122 }
5123
5124 /* Save floating point registers. */
5125 if (env->insns_flags & PPC_FLOAT) {
5126 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5127 __put_user(env->fpr[i], &frame->mc_fregs[i]);
5128 }
5129 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
5130 }
5131
5132 /* Save SPE registers. The kernel only saves the high half. */
5133 if (env->insns_flags & PPC_SPE) {
5134 #if defined(TARGET_PPC64)
5135 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5136 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
5137 }
5138 #else
5139 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5140 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5141 }
5142 #endif
5143 /* Set MSR_SPE in the saved MSR value to indicate that
5144 frame->mc_vregs contains valid data. */
5145 msr |= MSR_SPE;
5146 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5147 }
5148
5149 /* Store MSR. */
5150 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5151 }
5152
5153 static void encode_trampoline(int sigret, uint32_t *tramp)
5154 {
5155 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5156 if (sigret) {
5157 __put_user(0x38000000 | sigret, &tramp[0]);
5158 __put_user(0x44000002, &tramp[1]);
5159 }
5160 }
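/* For example, assuming TARGET_NR_sigreturn is 119 (0x77) as on Linux/PPC,
   the two words written would be 0x38000077 ("li r0,119") and
   0x44000002 ("sc"). */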
5161
5162 static void restore_user_regs(CPUPPCState *env,
5163 struct target_mcontext *frame, int sig)
5164 {
5165 target_ulong save_r2 = 0;
5166 target_ulong msr;
5167 target_ulong ccr;
5168
5169 int i;
5170
5171 if (!sig) {
5172 save_r2 = env->gpr[2];
5173 }
5174
5175 /* Restore general registers. */
5176 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5177 __get_user(env->gpr[i], &frame->mc_gregs[i]);
5178 }
5179 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5180 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5181 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5182 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5183 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5184
5185 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5186 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
5187 }
5188
5189 if (!sig) {
5190 env->gpr[2] = save_r2;
5191 }
5192 /* Restore MSR. */
5193 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5194
5195 /* If doing signal return, restore the previous little-endian mode. */
5196 if (sig)
5197 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
5198
5199 /* Restore Altivec registers if necessary. */
5200 if (env->insns_flags & PPC_ALTIVEC) {
5201 ppc_avr_t *v_regs;
5202 uint32_t *vrsave;
5203 #if defined(TARGET_PPC64)
5204 uint64_t v_addr;
5205 /* 64-bit needs to recover the pointer to the vectors from the frame */
5206 __get_user(v_addr, &frame->v_regs);
5207 v_regs = g2h(v_addr);
5208 #else
5209 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
5210 #endif
5211 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5212 ppc_avr_t *avr = &env->avr[i];
5213 ppc_avr_t *vreg = &v_regs[i];
5214
5215 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5216 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5217 }
5218 /* Recover VRSAVE from the frame; it is stored after the vector
5219 regs (entry 33 on ppc64, entry 32 on ppc32). */
5220 #if defined(TARGET_PPC64)
5221 vrsave = (uint32_t *)&v_regs[33];
5222 #else
5223 vrsave = (uint32_t *)&v_regs[32];
5224 #endif
5225 __get_user(env->spr[SPR_VRSAVE], vrsave);
5226 }
5227
5228 /* Restore VSX second halves */
5229 if (env->insns_flags2 & PPC2_VSX) {
5230 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5231 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5232 __get_user(env->vsr[i], &vsregs[i]);
5233 }
5234 }
5235
5236 /* Restore floating point registers. */
5237 if (env->insns_flags & PPC_FLOAT) {
5238 uint64_t fpscr;
5239 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5240 __get_user(env->fpr[i], &frame->mc_fregs[i]);
5241 }
5242 __get_user(fpscr, &frame->mc_fregs[32]);
5243 env->fpscr = (uint32_t) fpscr;
5244 }
5245
5246 /* Restore SPE registers. The kernel only restores the high half. */
5247 if (env->insns_flags & PPC_SPE) {
5248 #if defined(TARGET_PPC64)
5249 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5250 uint32_t hi;
5251
5252 __get_user(hi, &frame->mc_vregs.spe[i]);
5253 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
5254 }
5255 #else
5256 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5257 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5258 }
5259 #endif
5260 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5261 }
5262 }
5263
5264 #if !defined(TARGET_PPC64)
5265 static void setup_frame(int sig, struct target_sigaction *ka,
5266 target_sigset_t *set, CPUPPCState *env)
5267 {
5268 struct target_sigframe *frame;
5269 struct target_sigcontext *sc;
5270 target_ulong frame_addr, newsp;
5271 int err = 0;
5272
5273 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5274 trace_user_setup_frame(env, frame_addr);
5275 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5276 goto sigsegv;
5277 sc = &frame->sctx;
5278
5279 __put_user(ka->_sa_handler, &sc->handler);
5280 __put_user(set->sig[0], &sc->oldmask);
5281 __put_user(set->sig[1], &sc->_unused[3]);
5282 __put_user(h2g(&frame->mctx), &sc->regs);
5283 __put_user(sig, &sc->signal);
5284
5285 /* Save user regs. */
5286 save_user_regs(env, &frame->mctx);
5287
5288 /* Construct the trampoline code on the stack. */
5289 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5290
5291 /* The kernel checks for the presence of a VDSO here. We don't
5292 emulate a vdso, so use a sigreturn system call. */
5293 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5294
5295 /* Turn off all fp exceptions. */
5296 env->fpscr = 0;
5297
5298 /* Create a stack frame for the caller of the handler. */
5299 newsp = frame_addr - SIGNAL_FRAMESIZE;
5300 err |= put_user(env->gpr[1], newsp, target_ulong);
5301
5302 if (err)
5303 goto sigsegv;
5304
5305 /* Set up registers for signal handler. */
5306 env->gpr[1] = newsp;
5307 env->gpr[3] = sig;
5308 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5309
5310 env->nip = (target_ulong) ka->_sa_handler;
5311
5312 /* Signal handlers are entered in big-endian mode. */
5313 env->msr &= ~(1ull << MSR_LE);
5314
5315 unlock_user_struct(frame, frame_addr, 1);
5316 return;
5317
5318 sigsegv:
5319 unlock_user_struct(frame, frame_addr, 1);
5320 force_sigsegv(sig);
5321 }
5322 #endif /* !defined(TARGET_PPC64) */
5323
5324 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5325 target_siginfo_t *info,
5326 target_sigset_t *set, CPUPPCState *env)
5327 {
5328 struct target_rt_sigframe *rt_sf;
5329 uint32_t *trampptr = 0;
5330 struct target_mcontext *mctx = 0;
5331 target_ulong rt_sf_addr, newsp = 0;
5332 int i, err = 0;
5333 #if defined(TARGET_PPC64)
5334 struct target_sigcontext *sc = 0;
5335 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5336 #endif
5337
5338 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5339 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5340 goto sigsegv;
5341
5342 tswap_siginfo(&rt_sf->info, info);
5343
5344 __put_user(0, &rt_sf->uc.tuc_flags);
5345 __put_user(0, &rt_sf->uc.tuc_link);
5346 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5347 &rt_sf->uc.tuc_stack.ss_sp);
5348 __put_user(sas_ss_flags(env->gpr[1]),
5349 &rt_sf->uc.tuc_stack.ss_flags);
5350 __put_user(target_sigaltstack_used.ss_size,
5351 &rt_sf->uc.tuc_stack.ss_size);
5352 #if !defined(TARGET_PPC64)
5353 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5354 &rt_sf->uc.tuc_regs);
5355 #endif
5356 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5357 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5358 }
5359
5360 #if defined(TARGET_PPC64)
5361 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
5362 trampptr = &rt_sf->trampoline[0];
5363
5364 sc = &rt_sf->uc.tuc_sigcontext;
5365 __put_user(h2g(mctx), &sc->regs);
5366 __put_user(sig, &sc->signal);
5367 #else
5368 mctx = &rt_sf->uc.tuc_mcontext;
5369 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5370 #endif
5371
5372 save_user_regs(env, mctx);
5373 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5374
5375 /* The kernel checks for the presence of a VDSO here. We don't
5376 emulate a vdso, so use a sigreturn system call. */
5377 env->lr = (target_ulong) h2g(trampptr);
5378
5379 /* Turn off all fp exceptions. */
5380 env->fpscr = 0;
5381
5382 /* Create a stack frame for the caller of the handler. */
5383 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5384 err |= put_user(env->gpr[1], newsp, target_ulong);
5385
5386 if (err)
5387 goto sigsegv;
5388
5389 /* Set up registers for signal handler. */
5390 env->gpr[1] = newsp;
5391 env->gpr[3] = (target_ulong) sig;
5392 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5393 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5394 env->gpr[6] = (target_ulong) h2g(rt_sf);
5395
5396 #if defined(TARGET_PPC64)
5397 if (get_ppc64_abi(image) < 2) {
5398 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5399 struct target_func_ptr *handler =
5400 (struct target_func_ptr *)g2h(ka->_sa_handler);
5401 env->nip = tswapl(handler->entry);
5402 env->gpr[2] = tswapl(handler->toc);
5403 } else {
5404 /* ELFv2 PPC64 function pointers are entry points, but R12
5405 * must also be set */
5406 env->nip = tswapl((target_ulong) ka->_sa_handler);
5407 env->gpr[12] = env->nip;
5408 }
5409 #else
5410 env->nip = (target_ulong) ka->_sa_handler;
5411 #endif
5412
5413 /* Signal handlers are entered in big-endian mode. */
5414 env->msr &= ~(1ull << MSR_LE);
5415
5416 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5417 return;
5418
5419 sigsegv:
5420 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5421 force_sigsegv(sig);
5422
5423 }
5424
5425 #if !defined(TARGET_PPC64)
5426 long do_sigreturn(CPUPPCState *env)
5427 {
5428 struct target_sigcontext *sc = NULL;
5429 struct target_mcontext *sr = NULL;
5430 target_ulong sr_addr = 0, sc_addr;
5431 sigset_t blocked;
5432 target_sigset_t set;
5433
5434 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5435 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5436 goto sigsegv;
5437
5438 #if defined(TARGET_PPC64)
5439 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5440 #else
5441 __get_user(set.sig[0], &sc->oldmask);
5442 __get_user(set.sig[1], &sc->_unused[3]);
5443 #endif
5444 target_to_host_sigset_internal(&blocked, &set);
5445 set_sigmask(&blocked);
5446
5447 __get_user(sr_addr, &sc->regs);
5448 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5449 goto sigsegv;
5450 restore_user_regs(env, sr, 1);
5451
5452 unlock_user_struct(sr, sr_addr, 1);
5453 unlock_user_struct(sc, sc_addr, 1);
5454 return -TARGET_QEMU_ESIGRETURN;
5455
5456 sigsegv:
5457 unlock_user_struct(sr, sr_addr, 1);
5458 unlock_user_struct(sc, sc_addr, 1);
5459 force_sig(TARGET_SIGSEGV);
5460 return -TARGET_QEMU_ESIGRETURN;
5461 }
5462 #endif /* !defined(TARGET_PPC64) */
5463
5464 /* See arch/powerpc/kernel/signal_32.c. */
5465 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5466 {
5467 struct target_mcontext *mcp;
5468 target_ulong mcp_addr;
5469 sigset_t blocked;
5470 target_sigset_t set;
5471
5472 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5473 sizeof (set)))
5474 return 1;
5475
5476 #if defined(TARGET_PPC64)
5477 mcp_addr = h2g(ucp) +
5478 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5479 #else
5480 __get_user(mcp_addr, &ucp->tuc_regs);
5481 #endif
5482
5483 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5484 return 1;
5485
5486 target_to_host_sigset_internal(&blocked, &set);
5487 set_sigmask(&blocked);
5488 restore_user_regs(env, mcp, sig);
5489
5490 unlock_user_struct(mcp, mcp_addr, 1);
5491 return 0;
5492 }
5493
5494 long do_rt_sigreturn(CPUPPCState *env)
5495 {
5496 struct target_rt_sigframe *rt_sf = NULL;
5497 target_ulong rt_sf_addr;
5498
5499 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5500 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5501 goto sigsegv;
5502
5503 if (do_setcontext(&rt_sf->uc, env, 1))
5504 goto sigsegv;
5505
5506 do_sigaltstack(rt_sf_addr
5507 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5508 0, env->gpr[1]);
5509
5510 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5511 return -TARGET_QEMU_ESIGRETURN;
5512
5513 sigsegv:
5514 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5515 force_sig(TARGET_SIGSEGV);
5516 return -TARGET_QEMU_ESIGRETURN;
5517 }
5518
5519 #elif defined(TARGET_M68K)
5520
5521 struct target_sigcontext {
5522 abi_ulong sc_mask;
5523 abi_ulong sc_usp;
5524 abi_ulong sc_d0;
5525 abi_ulong sc_d1;
5526 abi_ulong sc_a0;
5527 abi_ulong sc_a1;
5528 unsigned short sc_sr;
5529 abi_ulong sc_pc;
5530 };
5531
5532 struct target_sigframe
5533 {
5534 abi_ulong pretcode;
5535 int sig;
5536 int code;
5537 abi_ulong psc;
5538 char retcode[8];
5539 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5540 struct target_sigcontext sc;
5541 };
5542
5543 typedef int target_greg_t;
5544 #define TARGET_NGREG 18
5545 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5546
5547 typedef struct target_fpregset {
5548 int f_fpcntl[3];
5549 int f_fpregs[8*3];
5550 } target_fpregset_t;
5551
5552 struct target_mcontext {
5553 int version;
5554 target_gregset_t gregs;
5555 target_fpregset_t fpregs;
5556 };
5557
5558 #define TARGET_MCONTEXT_VERSION 2
5559
5560 struct target_ucontext {
5561 abi_ulong tuc_flags;
5562 abi_ulong tuc_link;
5563 target_stack_t tuc_stack;
5564 struct target_mcontext tuc_mcontext;
5565 abi_long tuc_filler[80];
5566 target_sigset_t tuc_sigmask;
5567 };
5568
5569 struct target_rt_sigframe
5570 {
5571 abi_ulong pretcode;
5572 int sig;
5573 abi_ulong pinfo;
5574 abi_ulong puc;
5575 char retcode[8];
5576 struct target_siginfo info;
5577 struct target_ucontext uc;
5578 };
5579
5580 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5581 abi_ulong mask)
5582 {
5583 __put_user(mask, &sc->sc_mask);
5584 __put_user(env->aregs[7], &sc->sc_usp);
5585 __put_user(env->dregs[0], &sc->sc_d0);
5586 __put_user(env->dregs[1], &sc->sc_d1);
5587 __put_user(env->aregs[0], &sc->sc_a0);
5588 __put_user(env->aregs[1], &sc->sc_a1);
5589 __put_user(env->sr, &sc->sc_sr);
5590 __put_user(env->pc, &sc->sc_pc);
5591 }
5592
5593 static void
5594 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5595 {
5596 int temp;
5597
5598 __get_user(env->aregs[7], &sc->sc_usp);
5599 __get_user(env->dregs[0], &sc->sc_d0);
5600 __get_user(env->dregs[1], &sc->sc_d1);
5601 __get_user(env->aregs[0], &sc->sc_a0);
5602 __get_user(env->aregs[1], &sc->sc_a1);
5603 __get_user(env->pc, &sc->sc_pc);
5604 __get_user(temp, &sc->sc_sr);
5605 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5606 }
5607
5608 /*
5609 * Determine which stack to use.
5610 */
5611 static inline abi_ulong
5612 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5613 size_t frame_size)
5614 {
5615 unsigned long sp;
5616
5617 sp = regs->aregs[7];
5618
5619 /* This is the X/Open sanctioned signal stack switching. */
5620 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5621 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5622 }
5623
5624 return ((sp - frame_size) & -8UL);
5625 }
5626
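/*
 * Build the old-style frame on the user stack: the signal number, a
 * pointer to the in-frame sigcontext, the remaining words of the
 * blocked mask, and the "moveq #__NR_sigreturn,d0; trap #0" trampoline
 * whose address goes in pretcode.  On exit a7 points at the frame and
 * the PC at the handler.
 */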
5627 static void setup_frame(int sig, struct target_sigaction *ka,
5628 target_sigset_t *set, CPUM68KState *env)
5629 {
5630 struct target_sigframe *frame;
5631 abi_ulong frame_addr;
5632 abi_ulong retcode_addr;
5633 abi_ulong sc_addr;
5634 int i;
5635
5636 frame_addr = get_sigframe(ka, env, sizeof *frame);
5637 trace_user_setup_frame(env, frame_addr);
5638 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5639 goto give_sigsegv;
5640 }
5641
5642 __put_user(sig, &frame->sig);
5643
5644 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5645 __put_user(sc_addr, &frame->psc);
5646
5647 setup_sigcontext(&frame->sc, env, set->sig[0]);
5648
5649 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5650 __put_user(set->sig[i], &frame->extramask[i - 1]);
5651 }
5652
5653 /* Set up to return from userspace. */
5654
5655 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5656 __put_user(retcode_addr, &frame->pretcode);
5657
5658 /* moveq #,d0; trap #0 */
5659
5660 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5661 (uint32_t *)(frame->retcode));
5662
5663 /* Set up to return from userspace */
5664
5665 env->aregs[7] = frame_addr;
5666 env->pc = ka->_sa_handler;
5667
5668 unlock_user_struct(frame, frame_addr, 1);
5669 return;
5670
5671 give_sigsegv:
5672 force_sigsegv(sig);
5673 }
5674
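/*
 * The rt frame's mcontext uses the kernel's gregset layout:
 * gregs[0..7] = d0-d7, gregs[8..15] = a0-a7, gregs[16] = pc,
 * gregs[17] = sr (only the CCR bits are restored on the way back).
 */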
5675 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5676 CPUM68KState *env)
5677 {
5678 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5679 uint32_t sr = cpu_m68k_get_ccr(env);
5680
5681 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5682 __put_user(env->dregs[0], &gregs[0]);
5683 __put_user(env->dregs[1], &gregs[1]);
5684 __put_user(env->dregs[2], &gregs[2]);
5685 __put_user(env->dregs[3], &gregs[3]);
5686 __put_user(env->dregs[4], &gregs[4]);
5687 __put_user(env->dregs[5], &gregs[5]);
5688 __put_user(env->dregs[6], &gregs[6]);
5689 __put_user(env->dregs[7], &gregs[7]);
5690 __put_user(env->aregs[0], &gregs[8]);
5691 __put_user(env->aregs[1], &gregs[9]);
5692 __put_user(env->aregs[2], &gregs[10]);
5693 __put_user(env->aregs[3], &gregs[11]);
5694 __put_user(env->aregs[4], &gregs[12]);
5695 __put_user(env->aregs[5], &gregs[13]);
5696 __put_user(env->aregs[6], &gregs[14]);
5697 __put_user(env->aregs[7], &gregs[15]);
5698 __put_user(env->pc, &gregs[16]);
5699 __put_user(sr, &gregs[17]);
5700
5701 return 0;
5702 }
5703
5704 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5705 struct target_ucontext *uc)
5706 {
5707 int temp;
5708 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5709
5710 __get_user(temp, &uc->tuc_mcontext.version);
5711 if (temp != TARGET_MCONTEXT_VERSION)
5712 goto badframe;
5713
5714 /* restore passed registers */
5715 __get_user(env->dregs[0], &gregs[0]);
5716 __get_user(env->dregs[1], &gregs[1]);
5717 __get_user(env->dregs[2], &gregs[2]);
5718 __get_user(env->dregs[3], &gregs[3]);
5719 __get_user(env->dregs[4], &gregs[4]);
5720 __get_user(env->dregs[5], &gregs[5]);
5721 __get_user(env->dregs[6], &gregs[6]);
5722 __get_user(env->dregs[7], &gregs[7]);
5723 __get_user(env->aregs[0], &gregs[8]);
5724 __get_user(env->aregs[1], &gregs[9]);
5725 __get_user(env->aregs[2], &gregs[10]);
5726 __get_user(env->aregs[3], &gregs[11]);
5727 __get_user(env->aregs[4], &gregs[12]);
5728 __get_user(env->aregs[5], &gregs[13]);
5729 __get_user(env->aregs[6], &gregs[14]);
5730 __get_user(env->aregs[7], &gregs[15]);
5731 __get_user(env->pc, &gregs[16]);
5732 __get_user(temp, &gregs[17]);
5733 cpu_m68k_set_ccr(env, temp);
5734
5735 return 0;
5736
5737 badframe:
5738 return 1;
5739 }
5740
5741 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5742 target_siginfo_t *info,
5743 target_sigset_t *set, CPUM68KState *env)
5744 {
5745 struct target_rt_sigframe *frame;
5746 abi_ulong frame_addr;
5747 abi_ulong retcode_addr;
5748 abi_ulong info_addr;
5749 abi_ulong uc_addr;
5750 int err = 0;
5751 int i;
5752
5753 frame_addr = get_sigframe(ka, env, sizeof *frame);
5754 trace_user_setup_rt_frame(env, frame_addr);
5755 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5756 goto give_sigsegv;
5757 }
5758
5759 __put_user(sig, &frame->sig);
5760
5761 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5762 __put_user(info_addr, &frame->pinfo);
5763
5764 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5765 __put_user(uc_addr, &frame->puc);
5766
5767 tswap_siginfo(&frame->info, info);
5768
5769 /* Create the ucontext */
5770
5771 __put_user(0, &frame->uc.tuc_flags);
5772 __put_user(0, &frame->uc.tuc_link);
5773 __put_user(target_sigaltstack_used.ss_sp,
5774 &frame->uc.tuc_stack.ss_sp);
5775 __put_user(sas_ss_flags(env->aregs[7]),
5776 &frame->uc.tuc_stack.ss_flags);
5777 __put_user(target_sigaltstack_used.ss_size,
5778 &frame->uc.tuc_stack.ss_size);
5779 err |= target_rt_setup_ucontext(&frame->uc, env);
5780
5781 if (err)
5782 goto give_sigsegv;
5783
5784 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5785 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5786 }
5787
5788 /* Set up to return from userspace. */
5789
5790 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5791 __put_user(retcode_addr, &frame->pretcode);
5792
5793 /* moveq #,d0; notb d0; trap #0.  __NR_rt_sigreturn does not fit in moveq's signed 8-bit immediate, so its complement is stored and notb recovers it at run time. */
5794
5795 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5796 (uint32_t *)(frame->retcode + 0));
5797 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5798
5799 if (err)
5800 goto give_sigsegv;
5801
5802 /* Set up to return from userspace */
5803
5804 env->aregs[7] = frame_addr;
5805 env->pc = ka->_sa_handler;
5806
5807 unlock_user_struct(frame, frame_addr, 1);
5808 return;
5809
5810 give_sigsegv:
5811 unlock_user_struct(frame, frame_addr, 1);
5812 force_sigsegv(sig);
5813 }
5814
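/*
 * The handler returns through pretcode, so by the time the trampoline
 * traps back in, that return address has been popped and the frame
 * starts 4 bytes below the current usp (hence aregs[7] - 4).
 */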
5815 long do_sigreturn(CPUM68KState *env)
5816 {
5817 struct target_sigframe *frame;
5818 abi_ulong frame_addr = env->aregs[7] - 4;
5819 target_sigset_t target_set;
5820 sigset_t set;
5821 int i;
5822
5823 trace_user_do_sigreturn(env, frame_addr);
5824 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5825 goto badframe;
5826
5827 /* set blocked signals */
5828
5829 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5830
5831 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5832 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5833 }
5834
5835 target_to_host_sigset_internal(&set, &target_set);
5836 set_sigmask(&set);
5837
5838 /* restore registers */
5839
5840 restore_sigcontext(env, &frame->sc);
5841
5842 unlock_user_struct(frame, frame_addr, 0);
5843 return -TARGET_QEMU_ESIGRETURN;
5844
5845 badframe:
5846 force_sig(TARGET_SIGSEGV);
5847 return -TARGET_QEMU_ESIGRETURN;
5848 }
5849
5850 long do_rt_sigreturn(CPUM68KState *env)
5851 {
5852 struct target_rt_sigframe *frame;
5853 abi_ulong frame_addr = env->aregs[7] - 4;
5854 sigset_t set;
5855
5856 trace_user_do_rt_sigreturn(env, frame_addr);
5857 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5858 goto badframe;
5859
5860 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5861 set_sigmask(&set);
5862
5863 /* restore registers */
5864
5865 if (target_rt_restore_ucontext(env, &frame->uc))
5866 goto badframe;
5867
5868 if (do_sigaltstack(frame_addr +
5869 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5870 0, get_sp_from_cpustate(env)) == -EFAULT)
5871 goto badframe;
5872
5873 unlock_user_struct(frame, frame_addr, 0);
5874 return -TARGET_QEMU_ESIGRETURN;
5875
5876 badframe:
5877 unlock_user_struct(frame, frame_addr, 0);
5878 force_sig(TARGET_SIGSEGV);
5879 return -TARGET_QEMU_ESIGRETURN;
5880 }
5881
5882 #elif defined(TARGET_ALPHA)
5883
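/*
 * Alpha signal frames, mirroring arch/alpha/kernel/signal.c: a bare
 * sigcontext for old-style signals, siginfo plus ucontext for rt
 * signals.  When no sa_restorer is supplied, a three-instruction
 * trampoline (mov $30,$16; ldi $0,#__NR_(rt_)sigreturn; callsys) is
 * written into the frame and its address placed in $26.
 */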
5884 struct target_sigcontext {
5885 abi_long sc_onstack;
5886 abi_long sc_mask;
5887 abi_long sc_pc;
5888 abi_long sc_ps;
5889 abi_long sc_regs[32];
5890 abi_long sc_ownedfp;
5891 abi_long sc_fpregs[32];
5892 abi_ulong sc_fpcr;
5893 abi_ulong sc_fp_control;
5894 abi_ulong sc_reserved1;
5895 abi_ulong sc_reserved2;
5896 abi_ulong sc_ssize;
5897 abi_ulong sc_sbase;
5898 abi_ulong sc_traparg_a0;
5899 abi_ulong sc_traparg_a1;
5900 abi_ulong sc_traparg_a2;
5901 abi_ulong sc_fp_trap_pc;
5902 abi_ulong sc_fp_trigger_sum;
5903 abi_ulong sc_fp_trigger_inst;
5904 };
5905
5906 struct target_ucontext {
5907 abi_ulong tuc_flags;
5908 abi_ulong tuc_link;
5909 abi_ulong tuc_osf_sigmask;
5910 target_stack_t tuc_stack;
5911 struct target_sigcontext tuc_mcontext;
5912 target_sigset_t tuc_sigmask;
5913 };
5914
5915 struct target_sigframe {
5916 struct target_sigcontext sc;
5917 unsigned int retcode[3];
5918 };
5919
5920 struct target_rt_sigframe {
5921 target_siginfo_t info;
5922 struct target_ucontext uc;
5923 unsigned int retcode[3];
5924 };
5925
5926 #define INSN_MOV_R30_R16 0x47fe0410
5927 #define INSN_LDI_R0 0x201f0000
5928 #define INSN_CALLSYS 0x00000083
5929
5930 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5931 abi_ulong frame_addr, target_sigset_t *set)
5932 {
5933 int i;
5934
5935 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5936 __put_user(set->sig[0], &sc->sc_mask);
5937 __put_user(env->pc, &sc->sc_pc);
5938 __put_user(8, &sc->sc_ps);
5939
5940 for (i = 0; i < 31; ++i) {
5941 __put_user(env->ir[i], &sc->sc_regs[i]);
5942 }
5943 __put_user(0, &sc->sc_regs[31]);
5944
5945 for (i = 0; i < 31; ++i) {
5946 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5947 }
5948 __put_user(0, &sc->sc_fpregs[31]);
5949 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5950
5951 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5952 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5953 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5954 }
5955
5956 static void restore_sigcontext(CPUAlphaState *env,
5957 struct target_sigcontext *sc)
5958 {
5959 uint64_t fpcr;
5960 int i;
5961
5962 __get_user(env->pc, &sc->sc_pc);
5963
5964 for (i = 0; i < 31; ++i) {
5965 __get_user(env->ir[i], &sc->sc_regs[i]);
5966 }
5967 for (i = 0; i < 31; ++i) {
5968 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5969 }
5970
5971 __get_user(fpcr, &sc->sc_fpcr);
5972 cpu_alpha_store_fpcr(env, fpcr);
5973 }
5974
5975 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5976 CPUAlphaState *env,
5977 unsigned long framesize)
5978 {
5979 abi_ulong sp = env->ir[IR_SP];
5980
5981 /* This is the X/Open sanctioned signal stack switching. */
5982 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5983 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5984 }
5985 return (sp - framesize) & -32;
5986 }
5987
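/*
 * Handler calling convention: $16 = signal number, $17 = 0 (or the
 * siginfo pointer for rt frames), $18 = the sigcontext/ucontext,
 * $26 = restorer or in-frame trampoline, $27 and the PC = the handler.
 */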
5988 static void setup_frame(int sig, struct target_sigaction *ka,
5989 target_sigset_t *set, CPUAlphaState *env)
5990 {
5991 abi_ulong frame_addr, r26;
5992 struct target_sigframe *frame;
5993 int err = 0;
5994
5995 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5996 trace_user_setup_frame(env, frame_addr);
5997 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5998 goto give_sigsegv;
5999 }
6000
6001 setup_sigcontext(&frame->sc, env, frame_addr, set);
6002
6003 if (ka->sa_restorer) {
6004 r26 = ka->sa_restorer;
6005 } else {
6006 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6007 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6008 &frame->retcode[1]);
6009 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6010 /* imb() */
6011 r26 = frame_addr;
6012 }
6013
6014 unlock_user_struct(frame, frame_addr, 1);
6015
6016 if (err) {
6017 give_sigsegv:
6018 force_sigsegv(sig);
6019 return;
6020 }
6021
6022 env->ir[IR_RA] = r26;
6023 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6024 env->ir[IR_A0] = sig;
6025 env->ir[IR_A1] = 0;
6026 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6027 env->ir[IR_SP] = frame_addr;
6028 }
6029
6030 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6031 target_siginfo_t *info,
6032 target_sigset_t *set, CPUAlphaState *env)
6033 {
6034 abi_ulong frame_addr, r26;
6035 struct target_rt_sigframe *frame;
6036 int i, err = 0;
6037
6038 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6039 trace_user_setup_rt_frame(env, frame_addr);
6040 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6041 goto give_sigsegv;
6042 }
6043
6044 tswap_siginfo(&frame->info, info);
6045
6046 __put_user(0, &frame->uc.tuc_flags);
6047 __put_user(0, &frame->uc.tuc_link);
6048 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6049 __put_user(target_sigaltstack_used.ss_sp,
6050 &frame->uc.tuc_stack.ss_sp);
6051 __put_user(sas_ss_flags(env->ir[IR_SP]),
6052 &frame->uc.tuc_stack.ss_flags);
6053 __put_user(target_sigaltstack_used.ss_size,
6054 &frame->uc.tuc_stack.ss_size);
6055 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6056 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6057 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6058 }
6059
6060 if (ka->sa_restorer) {
6061 r26 = ka->sa_restorer;
6062 } else {
6063 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6064 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6065 &frame->retcode[1]);
6066 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6067 /* imb(); */
6068 r26 = frame_addr;
6069 }
6070
6071 if (err) {
6072 give_sigsegv:
6073 force_sigsegv(sig);
6074 return;
6075 }
6076
6077 env->ir[IR_RA] = r26;
6078 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6079 env->ir[IR_A0] = sig;
6080 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6081 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6082 env->ir[IR_SP] = frame_addr;
6083 }
6084
6085 long do_sigreturn(CPUAlphaState *env)
6086 {
6087 struct target_sigcontext *sc;
6088 abi_ulong sc_addr = env->ir[IR_A0];
6089 target_sigset_t target_set;
6090 sigset_t set;
6091
6092 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6093 goto badframe;
6094 }
6095
6096 target_sigemptyset(&target_set);
6097 __get_user(target_set.sig[0], &sc->sc_mask);
6098
6099 target_to_host_sigset_internal(&set, &target_set);
6100 set_sigmask(&set);
6101
6102 restore_sigcontext(env, sc);
6103 unlock_user_struct(sc, sc_addr, 0);
6104 return -TARGET_QEMU_ESIGRETURN;
6105
6106 badframe:
6107 force_sig(TARGET_SIGSEGV);
6108 return -TARGET_QEMU_ESIGRETURN;
6109 }
6110
6111 long do_rt_sigreturn(CPUAlphaState *env)
6112 {
6113 abi_ulong frame_addr = env->ir[IR_A0];
6114 struct target_rt_sigframe *frame;
6115 sigset_t set;
6116
6117 trace_user_do_rt_sigreturn(env, frame_addr);
6118 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6119 goto badframe;
6120 }
6121 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6122 set_sigmask(&set);
6123
6124 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6125 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6126 uc.tuc_stack),
6127 0, env->ir[IR_SP]) == -EFAULT) {
6128 goto badframe;
6129 }
6130
6131 unlock_user_struct(frame, frame_addr, 0);
6132 return -TARGET_QEMU_ESIGRETURN;
6133
6134
6135 badframe:
6136 unlock_user_struct(frame, frame_addr, 0);
6137 force_sig(TARGET_SIGSEGV);
6138 return -TARGET_QEMU_ESIGRETURN;
6139 }
6140
6141 #elif defined(TARGET_TILEGX)
6142
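/*
 * TILE-Gx is rt-signal only.  The sigcontext saves all 56 general
 * registers (tp/sp/lr aliased at the top) plus pc, ics and the fault
 * number; the trampoline is the two bundles "{ moveli r10, 139 };
 * { swint1 }", 139 being __NR_rt_sigreturn in the asm-generic table.
 */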
6143 struct target_sigcontext {
6144 union {
6145 /* General-purpose registers. */
6146 abi_ulong gregs[56];
6147 struct {
6148 abi_ulong __gregs[53];
6149 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
6150 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
6151 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
6152 };
6153 };
6154 abi_ulong pc; /* Program counter. */
6155 abi_ulong ics; /* In Interrupt Critical Section? */
6156 abi_ulong faultnum; /* Fault number. */
6157 abi_ulong pad[5];
6158 };
6159
6160 struct target_ucontext {
6161 abi_ulong tuc_flags;
6162 abi_ulong tuc_link;
6163 target_stack_t tuc_stack;
6164 struct target_sigcontext tuc_mcontext;
6165 target_sigset_t tuc_sigmask; /* mask last for extensibility */
6166 };
6167
6168 struct target_rt_sigframe {
6169 unsigned char save_area[16]; /* caller save area */
6170 struct target_siginfo info;
6171 struct target_ucontext uc;
6172 abi_ulong retcode[2];
6173 };
6174
6175 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6176 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
6177
6178
6179 static void setup_sigcontext(struct target_sigcontext *sc,
6180 CPUArchState *env, int signo)
6181 {
6182 int i;
6183
6184 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6185 __put_user(env->regs[i], &sc->gregs[i]);
6186 }
6187
6188 __put_user(env->pc, &sc->pc);
6189 __put_user(0, &sc->ics);
6190 __put_user(signo, &sc->faultnum);
6191 }
6192
6193 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6194 {
6195 int i;
6196
6197 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6198 __get_user(env->regs[i], &sc->gregs[i]);
6199 }
6200
6201 __get_user(env->pc, &sc->pc);
6202 }
6203
6204 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6205 size_t frame_size)
6206 {
6207 unsigned long sp = env->regs[TILEGX_R_SP];
6208
6209 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6210 return -1UL;
6211 }
6212
6213 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
6214 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6215 }
6216
6217 sp -= frame_size;
6218 sp &= -16UL;
6219 return sp;
6220 }
6221
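/*
 * Handler entry convention: r0 = signal number, r1 = siginfo,
 * r2 = ucontext, lr = restorer (or the in-frame trampoline), sp = the
 * frame itself.
 */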
6222 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6223 target_siginfo_t *info,
6224 target_sigset_t *set, CPUArchState *env)
6225 {
6226 abi_ulong frame_addr;
6227 struct target_rt_sigframe *frame;
6228 unsigned long restorer;
6229
6230 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6231 trace_user_setup_rt_frame(env, frame_addr);
6232 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6233 goto give_sigsegv;
6234 }
6235
6236 /* Always write at least the signal number for the stack backtracer. */
6237 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6238 /* At sigreturn time, restore the callee-save registers too. */
6239 tswap_siginfo(&frame->info, info);
6240 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
6241 } else {
6242 __put_user(info->si_signo, &frame->info.si_signo);
6243 }
6244
6245 /* Create the ucontext. */
6246 __put_user(0, &frame->uc.tuc_flags);
6247 __put_user(0, &frame->uc.tuc_link);
6248 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6249 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6250 &frame->uc.tuc_stack.ss_flags);
6251 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6252 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6253
6254 if (ka->sa_flags & TARGET_SA_RESTORER) {
6255 restorer = (unsigned long) ka->sa_restorer;
6256 } else {
6257 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6258 __put_user(INSN_SWINT1, &frame->retcode[1]);
6259 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6260 }
6261 env->pc = (unsigned long) ka->_sa_handler;
6262 env->regs[TILEGX_R_SP] = frame_addr;
6263 env->regs[TILEGX_R_LR] = restorer;
6264 env->regs[0] = (unsigned long) sig;
6265 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6266 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6267 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
6268
6269 unlock_user_struct(frame, frame_addr, 1);
6270 return;
6271
6272 give_sigsegv:
6273 force_sigsegv(sig);
6274 }
6275
6276 long do_rt_sigreturn(CPUTLGState *env)
6277 {
6278 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6279 struct target_rt_sigframe *frame;
6280 sigset_t set;
6281
6282 trace_user_do_rt_sigreturn(env, frame_addr);
6283 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6284 goto badframe;
6285 }
6286 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6287 set_sigmask(&set);
6288
6289 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6290 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6291 uc.tuc_stack),
6292 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6293 goto badframe;
6294 }
6295
6296 unlock_user_struct(frame, frame_addr, 0);
6297 return -TARGET_QEMU_ESIGRETURN;
6298
6299
6300 badframe:
6301 unlock_user_struct(frame, frame_addr, 0);
6302 force_sig(TARGET_SIGSEGV);
6303 return -TARGET_QEMU_ESIGRETURN;
6304 }
6305
6306 #elif defined(TARGET_HPPA)
6307
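/*
 * HPPA is rt-signal only, and its stack grows upwards: the frame is
 * placed at the 64-byte-aligned address above the current sp and gr30
 * is then advanced past it by PARISC_RT_SIGFRAME_SIZE32.
 */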
6308 struct target_sigcontext {
6309 abi_ulong sc_flags;
6310 abi_ulong sc_gr[32];
6311 uint64_t sc_fr[32];
6312 abi_ulong sc_iasq[2];
6313 abi_ulong sc_iaoq[2];
6314 abi_ulong sc_sar;
6315 };
6316
6317 struct target_ucontext {
6318 abi_uint tuc_flags;
6319 abi_ulong tuc_link;
6320 target_stack_t tuc_stack;
6321 abi_uint pad[1];
6322 struct target_sigcontext tuc_mcontext;
6323 target_sigset_t tuc_sigmask;
6324 };
6325
6326 struct target_rt_sigframe {
6327 abi_uint tramp[9];
6328 target_siginfo_t info;
6329 struct target_ucontext uc;
6330 /* hidden location of upper halves of pa2.0 64-bit gregs */
6331 };
6332
6333 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6334 {
6335 int flags = 0;
6336 int i;
6337
6338 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6339
6340 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6341 /* In the gateway page, executing a syscall. */
6342 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6343 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6344 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6345 } else {
6346 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6347 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6348 }
6349 __put_user(0, &sc->sc_iasq[0]);
6350 __put_user(0, &sc->sc_iasq[1]);
6351 __put_user(flags, &sc->sc_flags);
6352
6353 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6354 for (i = 1; i < 32; ++i) {
6355 __put_user(env->gr[i], &sc->sc_gr[i]);
6356 }
6357
6358 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6359 for (i = 1; i < 32; ++i) {
6360 __put_user(env->fr[i], &sc->sc_fr[i]);
6361 }
6362
6363 __put_user(env->sar, &sc->sc_sar);
6364 }
6365
6366 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6367 {
6368 target_ulong psw;
6369 int i;
6370
6371 __get_user(psw, &sc->sc_gr[0]);
6372 cpu_hppa_put_psw(env, psw);
6373
6374 for (i = 1; i < 32; ++i) {
6375 __get_user(env->gr[i], &sc->sc_gr[i]);
6376 }
6377 for (i = 0; i < 32; ++i) {
6378 __get_user(env->fr[i], &sc->sc_fr[i]);
6379 }
6380 cpu_hppa_loaded_fr0(env);
6381
6382 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6383 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6384 __get_user(env->sar, &sc->sc_sar);
6385 }
6386
6387 /* No, this doesn't look right, but it's copied straight from the kernel. */
6388 #define PARISC_RT_SIGFRAME_SIZE32 \
6389 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
6390
6391 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6392 target_siginfo_t *info,
6393 target_sigset_t *set, CPUArchState *env)
6394 {
6395 abi_ulong frame_addr, sp, haddr;
6396 struct target_rt_sigframe *frame;
6397 int i;
6398
6399 sp = env->gr[30];
6400 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6401 if (sas_ss_flags(sp) == 0) {
6402 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6403 }
6404 }
6405 frame_addr = QEMU_ALIGN_UP(sp, 64);
6406 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6407
6408 trace_user_setup_rt_frame(env, frame_addr);
6409
6410 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6411 goto give_sigsegv;
6412 }
6413
6414 tswap_siginfo(&frame->info, info);
6415 frame->uc.tuc_flags = 0;
6416 frame->uc.tuc_link = 0;
6417
6418 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6419 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6420 &frame->uc.tuc_stack.ss_flags);
6421 __put_user(target_sigaltstack_used.ss_size,
6422 &frame->uc.tuc_stack.ss_size);
6423
6424 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6425 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6426 }
6427
6428 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6429
6430 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6431 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6432 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6433 __put_user(0x08000240, frame->tramp + 3); /* nop */
6434
6435 unlock_user_struct(frame, frame_addr, 1);
6436
6437 env->gr[2] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6438 env->gr[30] = sp;
6439 env->gr[26] = sig;
6440 env->gr[25] = frame_addr + offsetof(struct target_rt_sigframe, info);
6441 env->gr[24] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6442
6443 haddr = ka->_sa_handler;
6444 if (haddr & 2) {
6445 /* Function descriptor. */
6446 target_ulong *fdesc, dest;
6447
6448 haddr &= -4;
6449 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
6450 goto give_sigsegv;
6451 }
6452 __get_user(dest, fdesc);
6453 __get_user(env->gr[19], fdesc + 1);
6454 unlock_user_struct(fdesc, haddr, 1);
6455 haddr = dest;
6456 }
6457 env->iaoq_f = haddr;
6458 env->iaoq_b = haddr + 4;
6459 return;
6460
6461 give_sigsegv:
6462 force_sigsegv(sig);
6463 }
6464
6465 long do_rt_sigreturn(CPUArchState *env)
6466 {
6467 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
6468 struct target_rt_sigframe *frame;
6469 sigset_t set;
6470
6471 trace_user_do_rt_sigreturn(env, frame_addr);
6472 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6473 goto badframe;
6474 }
6475 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6476 set_sigmask(&set);
6477
6478 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6479 unlock_user_struct(frame, frame_addr, 0);
6480
6481 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6482 uc.tuc_stack),
6483 0, env->gr[30]) == -EFAULT) {
6484 goto badframe;
6485 }
6486
6488 return -TARGET_QEMU_ESIGRETURN;
6489
6490 badframe:
6491 force_sig(TARGET_SIGSEGV);
6492 return -TARGET_QEMU_ESIGRETURN;
6493 }
6494
6495 #else
6496
6497 static void setup_frame(int sig, struct target_sigaction *ka,
6498 target_sigset_t *set, CPUArchState *env)
6499 {
6500 fprintf(stderr, "setup_frame: not implemented\n");
6501 }
6502
6503 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6504 target_siginfo_t *info,
6505 target_sigset_t *set, CPUArchState *env)
6506 {
6507 fprintf(stderr, "setup_rt_frame: not implemented\n");
6508 }
6509
6510 long do_sigreturn(CPUArchState *env)
6511 {
6512 fprintf(stderr, "do_sigreturn: not implemented\n");
6513 return -TARGET_ENOSYS;
6514 }
6515
6516 long do_rt_sigreturn(CPUArchState *env)
6517 {
6518 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
6519 return -TARGET_ENOSYS;
6520 }
6521
6522 #endif
6523
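/*
 * Deliver one queued signal to the guest: give gdb a chance to
 * intercept it, apply default/ignore dispositions ourselves, and for a
 * real handler compute the mask to block during its execution and build
 * the target signal frame on the guest stack.
 */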
6524 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
6525 struct emulated_sigtable *k)
6526 {
6527 CPUState *cpu = ENV_GET_CPU(cpu_env);
6528 abi_ulong handler;
6529 sigset_t set;
6530 target_sigset_t target_old_set;
6531 struct target_sigaction *sa;
6532 TaskState *ts = cpu->opaque;
6533
6534 trace_user_handle_signal(cpu_env, sig);
6535 /* dequeue signal */
6536 k->pending = 0;
6537
6538 sig = gdb_handlesig(cpu, sig);
6539 if (!sig) {
6540 sa = NULL;
6541 handler = TARGET_SIG_IGN;
6542 } else {
6543 sa = &sigact_table[sig - 1];
6544 handler = sa->_sa_handler;
6545 }
6546
6547 if (do_strace) {
6548 print_taken_signal(sig, &k->info);
6549 }
6550
6551 if (handler == TARGET_SIG_DFL) {
5552 /* default handler: ignore some signals; the others are job control or fatal */
5553 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5554 kill(getpid(), SIGSTOP);
6555 } else if (sig != TARGET_SIGCHLD &&
6556 sig != TARGET_SIGURG &&
6557 sig != TARGET_SIGWINCH &&
6558 sig != TARGET_SIGCONT) {
6559 dump_core_and_abort(sig);
6560 }
6561 } else if (handler == TARGET_SIG_IGN) {
6562 /* ignore sig */
6563 } else if (handler == TARGET_SIG_ERR) {
6564 dump_core_and_abort(sig);
6565 } else {
6566 /* compute the blocked signals during the handler execution */
6567 sigset_t *blocked_set;
6568
6569 target_to_host_sigset(&set, &sa->sa_mask);
6570 /* SA_NODEFER indicates that the current signal should not be
6571 blocked during the handler */
6572 if (!(sa->sa_flags & TARGET_SA_NODEFER))
6573 sigaddset(&set, target_to_host_signal(sig));
6574
6575 /* save the previous blocked signal state to restore it at the
6576 end of the signal execution (see do_sigreturn) */
6577 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
6578
6579 /* block signals in the handler */
6580 blocked_set = ts->in_sigsuspend ?
6581 &ts->sigsuspend_mask : &ts->signal_mask;
6582 sigorset(&ts->signal_mask, blocked_set, &set);
6583 ts->in_sigsuspend = 0;
6584
6585 /* if the CPU is in VM86 mode, we restore the 32 bit values */
6586 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
6587 {
6588 CPUX86State *env = cpu_env;
6589 if (env->eflags & VM_MASK)
6590 save_v86_state(env);
6591 }
6592 #endif
6593 /* prepare the stack frame of the virtual CPU */
6594 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
6595 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
6596 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
6597 || defined(TARGET_NIOS2) || defined(TARGET_X86_64)
6598 /* These targets do not have traditional signals. */
6599 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6600 #else
6601 if (sa->sa_flags & TARGET_SA_SIGINFO)
6602 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6603 else
6604 setup_frame(sig, sa, &target_old_set, cpu_env);
6605 #endif
6606 if (sa->sa_flags & TARGET_SA_RESETHAND) {
6607 sa->_sa_handler = TARGET_SIG_DFL;
6608 }
6609 }
6610 }
6611
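/*
 * Drain pending guest signals with all host signals blocked.  A forced
 * synchronous signal (e.g. SIGSEGV from a bad guest access) is handled
 * first, then the ordinary queue is scanned; the scan restarts whenever
 * delivery raises a new synchronous signal.  Finally the host mask is
 * restored, keeping SIGSEGV and SIGBUS unblocked so faults on guest
 * memory accesses can still be trapped.
 */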
6612 void process_pending_signals(CPUArchState *cpu_env)
6613 {
6614 CPUState *cpu = ENV_GET_CPU(cpu_env);
6615 int sig;
6616 TaskState *ts = cpu->opaque;
6617 sigset_t set;
6618 sigset_t *blocked_set;
6619
6620 while (atomic_read(&ts->signal_pending)) {
6621 /* FIXME: This is not threadsafe. */
6622 sigfillset(&set);
6623 sigprocmask(SIG_SETMASK, &set, 0);
6624
6625 restart_scan:
6626 sig = ts->sync_signal.pending;
6627 if (sig) {
6628 /* Synchronous signals are forced,
6629 * see force_sig_info() and callers in Linux
6630 * Note that not all of our queue_signal() calls in QEMU correspond
6631 * to force_sig_info() calls in Linux (some are send_sig_info()).
6632 * However it seems like a kernel bug to me to allow the process
6633 * to block a synchronous signal since it could then just end up
6634 * looping round and round indefinitely.
6635 */
6636 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
6637 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
6638 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
6639 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
6640 }
6641
6642 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
6643 }
6644
6645 for (sig = 1; sig <= TARGET_NSIG; sig++) {
6646 blocked_set = ts->in_sigsuspend ?
6647 &ts->sigsuspend_mask : &ts->signal_mask;
6648
6649 if (ts->sigtab[sig - 1].pending &&
6650 (!sigismember(blocked_set,
6651 target_to_host_signal_table[sig]))) {
6652 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
6653 /* Restart scan from the beginning, as handle_pending_signal
6654 * might have resulted in a new synchronous signal (eg SIGSEGV).
6655 */
6656 goto restart_scan;
6657 }
6658 }
6659
6660 /* if no signal is pending, unblock signals and recheck (the act
6661 * of unblocking might cause us to take another host signal which
6662 * will set signal_pending again).
6663 */
6664 atomic_set(&ts->signal_pending, 0);
6665 ts->in_sigsuspend = 0;
6666 set = ts->signal_mask;
6667 sigdelset(&set, SIGSEGV);
6668 sigdelset(&set, SIGBUS);
6669 sigprocmask(SIG_SETMASK, &set, 0);
6670 }
6671 ts->in_sigsuspend = 0;
6672 }