1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
33 };
34
35 static struct target_sigaction sigact_table[TARGET_NSIG];
36
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
39
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
82 };
83 static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
110
111 static inline void target_sigemptyset(target_sigset_t *set)
112 {
113 memset(set, 0, sizeof(*set));
114 }
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
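/* Block all host signals for this thread, mark a guest signal as pending and
 * return the previous value of ts->signal_pending (non-zero means a guest
 * signal was already pending). Callers such as do_sigprocmask() use the
 * return value to decide whether the syscall must be restarted. */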
194 int block_signals(void)
195 {
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
198
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
202 */
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
205
206 return atomic_xchg(&ts->signal_pending, 1);
207 }
208
209 /* Wrapper for the sigprocmask function
210 * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
211 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
212 * a signal was already pending and the syscall must be restarted, or
213 * 0 on success.
214 * If set is NULL, this is guaranteed not to fail.
215 */
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
217 {
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
219
220 if (oldset) {
221 *oldset = ts->signal_mask;
222 }
223
224 if (set) {
225 int i;
226
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
229 }
230
231 switch (how) {
232 case SIG_BLOCK:
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
234 break;
235 case SIG_UNBLOCK:
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
239 }
240 }
241 break;
242 case SIG_SETMASK:
243 ts->signal_mask = *set;
244 break;
245 default:
246 g_assert_not_reached();
247 }
248
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
252 }
253 return 0;
254 }
255
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
257 /* Just set the guest's signal mask to the specified value; the
258 * caller is assumed to have called block_signals() already.
259 */
260 static void set_sigmask(const sigset_t *set)
261 {
262 TaskState *ts = (TaskState *)thread_cpu->opaque;
263
264 ts->signal_mask = *set;
265 }
266 #endif
267
268 /* siginfo conversion */
269
270 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
271 const siginfo_t *info)
272 {
273 int sig = host_to_target_signal(info->si_signo);
274 int si_code = info->si_code;
275 int si_type;
276 tinfo->si_signo = sig;
277 tinfo->si_errno = 0;
278 tinfo->si_code = info->si_code;
279
280 /* This memset serves two purposes:
281 * (1) ensure we don't leak random junk to the guest later
282 * (2) placate false positives from gcc about fields
283 * being used uninitialized if it chooses to inline both this
284 * function and tswap_siginfo() into host_to_target_siginfo().
285 */
286 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
287
288 /* This is awkward, because we have to use a combination of
289 * the si_code and si_signo to figure out which of the union's
290 * members are valid. (Within the host kernel it is always possible
291 * to tell, but the kernel carefully avoids giving userspace the
292 * high 16 bits of si_code, so we don't have the information to
293 * do this the easy way...) We therefore make our best guess,
294 * bearing in mind that a guest can spoof most of the si_codes
295 * via rt_sigqueueinfo() if it likes.
296 *
297 * Once we have made our guess, we record it in the top 16 bits of
298 * the si_code, so that tswap_siginfo() later can use it.
299 * tswap_siginfo() will strip these top bits out before writing
300 * si_code to the guest (sign-extending the lower bits).
301 */
302
303 switch (si_code) {
304 case SI_USER:
305 case SI_TKILL:
306 case SI_KERNEL:
307 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
308 * These are the only unspoofable si_code values.
309 */
310 tinfo->_sifields._kill._pid = info->si_pid;
311 tinfo->_sifields._kill._uid = info->si_uid;
312 si_type = QEMU_SI_KILL;
313 break;
314 default:
315 /* Everything else is spoofable. Make best guess based on signal */
316 switch (sig) {
317 case TARGET_SIGCHLD:
318 tinfo->_sifields._sigchld._pid = info->si_pid;
319 tinfo->_sifields._sigchld._uid = info->si_uid;
320 tinfo->_sifields._sigchld._status
321 = host_to_target_waitstatus(info->si_status);
322 tinfo->_sifields._sigchld._utime = info->si_utime;
323 tinfo->_sifields._sigchld._stime = info->si_stime;
324 si_type = QEMU_SI_CHLD;
325 break;
326 case TARGET_SIGIO:
327 tinfo->_sifields._sigpoll._band = info->si_band;
328 tinfo->_sifields._sigpoll._fd = info->si_fd;
329 si_type = QEMU_SI_POLL;
330 break;
331 default:
332 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
333 tinfo->_sifields._rt._pid = info->si_pid;
334 tinfo->_sifields._rt._uid = info->si_uid;
335 /* XXX: potential problem if 64 bit */
336 tinfo->_sifields._rt._sigval.sival_ptr
337 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
338 si_type = QEMU_SI_RT;
339 break;
340 }
341 break;
342 }
343
344 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
345 }
346
347 static void tswap_siginfo(target_siginfo_t *tinfo,
348 const target_siginfo_t *info)
349 {
350 int si_type = extract32(info->si_code, 16, 16);
351 int si_code = sextract32(info->si_code, 0, 16);
352
353 __put_user(info->si_signo, &tinfo->si_signo);
354 __put_user(info->si_errno, &tinfo->si_errno);
355 __put_user(si_code, &tinfo->si_code);
356
357 /* We can use our internal marker of which fields in the structure
358 * are valid, rather than duplicating the guesswork of
359 * host_to_target_siginfo_noswap() here.
360 */
361 switch (si_type) {
362 case QEMU_SI_KILL:
363 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
364 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
365 break;
366 case QEMU_SI_TIMER:
367 __put_user(info->_sifields._timer._timer1,
368 &tinfo->_sifields._timer._timer1);
369 __put_user(info->_sifields._timer._timer2,
370 &tinfo->_sifields._timer._timer2);
371 break;
372 case QEMU_SI_POLL:
373 __put_user(info->_sifields._sigpoll._band,
374 &tinfo->_sifields._sigpoll._band);
375 __put_user(info->_sifields._sigpoll._fd,
376 &tinfo->_sifields._sigpoll._fd);
377 break;
378 case QEMU_SI_FAULT:
379 __put_user(info->_sifields._sigfault._addr,
380 &tinfo->_sifields._sigfault._addr);
381 break;
382 case QEMU_SI_CHLD:
383 __put_user(info->_sifields._sigchld._pid,
384 &tinfo->_sifields._sigchld._pid);
385 __put_user(info->_sifields._sigchld._uid,
386 &tinfo->_sifields._sigchld._uid);
387 __put_user(info->_sifields._sigchld._status,
388 &tinfo->_sifields._sigchld._status);
389 __put_user(info->_sifields._sigchld._utime,
390 &tinfo->_sifields._sigchld._utime);
391 __put_user(info->_sifields._sigchld._stime,
392 &tinfo->_sifields._sigchld._stime);
393 break;
394 case QEMU_SI_RT:
395 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
396 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
397 __put_user(info->_sifields._rt._sigval.sival_ptr,
398 &tinfo->_sifields._rt._sigval.sival_ptr);
399 break;
400 default:
401 g_assert_not_reached();
402 }
403 }
404
405 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
406 {
407 target_siginfo_t tgt_tmp;
408 host_to_target_siginfo_noswap(&tgt_tmp, info);
409 tswap_siginfo(tinfo, &tgt_tmp);
410 }
411
412 /* XXX: we support only POSIX RT signals. */
413 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
414 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
415 {
416 /* This conversion is used only for the rt_sigqueueinfo syscall,
417 * and so we know that the _rt fields are the valid ones.
418 */
419 abi_ulong sival_ptr;
420
421 __get_user(info->si_signo, &tinfo->si_signo);
422 __get_user(info->si_errno, &tinfo->si_errno);
423 __get_user(info->si_code, &tinfo->si_code);
424 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
425 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
426 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
427 info->si_value.sival_ptr = (void *)(long)sival_ptr;
428 }
429
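/* Return 1 if the default action for this target signal terminates the
 * process (so QEMU must install a host handler for it), or 0 if the default
 * action is to ignore it or it is a job-control signal. */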
430 static int fatal_signal (int sig)
431 {
432 switch (sig) {
433 case TARGET_SIGCHLD:
434 case TARGET_SIGURG:
435 case TARGET_SIGWINCH:
436 /* Ignored by default. */
437 return 0;
438 case TARGET_SIGCONT:
439 case TARGET_SIGSTOP:
440 case TARGET_SIGTSTP:
441 case TARGET_SIGTTIN:
442 case TARGET_SIGTTOU:
443 /* Job control signals. */
444 return 0;
445 default:
446 return 1;
447 }
448 }
449
450 /* returns 1 if given signal should dump core if not handled */
451 static int core_dump_signal(int sig)
452 {
453 switch (sig) {
454 case TARGET_SIGABRT:
455 case TARGET_SIGFPE:
456 case TARGET_SIGILL:
457 case TARGET_SIGQUIT:
458 case TARGET_SIGSEGV:
459 case TARGET_SIGTRAP:
460 case TARGET_SIGBUS:
461 return (1);
462 default:
463 return (0);
464 }
465 }
466
467 void signal_init(void)
468 {
469 TaskState *ts = (TaskState *)thread_cpu->opaque;
470 struct sigaction act;
471 struct sigaction oact;
472 int i, j;
473 int host_sig;
474
475 /* generate signal conversion tables */
476 for(i = 1; i < _NSIG; i++) {
477 if (host_to_target_signal_table[i] == 0)
478 host_to_target_signal_table[i] = i;
479 }
480 for(i = 1; i < _NSIG; i++) {
481 j = host_to_target_signal_table[i];
482 target_to_host_signal_table[j] = i;
483 }
484
485 /* Set the signal mask from the host mask. */
486 sigprocmask(0, 0, &ts->signal_mask);
487
488 /* set all host signal handlers. ALL signals are blocked during
489 the handlers to serialize them. */
490 memset(sigact_table, 0, sizeof(sigact_table));
491
492 sigfillset(&act.sa_mask);
493 act.sa_flags = SA_SIGINFO;
494 act.sa_sigaction = host_signal_handler;
495 for(i = 1; i <= TARGET_NSIG; i++) {
496 host_sig = target_to_host_signal(i);
497 sigaction(host_sig, NULL, &oact);
498 if (oact.sa_sigaction == (void *)SIG_IGN) {
499 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
500 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
501 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
502 }
503 /* If there's already a handler installed then something has
504 gone horribly wrong, so don't even try to handle that case. */
505 /* Install some handlers for our own use. We need at least
506 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
507 trap all signals because it affects syscall interrupt
508 behavior. But do trap all default-fatal signals. */
509 if (fatal_signal (i))
510 sigaction(host_sig, &act, NULL);
511 }
512 }
513
514 /* Force a synchronously taken signal. The kernel force_sig() function
515 * also forces the signal to "not blocked, not ignored", but for QEMU
516 * that work is done in process_pending_signals().
517 */
518 static void force_sig(int sig)
519 {
520 CPUState *cpu = thread_cpu;
521 CPUArchState *env = cpu->env_ptr;
522 target_siginfo_t info;
523
524 info.si_signo = sig;
525 info.si_errno = 0;
526 info.si_code = TARGET_SI_KERNEL;
527 info._sifields._kill._pid = 0;
528 info._sifields._kill._uid = 0;
529 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
530 }
531
532 /* Force a SIGSEGV if we couldn't write to memory trying to set
533 * up the signal frame. oldsig is the signal we were trying to handle
534 * at the point of failure.
535 */
536 #if !defined(TARGET_RISCV)
537 static void force_sigsegv(int oldsig)
538 {
539 if (oldsig == SIGSEGV) {
540 /* Make sure we don't try to deliver the signal again; this will
541 * end up with handle_pending_signal() calling dump_core_and_abort().
542 */
543 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
544 }
545 force_sig(TARGET_SIGSEGV);
546 }
547
548 #endif
549
550 /* abort execution with signal */
551 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
552 {
553 CPUState *cpu = thread_cpu;
554 CPUArchState *env = cpu->env_ptr;
555 TaskState *ts = (TaskState *)cpu->opaque;
556 int host_sig, core_dumped = 0;
557 struct sigaction act;
558
559 host_sig = target_to_host_signal(target_sig);
560 trace_user_force_sig(env, target_sig, host_sig);
561 gdb_signalled(env, target_sig);
562
563 /* dump core if supported by target binary format */
564 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
565 stop_all_tasks();
566 core_dumped =
567 ((*ts->bprm->core_dump)(target_sig, env) == 0);
568 }
569 if (core_dumped) {
570 /* we already dumped the core of the target process, we don't want
571 * a coredump of qemu itself */
572 struct rlimit nodump;
573 getrlimit(RLIMIT_CORE, &nodump);
574 nodump.rlim_cur=0;
575 setrlimit(RLIMIT_CORE, &nodump);
576 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
577 target_sig, strsignal(host_sig), "core dumped" );
578 }
579
580 /* The proper exit code for dying from an uncaught signal is
581 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
582 * a negative value. To get the proper exit code we need to
583 * actually die from an uncaught signal. So we install the default
584 * signal handler, send ourselves the signal and wait for it to
585 * arrive. */
586 sigfillset(&act.sa_mask);
587 act.sa_handler = SIG_DFL;
588 act.sa_flags = 0;
589 sigaction(host_sig, &act, NULL);
590
591 /* For some reason raise(host_sig) doesn't send the signal when
592 * statically linked on x86-64. */
593 kill(getpid(), host_sig);
594
595 /* Make sure the signal isn't masked (just reuse the mask inside
596 of act) */
597 sigdelset(&act.sa_mask, host_sig);
598 sigsuspend(&act.sa_mask);
599
600 /* unreachable */
601 abort();
602 }
603
604 /* queue a signal so that it will be sent to the virtual CPU as soon
605 as possible */
606 int queue_signal(CPUArchState *env, int sig, int si_type,
607 target_siginfo_t *info)
608 {
609 CPUState *cpu = ENV_GET_CPU(env);
610 TaskState *ts = cpu->opaque;
611
612 trace_user_queue_signal(env, sig);
613
614 info->si_code = deposit32(info->si_code, 16, 16, si_type);
615
616 ts->sync_signal.info = *info;
617 ts->sync_signal.pending = sig;
618 /* signal that a new signal is pending */
619 atomic_set(&ts->signal_pending, 1);
620 return 1; /* indicates that the signal was queued */
621 }
622
623 #ifndef HAVE_SAFE_SYSCALL
624 static inline void rewind_if_in_safe_syscall(void *puc)
625 {
626 /* Default version: never rewind */
627 }
628 #endif
629
630 static void host_signal_handler(int host_signum, siginfo_t *info,
631 void *puc)
632 {
633 CPUArchState *env = thread_cpu->env_ptr;
634 CPUState *cpu = ENV_GET_CPU(env);
635 TaskState *ts = cpu->opaque;
636
637 int sig;
638 target_siginfo_t tinfo;
639 ucontext_t *uc = puc;
640 struct emulated_sigtable *k;
641
642 /* the CPU emulator uses some host signals to detect exceptions;
643 we forward those signals to it */
644 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
645 && info->si_code > 0) {
646 if (cpu_signal_handler(host_signum, info, puc))
647 return;
648 }
649
650 /* get target signal number */
651 sig = host_to_target_signal(host_signum);
652 if (sig < 1 || sig > TARGET_NSIG)
653 return;
654 trace_user_host_signal(env, host_signum, sig);
655
656 rewind_if_in_safe_syscall(puc);
657
658 host_to_target_siginfo_noswap(&tinfo, info);
659 k = &ts->sigtab[sig - 1];
660 k->info = tinfo;
661 k->pending = sig;
662 ts->signal_pending = 1;
663
664 /* Block host signals until target signal handler entered. We
665 * can't block SIGSEGV or SIGBUS while we're executing guest
666 * code in case the guest code provokes one in the window between
667 * now and it getting out to the main loop. Signals will be
668 * unblocked again in process_pending_signals().
669 *
670 * WARNING: we cannot use sigfillset() here because the uc_sigmask
671 * field is a kernel sigset_t, which is much smaller than the
672 * libc sigset_t which sigfillset() operates on. Using sigfillset()
673 * would write 0xff bytes past the end of the field and trash
674 * other data in the structure.
675 * We can't use sizeof(uc->uc_sigmask) either, because the libc
676 * headers define the struct field with the wrong (too large) type.
677 */
678 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
679 sigdelset(&uc->uc_sigmask, SIGSEGV);
680 sigdelset(&uc->uc_sigmask, SIGBUS);
681
682 /* interrupt the virtual CPU as soon as possible */
683 cpu_exit(thread_cpu);
684 }
685
686 /* do_sigaltstack() returns target values and errnos. */
687 /* compare linux/kernel/signal.c:do_sigaltstack() */
688 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
689 {
690 int ret;
691 struct target_sigaltstack oss;
692
693 /* XXX: test errors */
694 if(uoss_addr)
695 {
696 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
697 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
698 __put_user(sas_ss_flags(sp), &oss.ss_flags);
699 }
700
701 if(uss_addr)
702 {
703 struct target_sigaltstack *uss;
704 struct target_sigaltstack ss;
705 size_t minstacksize = TARGET_MINSIGSTKSZ;
706
707 #if defined(TARGET_PPC64)
708 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
709 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
710 if (get_ppc64_abi(image) > 1) {
711 minstacksize = 4096;
712 }
713 #endif
714
715 ret = -TARGET_EFAULT;
716 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
717 goto out;
718 }
719 __get_user(ss.ss_sp, &uss->ss_sp);
720 __get_user(ss.ss_size, &uss->ss_size);
721 __get_user(ss.ss_flags, &uss->ss_flags);
722 unlock_user_struct(uss, uss_addr, 0);
723
724 ret = -TARGET_EPERM;
725 if (on_sig_stack(sp))
726 goto out;
727
728 ret = -TARGET_EINVAL;
729 if (ss.ss_flags != TARGET_SS_DISABLE
730 && ss.ss_flags != TARGET_SS_ONSTACK
731 && ss.ss_flags != 0)
732 goto out;
733
734 if (ss.ss_flags == TARGET_SS_DISABLE) {
735 ss.ss_size = 0;
736 ss.ss_sp = 0;
737 } else {
738 ret = -TARGET_ENOMEM;
739 if (ss.ss_size < minstacksize) {
740 goto out;
741 }
742 }
743
744 target_sigaltstack_used.ss_sp = ss.ss_sp;
745 target_sigaltstack_used.ss_size = ss.ss_size;
746 }
747
748 if (uoss_addr) {
749 ret = -TARGET_EFAULT;
750 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
751 goto out;
752 }
753
754 ret = 0;
755 out:
756 return ret;
757 }
758
759 /* do_sigaction() return target values and host errnos */
760 int do_sigaction(int sig, const struct target_sigaction *act,
761 struct target_sigaction *oact)
762 {
763 struct target_sigaction *k;
764 struct sigaction act1;
765 int host_sig;
766 int ret = 0;
767
768 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
769 return -TARGET_EINVAL;
770 }
771
772 if (block_signals()) {
773 return -TARGET_ERESTARTSYS;
774 }
775
776 k = &sigact_table[sig - 1];
777 if (oact) {
778 __put_user(k->_sa_handler, &oact->_sa_handler);
779 __put_user(k->sa_flags, &oact->sa_flags);
780 #ifdef TARGET_ARCH_HAS_SA_RESTORER
781 __put_user(k->sa_restorer, &oact->sa_restorer);
782 #endif
783 /* Not swapped. */
784 oact->sa_mask = k->sa_mask;
785 }
786 if (act) {
787 /* FIXME: This is not threadsafe. */
788 __get_user(k->_sa_handler, &act->_sa_handler);
789 __get_user(k->sa_flags, &act->sa_flags);
790 #ifdef TARGET_ARCH_HAS_SA_RESTORER
791 __get_user(k->sa_restorer, &act->sa_restorer);
792 #endif
793 /* To be swapped in target_to_host_sigset. */
794 k->sa_mask = act->sa_mask;
795
796 /* we update the host linux signal state */
797 host_sig = target_to_host_signal(sig);
798 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
799 sigfillset(&act1.sa_mask);
800 act1.sa_flags = SA_SIGINFO;
801 if (k->sa_flags & TARGET_SA_RESTART)
802 act1.sa_flags |= SA_RESTART;
803 /* NOTE: it is important to update the host kernel signal
804 ignore state to avoid getting unexpectedly interrupted
805 syscalls */
806 if (k->_sa_handler == TARGET_SIG_IGN) {
807 act1.sa_sigaction = (void *)SIG_IGN;
808 } else if (k->_sa_handler == TARGET_SIG_DFL) {
809 if (fatal_signal (sig))
810 act1.sa_sigaction = host_signal_handler;
811 else
812 act1.sa_sigaction = (void *)SIG_DFL;
813 } else {
814 act1.sa_sigaction = host_signal_handler;
815 }
816 ret = sigaction(host_sig, &act1, NULL);
817 }
818 }
819 return ret;
820 }
821
822 #if defined(TARGET_I386)
823 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
824
825 struct target_fpreg {
826 uint16_t significand[4];
827 uint16_t exponent;
828 };
829
830 struct target_fpxreg {
831 uint16_t significand[4];
832 uint16_t exponent;
833 uint16_t padding[3];
834 };
835
836 struct target_xmmreg {
837 uint32_t element[4];
838 };
839
840 struct target_fpstate_32 {
841 /* Regular FPU environment */
842 uint32_t cw;
843 uint32_t sw;
844 uint32_t tag;
845 uint32_t ipoff;
846 uint32_t cssel;
847 uint32_t dataoff;
848 uint32_t datasel;
849 struct target_fpreg st[8];
850 uint16_t status;
851 uint16_t magic; /* 0xffff = regular FPU data only */
852
853 /* FXSR FPU environment */
854 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
855 uint32_t mxcsr;
856 uint32_t reserved;
857 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
858 struct target_xmmreg xmm[8];
859 uint32_t padding[56];
860 };
861
862 struct target_fpstate_64 {
863 /* FXSAVE format */
864 uint16_t cw;
865 uint16_t sw;
866 uint16_t twd;
867 uint16_t fop;
868 uint64_t rip;
869 uint64_t rdp;
870 uint32_t mxcsr;
871 uint32_t mxcsr_mask;
872 uint32_t st_space[32];
873 uint32_t xmm_space[64];
874 uint32_t reserved[24];
875 };
876
877 #ifndef TARGET_X86_64
878 # define target_fpstate target_fpstate_32
879 #else
880 # define target_fpstate target_fpstate_64
881 #endif
882
883 struct target_sigcontext_32 {
884 uint16_t gs, __gsh;
885 uint16_t fs, __fsh;
886 uint16_t es, __esh;
887 uint16_t ds, __dsh;
888 uint32_t edi;
889 uint32_t esi;
890 uint32_t ebp;
891 uint32_t esp;
892 uint32_t ebx;
893 uint32_t edx;
894 uint32_t ecx;
895 uint32_t eax;
896 uint32_t trapno;
897 uint32_t err;
898 uint32_t eip;
899 uint16_t cs, __csh;
900 uint32_t eflags;
901 uint32_t esp_at_signal;
902 uint16_t ss, __ssh;
903 uint32_t fpstate; /* pointer */
904 uint32_t oldmask;
905 uint32_t cr2;
906 };
907
908 struct target_sigcontext_64 {
909 uint64_t r8;
910 uint64_t r9;
911 uint64_t r10;
912 uint64_t r11;
913 uint64_t r12;
914 uint64_t r13;
915 uint64_t r14;
916 uint64_t r15;
917
918 uint64_t rdi;
919 uint64_t rsi;
920 uint64_t rbp;
921 uint64_t rbx;
922 uint64_t rdx;
923 uint64_t rax;
924 uint64_t rcx;
925 uint64_t rsp;
926 uint64_t rip;
927
928 uint64_t eflags;
929
930 uint16_t cs;
931 uint16_t gs;
932 uint16_t fs;
933 uint16_t ss;
934
935 uint64_t err;
936 uint64_t trapno;
937 uint64_t oldmask;
938 uint64_t cr2;
939
940 uint64_t fpstate; /* pointer */
941 uint64_t padding[8];
942 };
943
944 #ifndef TARGET_X86_64
945 # define target_sigcontext target_sigcontext_32
946 #else
947 # define target_sigcontext target_sigcontext_64
948 #endif
949
950 /* see Linux/include/uapi/asm-generic/ucontext.h */
951 struct target_ucontext {
952 abi_ulong tuc_flags;
953 abi_ulong tuc_link;
954 target_stack_t tuc_stack;
955 struct target_sigcontext tuc_mcontext;
956 target_sigset_t tuc_sigmask; /* mask last for extensibility */
957 };
958
959 #ifndef TARGET_X86_64
960 struct sigframe {
961 abi_ulong pretcode;
962 int sig;
963 struct target_sigcontext sc;
964 struct target_fpstate fpstate;
965 abi_ulong extramask[TARGET_NSIG_WORDS-1];
966 char retcode[8];
967 };
968
969 struct rt_sigframe {
970 abi_ulong pretcode;
971 int sig;
972 abi_ulong pinfo;
973 abi_ulong puc;
974 struct target_siginfo info;
975 struct target_ucontext uc;
976 struct target_fpstate fpstate;
977 char retcode[8];
978 };
979
980 #else
981
982 struct rt_sigframe {
983 abi_ulong pretcode;
984 struct target_ucontext uc;
985 struct target_siginfo info;
986 struct target_fpstate fpstate;
987 };
988
989 #endif
990
991 /*
992 * Set up a signal frame.
993 */
994
995 /* XXX: save x87 state */
996 static void setup_sigcontext(struct target_sigcontext *sc,
997 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
998 abi_ulong fpstate_addr)
999 {
1000 CPUState *cs = CPU(x86_env_get_cpu(env));
1001 #ifndef TARGET_X86_64
1002 uint16_t magic;
1003
1004 /* already locked in setup_frame() */
1005 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
1006 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
1007 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
1008 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
1009 __put_user(env->regs[R_EDI], &sc->edi);
1010 __put_user(env->regs[R_ESI], &sc->esi);
1011 __put_user(env->regs[R_EBP], &sc->ebp);
1012 __put_user(env->regs[R_ESP], &sc->esp);
1013 __put_user(env->regs[R_EBX], &sc->ebx);
1014 __put_user(env->regs[R_EDX], &sc->edx);
1015 __put_user(env->regs[R_ECX], &sc->ecx);
1016 __put_user(env->regs[R_EAX], &sc->eax);
1017 __put_user(cs->exception_index, &sc->trapno);
1018 __put_user(env->error_code, &sc->err);
1019 __put_user(env->eip, &sc->eip);
1020 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1021 __put_user(env->eflags, &sc->eflags);
1022 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1023 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1024
1025 cpu_x86_fsave(env, fpstate_addr, 1);
1026 fpstate->status = fpstate->sw;
1027 magic = 0xffff;
1028 __put_user(magic, &fpstate->magic);
1029 __put_user(fpstate_addr, &sc->fpstate);
1030
1031 /* non-iBCS2 extensions.. */
1032 __put_user(mask, &sc->oldmask);
1033 __put_user(env->cr[2], &sc->cr2);
1034 #else
1035 __put_user(env->regs[R_EDI], &sc->rdi);
1036 __put_user(env->regs[R_ESI], &sc->rsi);
1037 __put_user(env->regs[R_EBP], &sc->rbp);
1038 __put_user(env->regs[R_ESP], &sc->rsp);
1039 __put_user(env->regs[R_EBX], &sc->rbx);
1040 __put_user(env->regs[R_EDX], &sc->rdx);
1041 __put_user(env->regs[R_ECX], &sc->rcx);
1042 __put_user(env->regs[R_EAX], &sc->rax);
1043
1044 __put_user(env->regs[8], &sc->r8);
1045 __put_user(env->regs[9], &sc->r9);
1046 __put_user(env->regs[10], &sc->r10);
1047 __put_user(env->regs[11], &sc->r11);
1048 __put_user(env->regs[12], &sc->r12);
1049 __put_user(env->regs[13], &sc->r13);
1050 __put_user(env->regs[14], &sc->r14);
1051 __put_user(env->regs[15], &sc->r15);
1052
1053 __put_user(cs->exception_index, &sc->trapno);
1054 __put_user(env->error_code, &sc->err);
1055 __put_user(env->eip, &sc->rip);
1056
1057 __put_user(env->eflags, &sc->eflags);
1058 __put_user(env->segs[R_CS].selector, &sc->cs);
1059 __put_user((uint16_t)0, &sc->gs);
1060 __put_user((uint16_t)0, &sc->fs);
1061 __put_user(env->segs[R_SS].selector, &sc->ss);
1062
1063 __put_user(mask, &sc->oldmask);
1064 __put_user(env->cr[2], &sc->cr2);
1065
1066 /* fpstate_addr must be 16 byte aligned for fxsave */
1067 assert(!(fpstate_addr & 0xf));
1068
1069 cpu_x86_fxsave(env, fpstate_addr);
1070 __put_user(fpstate_addr, &sc->fpstate);
1071 #endif
1072 }
1073
1074 /*
1075 * Determine which stack to use..
1076 */
1077
1078 static inline abi_ulong
1079 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1080 {
1081 unsigned long esp;
1082
1083 /* Default to using normal stack */
1084 esp = env->regs[R_ESP];
1085 #ifdef TARGET_X86_64
1086 esp -= 128; /* this is the redzone */
1087 #endif
1088
1089 /* This is the X/Open sanctioned signal stack switching. */
1090 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1091 if (sas_ss_flags(esp) == 0) {
1092 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1093 }
1094 } else {
1095 #ifndef TARGET_X86_64
1096 /* This is the legacy signal stack switching. */
1097 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1098 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1099 ka->sa_restorer) {
1100 esp = (unsigned long) ka->sa_restorer;
1101 }
1102 #endif
1103 }
1104
1105 #ifndef TARGET_X86_64
1106 return (esp - frame_size) & -8ul;
1107 #else
1108 return ((esp - frame_size) & (~15ul)) - 8;
1109 #endif
1110 }
1111
1112 #ifndef TARGET_X86_64
1113 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1114 static void setup_frame(int sig, struct target_sigaction *ka,
1115 target_sigset_t *set, CPUX86State *env)
1116 {
1117 abi_ulong frame_addr;
1118 struct sigframe *frame;
1119 int i;
1120
1121 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1122 trace_user_setup_frame(env, frame_addr);
1123
1124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1125 goto give_sigsegv;
1126
1127 __put_user(sig, &frame->sig);
1128
1129 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1130 frame_addr + offsetof(struct sigframe, fpstate));
1131
1132 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1133 __put_user(set->sig[i], &frame->extramask[i - 1]);
1134 }
1135
1136 /* Set up to return from userspace. If provided, use a stub
1137 already in userspace. */
1138 if (ka->sa_flags & TARGET_SA_RESTORER) {
1139 __put_user(ka->sa_restorer, &frame->pretcode);
1140 } else {
1141 uint16_t val16;
1142 abi_ulong retcode_addr;
1143 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1144 __put_user(retcode_addr, &frame->pretcode);
1145 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1146 val16 = 0xb858;
1147 __put_user(val16, (uint16_t *)(frame->retcode+0));
1148 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1149 val16 = 0x80cd;
1150 __put_user(val16, (uint16_t *)(frame->retcode+6));
1151 }
1152
1153 /* Set up registers for signal handler */
1154 env->regs[R_ESP] = frame_addr;
1155 env->eip = ka->_sa_handler;
1156
1157 cpu_x86_load_seg(env, R_DS, __USER_DS);
1158 cpu_x86_load_seg(env, R_ES, __USER_DS);
1159 cpu_x86_load_seg(env, R_SS, __USER_DS);
1160 cpu_x86_load_seg(env, R_CS, __USER_CS);
1161 env->eflags &= ~TF_MASK;
1162
1163 unlock_user_struct(frame, frame_addr, 1);
1164
1165 return;
1166
1167 give_sigsegv:
1168 force_sigsegv(sig);
1169 }
1170 #endif
1171
1172 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1173 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1174 target_siginfo_t *info,
1175 target_sigset_t *set, CPUX86State *env)
1176 {
1177 abi_ulong frame_addr;
1178 #ifndef TARGET_X86_64
1179 abi_ulong addr;
1180 #endif
1181 struct rt_sigframe *frame;
1182 int i;
1183
1184 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1185 trace_user_setup_rt_frame(env, frame_addr);
1186
1187 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1188 goto give_sigsegv;
1189
1190 /* These fields are only in rt_sigframe on 32 bit */
1191 #ifndef TARGET_X86_64
1192 __put_user(sig, &frame->sig);
1193 addr = frame_addr + offsetof(struct rt_sigframe, info);
1194 __put_user(addr, &frame->pinfo);
1195 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1196 __put_user(addr, &frame->puc);
1197 #endif
1198 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1199 tswap_siginfo(&frame->info, info);
1200 }
1201
1202 /* Create the ucontext. */
1203 __put_user(0, &frame->uc.tuc_flags);
1204 __put_user(0, &frame->uc.tuc_link);
1205 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1206 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1207 &frame->uc.tuc_stack.ss_flags);
1208 __put_user(target_sigaltstack_used.ss_size,
1209 &frame->uc.tuc_stack.ss_size);
1210 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1211 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1212
1213 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1214 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1215 }
1216
1217 /* Set up to return from userspace. If provided, use a stub
1218 already in userspace. */
1219 #ifndef TARGET_X86_64
1220 if (ka->sa_flags & TARGET_SA_RESTORER) {
1221 __put_user(ka->sa_restorer, &frame->pretcode);
1222 } else {
1223 uint16_t val16;
1224 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1225 __put_user(addr, &frame->pretcode);
1226 /* This is movl $,%eax ; int $0x80 */
1227 __put_user(0xb8, (char *)(frame->retcode+0));
1228 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1229 val16 = 0x80cd;
1230 __put_user(val16, (uint16_t *)(frame->retcode+5));
1231 }
1232 #else
1233 /* XXX: Would be slightly better to return -EFAULT here if test fails
1234 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1235 __put_user(ka->sa_restorer, &frame->pretcode);
1236 #endif
1237
1238 /* Set up registers for signal handler */
1239 env->regs[R_ESP] = frame_addr;
1240 env->eip = ka->_sa_handler;
1241
1242 #ifndef TARGET_X86_64
1243 env->regs[R_EAX] = sig;
1244 env->regs[R_EDX] = (unsigned long)&frame->info;
1245 env->regs[R_ECX] = (unsigned long)&frame->uc;
1246 #else
1247 env->regs[R_EAX] = 0;
1248 env->regs[R_EDI] = sig;
1249 env->regs[R_ESI] = (unsigned long)&frame->info;
1250 env->regs[R_EDX] = (unsigned long)&frame->uc;
1251 #endif
1252
1253 cpu_x86_load_seg(env, R_DS, __USER_DS);
1254 cpu_x86_load_seg(env, R_ES, __USER_DS);
1255 cpu_x86_load_seg(env, R_CS, __USER_CS);
1256 cpu_x86_load_seg(env, R_SS, __USER_DS);
1257 env->eflags &= ~TF_MASK;
1258
1259 unlock_user_struct(frame, frame_addr, 1);
1260
1261 return;
1262
1263 give_sigsegv:
1264 force_sigsegv(sig);
1265 }
1266
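/* Restore CPU state from the sigcontext written by setup_sigcontext();
 * returns 0 on success, non-zero if the saved FP state is inaccessible. */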
1267 static int
1268 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1269 {
1270 unsigned int err = 0;
1271 abi_ulong fpstate_addr;
1272 unsigned int tmpflags;
1273
1274 #ifndef TARGET_X86_64
1275 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1276 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1277 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1278 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1279
1280 env->regs[R_EDI] = tswapl(sc->edi);
1281 env->regs[R_ESI] = tswapl(sc->esi);
1282 env->regs[R_EBP] = tswapl(sc->ebp);
1283 env->regs[R_ESP] = tswapl(sc->esp);
1284 env->regs[R_EBX] = tswapl(sc->ebx);
1285 env->regs[R_EDX] = tswapl(sc->edx);
1286 env->regs[R_ECX] = tswapl(sc->ecx);
1287 env->regs[R_EAX] = tswapl(sc->eax);
1288
1289 env->eip = tswapl(sc->eip);
1290 #else
1291 env->regs[8] = tswapl(sc->r8);
1292 env->regs[9] = tswapl(sc->r9);
1293 env->regs[10] = tswapl(sc->r10);
1294 env->regs[11] = tswapl(sc->r11);
1295 env->regs[12] = tswapl(sc->r12);
1296 env->regs[13] = tswapl(sc->r13);
1297 env->regs[14] = tswapl(sc->r14);
1298 env->regs[15] = tswapl(sc->r15);
1299
1300 env->regs[R_EDI] = tswapl(sc->rdi);
1301 env->regs[R_ESI] = tswapl(sc->rsi);
1302 env->regs[R_EBP] = tswapl(sc->rbp);
1303 env->regs[R_EBX] = tswapl(sc->rbx);
1304 env->regs[R_EDX] = tswapl(sc->rdx);
1305 env->regs[R_EAX] = tswapl(sc->rax);
1306 env->regs[R_ECX] = tswapl(sc->rcx);
1307 env->regs[R_ESP] = tswapl(sc->rsp);
1308
1309 env->eip = tswapl(sc->rip);
1310 #endif
1311
1312 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1313 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1314
1315 tmpflags = tswapl(sc->eflags);
1316 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1317 // regs->orig_eax = -1; /* disable syscall checks */
1318
1319 fpstate_addr = tswapl(sc->fpstate);
1320 if (fpstate_addr != 0) {
1321 if (!access_ok(VERIFY_READ, fpstate_addr,
1322 sizeof(struct target_fpstate)))
1323 goto badframe;
1324 #ifndef TARGET_X86_64
1325 cpu_x86_frstor(env, fpstate_addr, 1);
1326 #else
1327 cpu_x86_fxrstor(env, fpstate_addr);
1328 #endif
1329 }
1330
1331 return err;
1332 badframe:
1333 return 1;
1334 }
1335
1336 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1337 #ifndef TARGET_X86_64
1338 long do_sigreturn(CPUX86State *env)
1339 {
1340 struct sigframe *frame;
1341 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1342 target_sigset_t target_set;
1343 sigset_t set;
1344 int i;
1345
1346 trace_user_do_sigreturn(env, frame_addr);
1347 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1348 goto badframe;
1349 /* set blocked signals */
1350 __get_user(target_set.sig[0], &frame->sc.oldmask);
1351 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1352 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1353 }
1354
1355 target_to_host_sigset_internal(&set, &target_set);
1356 set_sigmask(&set);
1357
1358 /* restore registers */
1359 if (restore_sigcontext(env, &frame->sc))
1360 goto badframe;
1361 unlock_user_struct(frame, frame_addr, 0);
1362 return -TARGET_QEMU_ESIGRETURN;
1363
1364 badframe:
1365 unlock_user_struct(frame, frame_addr, 0);
1366 force_sig(TARGET_SIGSEGV);
1367 return -TARGET_QEMU_ESIGRETURN;
1368 }
1369 #endif
1370
1371 long do_rt_sigreturn(CPUX86State *env)
1372 {
1373 abi_ulong frame_addr;
1374 struct rt_sigframe *frame;
1375 sigset_t set;
1376
1377 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1378 trace_user_do_rt_sigreturn(env, frame_addr);
1379 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1380 goto badframe;
1381 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1382 set_sigmask(&set);
1383
1384 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1385 goto badframe;
1386 }
1387
1388 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1389 get_sp_from_cpustate(env)) == -EFAULT) {
1390 goto badframe;
1391 }
1392
1393 unlock_user_struct(frame, frame_addr, 0);
1394 return -TARGET_QEMU_ESIGRETURN;
1395
1396 badframe:
1397 unlock_user_struct(frame, frame_addr, 0);
1398 force_sig(TARGET_SIGSEGV);
1399 return -TARGET_QEMU_ESIGRETURN;
1400 }
1401
1402 #elif defined(TARGET_AARCH64)
1403
1404 struct target_sigcontext {
1405 uint64_t fault_address;
1406 /* AArch64 registers */
1407 uint64_t regs[31];
1408 uint64_t sp;
1409 uint64_t pc;
1410 uint64_t pstate;
1411 /* 4K reserved for FP/SIMD state and future expansion */
1412 char __reserved[4096] __attribute__((__aligned__(16)));
1413 };
1414
1415 struct target_ucontext {
1416 abi_ulong tuc_flags;
1417 abi_ulong tuc_link;
1418 target_stack_t tuc_stack;
1419 target_sigset_t tuc_sigmask;
1420 /* glibc uses a 1024-bit sigset_t */
1421 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1422 /* last for future expansion */
1423 struct target_sigcontext tuc_mcontext;
1424 };
1425
1426 /*
1427 * Header to be used at the beginning of structures extending the user
1428 * context. Such structures must be placed after the rt_sigframe on the stack
1429 * and be 16-byte aligned. The last structure must be a dummy one with the
1430 * magic and size set to 0.
1431 */
1432 struct target_aarch64_ctx {
1433 uint32_t magic;
1434 uint32_t size;
1435 };
1436
1437 #define TARGET_FPSIMD_MAGIC 0x46508001
1438
1439 struct target_fpsimd_context {
1440 struct target_aarch64_ctx head;
1441 uint32_t fpsr;
1442 uint32_t fpcr;
1443 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1444 };
1445
1446 /*
1447 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1448 * user space as it will change with the addition of new context. User space
1449 * should check the magic/size information.
1450 */
1451 struct target_aux_context {
1452 struct target_fpsimd_context fpsimd;
1453 /* additional context to be added before "end" */
1454 struct target_aarch64_ctx end;
1455 };
1456
1457 struct target_rt_sigframe {
1458 struct target_siginfo info;
1459 struct target_ucontext uc;
1460 uint64_t fp;
1461 uint64_t lr;
1462 uint32_t tramp[2];
1463 };
1464
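/* Fill in the AArch64 signal frame: general registers, pstate, fault
 * address, signal mask, and the FP/SIMD context in the __reserved area,
 * terminated by a zero magic/size record. */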
1465 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1466 CPUARMState *env, target_sigset_t *set)
1467 {
1468 int i;
1469 struct target_aux_context *aux =
1470 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1471
1472 /* set up the stack frame for unwinding */
1473 __put_user(env->xregs[29], &sf->fp);
1474 __put_user(env->xregs[30], &sf->lr);
1475
1476 for (i = 0; i < 31; i++) {
1477 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1478 }
1479 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1480 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1481 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1482
1483 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1484
1485 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1486 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1487 }
1488
1489 for (i = 0; i < 32; i++) {
1490 uint64_t *q = aa64_vfp_qreg(env, i);
1491 #ifdef TARGET_WORDS_BIGENDIAN
1492 __put_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
1493 __put_user(q[1], &aux->fpsimd.vregs[i * 2]);
1494 #else
1495 __put_user(q[0], &aux->fpsimd.vregs[i * 2]);
1496 __put_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
1497 #endif
1498 }
1499 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1500 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1501 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1502 __put_user(sizeof(struct target_fpsimd_context),
1503 &aux->fpsimd.head.size);
1504
1505 /* set the "end" magic */
1506 __put_user(0, &aux->end.magic);
1507 __put_user(0, &aux->end.size);
1508
1509 return 0;
1510 }
1511
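/* Restore CPU and FP/SIMD state from a signal frame; returns non-zero if
 * the FP/SIMD record has an unexpected magic or size. */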
1512 static int target_restore_sigframe(CPUARMState *env,
1513 struct target_rt_sigframe *sf)
1514 {
1515 sigset_t set;
1516 int i;
1517 struct target_aux_context *aux =
1518 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1519 uint32_t magic, size, fpsr, fpcr;
1520 uint64_t pstate;
1521
1522 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1523 set_sigmask(&set);
1524
1525 for (i = 0; i < 31; i++) {
1526 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1527 }
1528
1529 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1530 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1531 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1532 pstate_write(env, pstate);
1533
1534 __get_user(magic, &aux->fpsimd.head.magic);
1535 __get_user(size, &aux->fpsimd.head.size);
1536
1537 if (magic != TARGET_FPSIMD_MAGIC
1538 || size != sizeof(struct target_fpsimd_context)) {
1539 return 1;
1540 }
1541
1542 for (i = 0; i < 32; i++) {
1543 uint64_t *q = aa64_vfp_qreg(env, i);
1544 #ifdef TARGET_WORDS_BIGENDIAN
1545 __get_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
1546 __get_user(q[1], &aux->fpsimd.vregs[i * 2]);
1547 #else
1548 __get_user(q[0], &aux->fpsimd.vregs[i * 2]);
1549 __get_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
1550 #endif
1551 }
1552 __get_user(fpsr, &aux->fpsimd.fpsr);
1553 vfp_set_fpsr(env, fpsr);
1554 __get_user(fpcr, &aux->fpsimd.fpcr);
1555 vfp_set_fpcr(env, fpcr);
1556
1557 return 0;
1558 }
1559
1560 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1561 {
1562 abi_ulong sp;
1563
1564 sp = env->xregs[31];
1565
1566 /*
1567 * This is the X/Open sanctioned signal stack switching.
1568 */
1569 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1570 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1571 }
1572
1573 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1574
1575 return sp;
1576 }
1577
1578 static void target_setup_frame(int usig, struct target_sigaction *ka,
1579 target_siginfo_t *info, target_sigset_t *set,
1580 CPUARMState *env)
1581 {
1582 struct target_rt_sigframe *frame;
1583 abi_ulong frame_addr, return_addr;
1584
1585 frame_addr = get_sigframe(ka, env);
1586 trace_user_setup_frame(env, frame_addr);
1587 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1588 goto give_sigsegv;
1589 }
1590
1591 __put_user(0, &frame->uc.tuc_flags);
1592 __put_user(0, &frame->uc.tuc_link);
1593
1594 __put_user(target_sigaltstack_used.ss_sp,
1595 &frame->uc.tuc_stack.ss_sp);
1596 __put_user(sas_ss_flags(env->xregs[31]),
1597 &frame->uc.tuc_stack.ss_flags);
1598 __put_user(target_sigaltstack_used.ss_size,
1599 &frame->uc.tuc_stack.ss_size);
1600 target_setup_sigframe(frame, env, set);
1601 if (ka->sa_flags & TARGET_SA_RESTORER) {
1602 return_addr = ka->sa_restorer;
1603 } else {
1604 /*
1605 * mov x8,#__NR_rt_sigreturn; svc #0
1606 * Since these are instructions they need to be put as little-endian
1607 * regardless of target default or current CPU endianness.
1608 */
1609 __put_user_e(0xd2801168, &frame->tramp[0], le);
1610 __put_user_e(0xd4000001, &frame->tramp[1], le);
1611 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1612 }
1613 env->xregs[0] = usig;
1614 env->xregs[31] = frame_addr;
1615 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1616 env->pc = ka->_sa_handler;
1617 env->xregs[30] = return_addr;
1618 if (info) {
1619 tswap_siginfo(&frame->info, info);
1620 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1621 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1622 }
1623
1624 unlock_user_struct(frame, frame_addr, 1);
1625 return;
1626
1627 give_sigsegv:
1628 unlock_user_struct(frame, frame_addr, 1);
1629 force_sigsegv(usig);
1630 }
1631
1632 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1633 target_siginfo_t *info, target_sigset_t *set,
1634 CPUARMState *env)
1635 {
1636 target_setup_frame(sig, ka, info, set, env);
1637 }
1638
1639 static void setup_frame(int sig, struct target_sigaction *ka,
1640 target_sigset_t *set, CPUARMState *env)
1641 {
1642 target_setup_frame(sig, ka, 0, set, env);
1643 }
1644
1645 long do_rt_sigreturn(CPUARMState *env)
1646 {
1647 struct target_rt_sigframe *frame = NULL;
1648 abi_ulong frame_addr = env->xregs[31];
1649
1650 trace_user_do_rt_sigreturn(env, frame_addr);
1651 if (frame_addr & 15) {
1652 goto badframe;
1653 }
1654
1655 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1656 goto badframe;
1657 }
1658
1659 if (target_restore_sigframe(env, frame)) {
1660 goto badframe;
1661 }
1662
1663 if (do_sigaltstack(frame_addr +
1664 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1665 0, get_sp_from_cpustate(env)) == -EFAULT) {
1666 goto badframe;
1667 }
1668
1669 unlock_user_struct(frame, frame_addr, 0);
1670 return -TARGET_QEMU_ESIGRETURN;
1671
1672 badframe:
1673 unlock_user_struct(frame, frame_addr, 0);
1674 force_sig(TARGET_SIGSEGV);
1675 return -TARGET_QEMU_ESIGRETURN;
1676 }
1677
1678 long do_sigreturn(CPUARMState *env)
1679 {
1680 return do_rt_sigreturn(env);
1681 }
1682
1683 #elif defined(TARGET_ARM)
1684
1685 struct target_sigcontext {
1686 abi_ulong trap_no;
1687 abi_ulong error_code;
1688 abi_ulong oldmask;
1689 abi_ulong arm_r0;
1690 abi_ulong arm_r1;
1691 abi_ulong arm_r2;
1692 abi_ulong arm_r3;
1693 abi_ulong arm_r4;
1694 abi_ulong arm_r5;
1695 abi_ulong arm_r6;
1696 abi_ulong arm_r7;
1697 abi_ulong arm_r8;
1698 abi_ulong arm_r9;
1699 abi_ulong arm_r10;
1700 abi_ulong arm_fp;
1701 abi_ulong arm_ip;
1702 abi_ulong arm_sp;
1703 abi_ulong arm_lr;
1704 abi_ulong arm_pc;
1705 abi_ulong arm_cpsr;
1706 abi_ulong fault_address;
1707 };
1708
1709 struct target_ucontext_v1 {
1710 abi_ulong tuc_flags;
1711 abi_ulong tuc_link;
1712 target_stack_t tuc_stack;
1713 struct target_sigcontext tuc_mcontext;
1714 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1715 };
1716
1717 struct target_ucontext_v2 {
1718 abi_ulong tuc_flags;
1719 abi_ulong tuc_link;
1720 target_stack_t tuc_stack;
1721 struct target_sigcontext tuc_mcontext;
1722 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1723 char __unused[128 - sizeof(target_sigset_t)];
1724 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1725 };
1726
1727 struct target_user_vfp {
1728 uint64_t fpregs[32];
1729 abi_ulong fpscr;
1730 };
1731
1732 struct target_user_vfp_exc {
1733 abi_ulong fpexc;
1734 abi_ulong fpinst;
1735 abi_ulong fpinst2;
1736 };
1737
1738 struct target_vfp_sigframe {
1739 abi_ulong magic;
1740 abi_ulong size;
1741 struct target_user_vfp ufp;
1742 struct target_user_vfp_exc ufp_exc;
1743 } __attribute__((__aligned__(8)));
1744
1745 struct target_iwmmxt_sigframe {
1746 abi_ulong magic;
1747 abi_ulong size;
1748 uint64_t regs[16];
1749 /* Note that not all the coprocessor control registers are stored here */
1750 uint32_t wcssf;
1751 uint32_t wcasf;
1752 uint32_t wcgr0;
1753 uint32_t wcgr1;
1754 uint32_t wcgr2;
1755 uint32_t wcgr3;
1756 } __attribute__((__aligned__(8)));
1757
1758 #define TARGET_VFP_MAGIC 0x56465001
1759 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1760
1761 struct sigframe_v1
1762 {
1763 struct target_sigcontext sc;
1764 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1765 abi_ulong retcode;
1766 };
1767
1768 struct sigframe_v2
1769 {
1770 struct target_ucontext_v2 uc;
1771 abi_ulong retcode;
1772 };
1773
1774 struct rt_sigframe_v1
1775 {
1776 abi_ulong pinfo;
1777 abi_ulong puc;
1778 struct target_siginfo info;
1779 struct target_ucontext_v1 uc;
1780 abi_ulong retcode;
1781 };
1782
1783 struct rt_sigframe_v2
1784 {
1785 struct target_siginfo info;
1786 struct target_ucontext_v2 uc;
1787 abi_ulong retcode;
1788 };
1789
1790 #define TARGET_CONFIG_CPU_32 1
1791
1792 /*
1793 * For ARM syscalls, we encode the syscall number into the instruction.
1794 */
1795 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1796 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1797
1798 /*
1799 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1800 * need two 16-bit instructions.
1801 */
1802 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1803 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1804
1805 static const abi_ulong retcodes[4] = {
1806 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1807 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1808 };
1809
1810
1811 static inline int valid_user_regs(CPUARMState *regs)
1812 {
1813 return 1;
1814 }
1815
1816 static void
1817 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1818 CPUARMState *env, abi_ulong mask)
1819 {
1820 __put_user(env->regs[0], &sc->arm_r0);
1821 __put_user(env->regs[1], &sc->arm_r1);
1822 __put_user(env->regs[2], &sc->arm_r2);
1823 __put_user(env->regs[3], &sc->arm_r3);
1824 __put_user(env->regs[4], &sc->arm_r4);
1825 __put_user(env->regs[5], &sc->arm_r5);
1826 __put_user(env->regs[6], &sc->arm_r6);
1827 __put_user(env->regs[7], &sc->arm_r7);
1828 __put_user(env->regs[8], &sc->arm_r8);
1829 __put_user(env->regs[9], &sc->arm_r9);
1830 __put_user(env->regs[10], &sc->arm_r10);
1831 __put_user(env->regs[11], &sc->arm_fp);
1832 __put_user(env->regs[12], &sc->arm_ip);
1833 __put_user(env->regs[13], &sc->arm_sp);
1834 __put_user(env->regs[14], &sc->arm_lr);
1835 __put_user(env->regs[15], &sc->arm_pc);
1836 #ifdef TARGET_CONFIG_CPU_32
1837 __put_user(cpsr_read(env), &sc->arm_cpsr);
1838 #endif
1839
1840 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1841 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1842 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1843 __put_user(mask, &sc->oldmask);
1844 }
1845
1846 static inline abi_ulong
1847 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1848 {
1849 unsigned long sp = regs->regs[13];
1850
1851 /*
1852 * This is the X/Open sanctioned signal stack switching.
1853 */
1854 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1855 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1856 }
1857 /*
1858 * ATPCS B01 mandates 8-byte alignment
1859 */
1860 return (sp - framesize) & ~7;
1861 }
1862
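/* Set up the registers for entry to the signal handler: r0 = signal number,
 * sp = frame address, lr = return trampoline (sa_restorer or a retcode word
 * written into the frame), pc = handler, with the CPSR Thumb bit chosen from
 * the low bit of the handler address. */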
1863 static void
1864 setup_return(CPUARMState *env, struct target_sigaction *ka,
1865 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1866 {
1867 abi_ulong handler = ka->_sa_handler;
1868 abi_ulong retcode;
1869 int thumb = handler & 1;
1870 uint32_t cpsr = cpsr_read(env);
1871
1872 cpsr &= ~CPSR_IT;
1873 if (thumb) {
1874 cpsr |= CPSR_T;
1875 } else {
1876 cpsr &= ~CPSR_T;
1877 }
1878
1879 if (ka->sa_flags & TARGET_SA_RESTORER) {
1880 retcode = ka->sa_restorer;
1881 } else {
1882 unsigned int idx = thumb;
1883
1884 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1885 idx += 2;
1886 }
1887
1888 __put_user(retcodes[idx], rc);
1889
1890 retcode = rc_addr + thumb;
1891 }
1892
1893 env->regs[0] = usig;
1894 env->regs[13] = frame_addr;
1895 env->regs[14] = retcode;
1896 env->regs[15] = handler & (thumb ? ~1 : ~3);
1897 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1898 }
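/*
 * An informal summary of the bit games in setup_return(): bit 0 of the
 * handler address is the usual ARM/Thumb interworking bit, so "handler & 1"
 * selects the Thumb entry state.  The return address handed to the handler
 * in lr carries the same bit ("rc_addr + thumb") so that returning into the
 * trampoline re-enters Thumb state, and the pc itself is masked with ~1
 * (Thumb) or ~3 (ARM) to strip the state/alignment bits before entering
 * the handler.
 */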
1899
1900 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1901 {
1902 int i;
1903 struct target_vfp_sigframe *vfpframe;
1904 vfpframe = (struct target_vfp_sigframe *)regspace;
1905 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1906 __put_user(sizeof(*vfpframe), &vfpframe->size);
1907 for (i = 0; i < 32; i++) {
1908 __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
1909 }
1910 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1911 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1912 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1913 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1914 return (abi_ulong*)(vfpframe+1);
1915 }
1916
1917 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1918 CPUARMState *env)
1919 {
1920 int i;
1921 struct target_iwmmxt_sigframe *iwmmxtframe;
1922 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1923 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1924 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1925 for (i = 0; i < 16; i++) {
1926 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1927 }
1928 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1929 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1930 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1931 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1932 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1933 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1934 return (abi_ulong*)(iwmmxtframe+1);
1935 }
1936
1937 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1938 target_sigset_t *set, CPUARMState *env)
1939 {
1940 struct target_sigaltstack stack;
1941 int i;
1942 abi_ulong *regspace;
1943
1944 /* Clear all the bits of the ucontext we don't use. */
1945 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1946
1947 memset(&stack, 0, sizeof(stack));
1948 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1949 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1950 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1951 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1952
1953 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1954 /* Save coprocessor signal frame. */
1955 regspace = uc->tuc_regspace;
1956 if (arm_feature(env, ARM_FEATURE_VFP)) {
1957 regspace = setup_sigframe_v2_vfp(regspace, env);
1958 }
1959 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1960 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1961 }
1962
1963 /* Write terminating magic word */
1964 __put_user(0, regspace);
1965
1966 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1967 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1968 }
1969 }
1970
1971 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1972 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1973 target_sigset_t *set, CPUARMState *regs)
1974 {
1975 struct sigframe_v1 *frame;
1976 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1977 int i;
1978
1979 trace_user_setup_frame(regs, frame_addr);
1980 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1981 goto sigsegv;
1982 }
1983
1984 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1985
1986 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1987 __put_user(set->sig[i], &frame->extramask[i - 1]);
1988 }
1989
1990 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1991 frame_addr + offsetof(struct sigframe_v1, retcode));
1992
1993 unlock_user_struct(frame, frame_addr, 1);
1994 return;
1995 sigsegv:
1996 force_sigsegv(usig);
1997 }
1998
1999 static void setup_frame_v2(int usig, struct target_sigaction *ka,
2000 target_sigset_t *set, CPUARMState *regs)
2001 {
2002 struct sigframe_v2 *frame;
2003 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2004
2005 trace_user_setup_frame(regs, frame_addr);
2006 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2007 goto sigsegv;
2008 }
2009
2010 setup_sigframe_v2(&frame->uc, set, regs);
2011
2012 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2013 frame_addr + offsetof(struct sigframe_v2, retcode));
2014
2015 unlock_user_struct(frame, frame_addr, 1);
2016 return;
2017 sigsegv:
2018 force_sigsegv(usig);
2019 }
2020
2021 static void setup_frame(int usig, struct target_sigaction *ka,
2022 target_sigset_t *set, CPUARMState *regs)
2023 {
2024 if (get_osversion() >= 0x020612) {
2025 setup_frame_v2(usig, ka, set, regs);
2026 } else {
2027 setup_frame_v1(usig, ka, set, regs);
2028 }
2029 }
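/*
 * The 0x020612 threshold is get_osversion()'s (major << 16 | minor << 8 |
 * micro) encoding of Linux 2.6.18, presumably the point at which the ARM
 * kernel moved to the ucontext-based ("v2") sigframe layout; the rt and
 * sigreturn paths below use the same cutoff.
 */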
2030
2031 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
2032 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
2033 target_siginfo_t *info,
2034 target_sigset_t *set, CPUARMState *env)
2035 {
2036 struct rt_sigframe_v1 *frame;
2037 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2038 struct target_sigaltstack stack;
2039 int i;
2040 abi_ulong info_addr, uc_addr;
2041
2042 trace_user_setup_rt_frame(env, frame_addr);
2043 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2044 goto sigsegv;
2045 }
2046
2047 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
2048 __put_user(info_addr, &frame->pinfo);
2049 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
2050 __put_user(uc_addr, &frame->puc);
2051 tswap_siginfo(&frame->info, info);
2052
2053 /* Clear all the bits of the ucontext we don't use. */
2054 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
2055
2056 memset(&stack, 0, sizeof(stack));
2057 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
2058 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
2059 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
2060 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
2061
2062 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
2063 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2064 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
2065 }
2066
2067 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2068 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
2069
2070 env->regs[1] = info_addr;
2071 env->regs[2] = uc_addr;
2072
2073 unlock_user_struct(frame, frame_addr, 1);
2074 return;
2075 sigsegv:
2076 force_sigsegv(usig);
2077 }
2078
2079 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2080 target_siginfo_t *info,
2081 target_sigset_t *set, CPUARMState *env)
2082 {
2083 struct rt_sigframe_v2 *frame;
2084 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2085 abi_ulong info_addr, uc_addr;
2086
2087 trace_user_setup_rt_frame(env, frame_addr);
2088 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2089 goto sigsegv;
2090 }
2091
2092 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2093 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2094 tswap_siginfo(&frame->info, info);
2095
2096 setup_sigframe_v2(&frame->uc, set, env);
2097
2098 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2099 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2100
2101 env->regs[1] = info_addr;
2102 env->regs[2] = uc_addr;
2103
2104 unlock_user_struct(frame, frame_addr, 1);
2105 return;
2106 sigsegv:
2107 force_sigsegv(usig);
2108 }
2109
2110 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2111 target_siginfo_t *info,
2112 target_sigset_t *set, CPUARMState *env)
2113 {
2114 if (get_osversion() >= 0x020612) {
2115 setup_rt_frame_v2(usig, ka, info, set, env);
2116 } else {
2117 setup_rt_frame_v1(usig, ka, info, set, env);
2118 }
2119 }
2120
2121 static int
2122 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
2123 {
2124 int err = 0;
2125 uint32_t cpsr;
2126
2127 __get_user(env->regs[0], &sc->arm_r0);
2128 __get_user(env->regs[1], &sc->arm_r1);
2129 __get_user(env->regs[2], &sc->arm_r2);
2130 __get_user(env->regs[3], &sc->arm_r3);
2131 __get_user(env->regs[4], &sc->arm_r4);
2132 __get_user(env->regs[5], &sc->arm_r5);
2133 __get_user(env->regs[6], &sc->arm_r6);
2134 __get_user(env->regs[7], &sc->arm_r7);
2135 __get_user(env->regs[8], &sc->arm_r8);
2136 __get_user(env->regs[9], &sc->arm_r9);
2137 __get_user(env->regs[10], &sc->arm_r10);
2138 __get_user(env->regs[11], &sc->arm_fp);
2139 __get_user(env->regs[12], &sc->arm_ip);
2140 __get_user(env->regs[13], &sc->arm_sp);
2141 __get_user(env->regs[14], &sc->arm_lr);
2142 __get_user(env->regs[15], &sc->arm_pc);
2143 #ifdef TARGET_CONFIG_CPU_32
2144 __get_user(cpsr, &sc->arm_cpsr);
2145 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
2146 #endif
2147
2148 err |= !valid_user_regs(env);
2149
2150 return err;
2151 }
2152
2153 static long do_sigreturn_v1(CPUARMState *env)
2154 {
2155 abi_ulong frame_addr;
2156 struct sigframe_v1 *frame = NULL;
2157 target_sigset_t set;
2158 sigset_t host_set;
2159 int i;
2160
2161 /*
2162 * Since we stacked the signal on a 64-bit boundary,
2163 * 'sp' should be 8-byte aligned here. If it's not,
2164 * the user is trying to mess with us.
2165 */
2166 frame_addr = env->regs[13];
2167 trace_user_do_sigreturn(env, frame_addr);
2168 if (frame_addr & 7) {
2169 goto badframe;
2170 }
2171
2172 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2173 goto badframe;
2174 }
2175
2176 __get_user(set.sig[0], &frame->sc.oldmask);
2177 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2178 __get_user(set.sig[i], &frame->extramask[i - 1]);
2179 }
2180
2181 target_to_host_sigset_internal(&host_set, &set);
2182 set_sigmask(&host_set);
2183
2184 if (restore_sigcontext(env, &frame->sc)) {
2185 goto badframe;
2186 }
2187
2188 #if 0
2189 /* Send SIGTRAP if we're single-stepping */
2190 if (ptrace_cancel_bpt(current))
2191 send_sig(SIGTRAP, current, 1);
2192 #endif
2193 unlock_user_struct(frame, frame_addr, 0);
2194 return -TARGET_QEMU_ESIGRETURN;
2195
2196 badframe:
2197 force_sig(TARGET_SIGSEGV);
2198 return -TARGET_QEMU_ESIGRETURN;
2199 }
2200
2201 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
2202 {
2203 int i;
2204 abi_ulong magic, sz;
2205 uint32_t fpscr, fpexc;
2206 struct target_vfp_sigframe *vfpframe;
2207 vfpframe = (struct target_vfp_sigframe *)regspace;
2208
2209 __get_user(magic, &vfpframe->magic);
2210 __get_user(sz, &vfpframe->size);
2211 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
2212 return 0;
2213 }
2214 for (i = 0; i < 32; i++) {
2215 __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
2216 }
2217 __get_user(fpscr, &vfpframe->ufp.fpscr);
2218 vfp_set_fpscr(env, fpscr);
2219 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2220 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2221 * and the exception flag is cleared
2222 */
2223 fpexc |= (1 << 30);
2224 fpexc &= ~((1 << 31) | (1 << 28));
2225 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2226 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2227 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2228 return (abi_ulong*)(vfpframe + 1);
2229 }
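/*
 * For reference, the FPEXC bits touched above are: bit 30 is FPEXC.EN
 * (enable, forced on), bit 31 is FPEXC.EX and bit 28 is FPEXC.FP2V, both
 * cleared so that no exceptional state or stale FPINST2 validity is
 * carried back in from the signal frame.
 */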
2230
2231 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2232 abi_ulong *regspace)
2233 {
2234 int i;
2235 abi_ulong magic, sz;
2236 struct target_iwmmxt_sigframe *iwmmxtframe;
2237 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2238
2239 __get_user(magic, &iwmmxtframe->magic);
2240 __get_user(sz, &iwmmxtframe->size);
2241 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2242 return 0;
2243 }
2244 for (i = 0; i < 16; i++) {
2245 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2246 }
2247 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2248 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2249 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2250 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2251 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2252 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2253 return (abi_ulong*)(iwmmxtframe + 1);
2254 }
2255
2256 static int do_sigframe_return_v2(CPUARMState *env,
2257 target_ulong context_addr,
2258 struct target_ucontext_v2 *uc)
2259 {
2260 sigset_t host_set;
2261 abi_ulong *regspace;
2262
2263 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2264 set_sigmask(&host_set);
2265
2266 if (restore_sigcontext(env, &uc->tuc_mcontext))
2267 return 1;
2268
2269 /* Restore coprocessor signal frame */
2270 regspace = uc->tuc_regspace;
2271 if (arm_feature(env, ARM_FEATURE_VFP)) {
2272 regspace = restore_sigframe_v2_vfp(env, regspace);
2273 if (!regspace) {
2274 return 1;
2275 }
2276 }
2277 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2278 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2279 if (!regspace) {
2280 return 1;
2281 }
2282 }
2283
2284 if (do_sigaltstack(context_addr
2285 + offsetof(struct target_ucontext_v2, tuc_stack),
2286 0, get_sp_from_cpustate(env)) == -EFAULT) {
2287 return 1;
2288 }
2289
2290 #if 0
2291 /* Send SIGTRAP if we're single-stepping */
2292 if (ptrace_cancel_bpt(current))
2293 send_sig(SIGTRAP, current, 1);
2294 #endif
2295
2296 return 0;
2297 }
2298
2299 static long do_sigreturn_v2(CPUARMState *env)
2300 {
2301 abi_ulong frame_addr;
2302 struct sigframe_v2 *frame = NULL;
2303
2304 /*
2305 * Since we stacked the signal on a 64-bit boundary,
2306 * 'sp' should be 8-byte aligned here. If it's not,
2307 * the user is trying to mess with us.
2308 */
2309 frame_addr = env->regs[13];
2310 trace_user_do_sigreturn(env, frame_addr);
2311 if (frame_addr & 7) {
2312 goto badframe;
2313 }
2314
2315 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2316 goto badframe;
2317 }
2318
2319 if (do_sigframe_return_v2(env,
2320 frame_addr
2321 + offsetof(struct sigframe_v2, uc),
2322 &frame->uc)) {
2323 goto badframe;
2324 }
2325
2326 unlock_user_struct(frame, frame_addr, 0);
2327 return -TARGET_QEMU_ESIGRETURN;
2328
2329 badframe:
2330 unlock_user_struct(frame, frame_addr, 0);
2331 force_sig(TARGET_SIGSEGV);
2332 return -TARGET_QEMU_ESIGRETURN;
2333 }
2334
2335 long do_sigreturn(CPUARMState *env)
2336 {
2337 if (get_osversion() >= 0x020612) {
2338 return do_sigreturn_v2(env);
2339 } else {
2340 return do_sigreturn_v1(env);
2341 }
2342 }
2343
2344 static long do_rt_sigreturn_v1(CPUARMState *env)
2345 {
2346 abi_ulong frame_addr;
2347 struct rt_sigframe_v1 *frame = NULL;
2348 sigset_t host_set;
2349
2350 /*
2351 * Since we stacked the signal on a 64-bit boundary,
2352 * 'sp' should be 8-byte aligned here. If it's not,
2353 * the user is trying to mess with us.
2354 */
2355 frame_addr = env->regs[13];
2356 trace_user_do_rt_sigreturn(env, frame_addr);
2357 if (frame_addr & 7) {
2358 goto badframe;
2359 }
2360
2361 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2362 goto badframe;
2363 }
2364
2365 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2366 set_sigmask(&host_set);
2367
2368 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2369 goto badframe;
2370 }
2371
2372 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2373 goto badframe;
2374
2375 #if 0
2376 /* Send SIGTRAP if we're single-stepping */
2377 if (ptrace_cancel_bpt(current))
2378 send_sig(SIGTRAP, current, 1);
2379 #endif
2380 unlock_user_struct(frame, frame_addr, 0);
2381 return -TARGET_QEMU_ESIGRETURN;
2382
2383 badframe:
2384 unlock_user_struct(frame, frame_addr, 0);
2385 force_sig(TARGET_SIGSEGV);
2386 return -TARGET_QEMU_ESIGRETURN;
2387 }
2388
2389 static long do_rt_sigreturn_v2(CPUARMState *env)
2390 {
2391 abi_ulong frame_addr;
2392 struct rt_sigframe_v2 *frame = NULL;
2393
2394 /*
2395 * Since we stacked the signal on a 64-bit boundary,
2396 * 'sp' should be 8-byte aligned here. If it's not,
2397 * the user is trying to mess with us.
2398 */
2399 frame_addr = env->regs[13];
2400 trace_user_do_rt_sigreturn(env, frame_addr);
2401 if (frame_addr & 7) {
2402 goto badframe;
2403 }
2404
2405 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2406 goto badframe;
2407 }
2408
2409 if (do_sigframe_return_v2(env,
2410 frame_addr
2411 + offsetof(struct rt_sigframe_v2, uc),
2412 &frame->uc)) {
2413 goto badframe;
2414 }
2415
2416 unlock_user_struct(frame, frame_addr, 0);
2417 return -TARGET_QEMU_ESIGRETURN;
2418
2419 badframe:
2420 unlock_user_struct(frame, frame_addr, 0);
2421 force_sig(TARGET_SIGSEGV);
2422 return -TARGET_QEMU_ESIGRETURN;
2423 }
2424
2425 long do_rt_sigreturn(CPUARMState *env)
2426 {
2427 if (get_osversion() >= 0x020612) {
2428 return do_rt_sigreturn_v2(env);
2429 } else {
2430 return do_rt_sigreturn_v1(env);
2431 }
2432 }
2433
2434 #elif defined(TARGET_SPARC)
2435
2436 #define __SUNOS_MAXWIN 31
2437
2438 /* This is what SunOS does, so shall I. */
2439 struct target_sigcontext {
2440 abi_ulong sigc_onstack; /* state to restore */
2441
2442 abi_ulong sigc_mask; /* sigmask to restore */
2443 abi_ulong sigc_sp; /* stack pointer */
2444 abi_ulong sigc_pc; /* program counter */
2445 abi_ulong sigc_npc; /* next program counter */
2446 abi_ulong sigc_psr; /* for condition codes etc */
2447 abi_ulong sigc_g1; /* User uses these two registers */
2448 abi_ulong sigc_o0; /* within the trampoline code. */
2449
2450 /* Now comes information regarding the user's window set
2451 * at the time of the signal.
2452 */
2453 abi_ulong sigc_oswins; /* outstanding windows */
2454
2455 /* stack ptrs for each regwin buf */
2456 char *sigc_spbuf[__SUNOS_MAXWIN];
2457
2458 /* Windows to restore after signal */
2459 struct {
2460 abi_ulong locals[8];
2461 abi_ulong ins[8];
2462 } sigc_wbuf[__SUNOS_MAXWIN];
2463 };
2464 /* A Sparc stack frame */
2465 struct sparc_stackf {
2466 abi_ulong locals[8];
2467 abi_ulong ins[8];
2468 /* It's simpler to treat fp and callers_pc as elements of ins[]
2469 * since we never need to access them ourselves.
2470 */
2471 char *structptr;
2472 abi_ulong xargs[6];
2473 abi_ulong xxargs[1];
2474 };
2475
2476 typedef struct {
2477 struct {
2478 abi_ulong psr;
2479 abi_ulong pc;
2480 abi_ulong npc;
2481 abi_ulong y;
2482 abi_ulong u_regs[16]; /* globals and ins */
2483 } si_regs;
2484 int si_mask;
2485 } __siginfo_t;
2486
2487 typedef struct {
2488 abi_ulong si_float_regs[32];
2489 unsigned long si_fsr;
2490 unsigned long si_fpqdepth;
2491 struct {
2492 unsigned long *insn_addr;
2493 unsigned long insn;
2494 } si_fpqueue [16];
2495 } qemu_siginfo_fpu_t;
2496
2497
2498 struct target_signal_frame {
2499 struct sparc_stackf ss;
2500 __siginfo_t info;
2501 abi_ulong fpu_save;
2502 abi_ulong insns[2] __attribute__ ((aligned (8)));
2503 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2504 abi_ulong extra_size; /* Should be 0 */
2505 qemu_siginfo_fpu_t fpu_state;
2506 };
2507 struct target_rt_signal_frame {
2508 struct sparc_stackf ss;
2509 siginfo_t info;
2510 abi_ulong regs[20];
2511 sigset_t mask;
2512 abi_ulong fpu_save;
2513 unsigned int insns[2];
2514 stack_t stack;
2515 unsigned int extra_size; /* Should be 0 */
2516 qemu_siginfo_fpu_t fpu_state;
2517 };
2518
2519 #define UREG_O0 16
2520 #define UREG_O6 22
2521 #define UREG_I0 0
2522 #define UREG_I1 1
2523 #define UREG_I2 2
2524 #define UREG_I3 3
2525 #define UREG_I4 4
2526 #define UREG_I5 5
2527 #define UREG_I6 6
2528 #define UREG_I7 7
2529 #define UREG_L0 8
2530 #define UREG_FP UREG_I6
2531 #define UREG_SP UREG_O6
2532
2533 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2534 CPUSPARCState *env,
2535 unsigned long framesize)
2536 {
2537 abi_ulong sp;
2538
2539 sp = env->regwptr[UREG_FP];
2540
2541 /* This is the X/Open sanctioned signal stack switching. */
2542 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2543 if (!on_sig_stack(sp)
2544 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2545 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2546 }
2547 }
2548 return sp - framesize;
2549 }
2550
2551 static int
2552 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2553 {
2554 int err = 0, i;
2555
2556 __put_user(env->psr, &si->si_regs.psr);
2557 __put_user(env->pc, &si->si_regs.pc);
2558 __put_user(env->npc, &si->si_regs.npc);
2559 __put_user(env->y, &si->si_regs.y);
2560 for (i=0; i < 8; i++) {
2561 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2562 }
2563 for (i=0; i < 8; i++) {
2564 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2565 }
2566 __put_user(mask, &si->si_mask);
2567 return err;
2568 }
2569
2570 #if 0
2571 static int
2572 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2573 CPUSPARCState *env, unsigned long mask)
2574 {
2575 int err = 0;
2576
2577 __put_user(mask, &sc->sigc_mask);
2578 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2579 __put_user(env->pc, &sc->sigc_pc);
2580 __put_user(env->npc, &sc->sigc_npc);
2581 __put_user(env->psr, &sc->sigc_psr);
2582 __put_user(env->gregs[1], &sc->sigc_g1);
2583 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2584
2585 return err;
2586 }
2587 #endif
2588 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2589
2590 static void setup_frame(int sig, struct target_sigaction *ka,
2591 target_sigset_t *set, CPUSPARCState *env)
2592 {
2593 abi_ulong sf_addr;
2594 struct target_signal_frame *sf;
2595 int sigframe_size, err, i;
2596
2597 /* 1. Make sure everything is clean */
2598 //synchronize_user_stack();
2599
2600 sigframe_size = NF_ALIGNEDSZ;
2601 sf_addr = get_sigframe(ka, env, sigframe_size);
2602 trace_user_setup_frame(env, sf_addr);
2603
2604 sf = lock_user(VERIFY_WRITE, sf_addr,
2605 sizeof(struct target_signal_frame), 0);
2606 if (!sf) {
2607 goto sigsegv;
2608 }
2609 #if 0
2610 if (invalid_frame_pointer(sf, sigframe_size))
2611 goto sigill_and_return;
2612 #endif
2613 /* 2. Save the current process state */
2614 err = setup___siginfo(&sf->info, env, set->sig[0]);
2615 __put_user(0, &sf->extra_size);
2616
2617 //save_fpu_state(regs, &sf->fpu_state);
2618 //__put_user(&sf->fpu_state, &sf->fpu_save);
2619
2620 __put_user(set->sig[0], &sf->info.si_mask);
2621 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2622 __put_user(set->sig[i + 1], &sf->extramask[i]);
2623 }
2624
2625 for (i = 0; i < 8; i++) {
2626 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2627 }
2628 for (i = 0; i < 8; i++) {
2629 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2630 }
2631 if (err)
2632 goto sigsegv;
2633
2634 /* 3. signal handler back-trampoline and parameters */
2635 env->regwptr[UREG_FP] = sf_addr;
2636 env->regwptr[UREG_I0] = sig;
2637 env->regwptr[UREG_I1] = sf_addr +
2638 offsetof(struct target_signal_frame, info);
2639 env->regwptr[UREG_I2] = sf_addr +
2640 offsetof(struct target_signal_frame, info);
2641
2642 /* 4. signal handler */
2643 env->pc = ka->_sa_handler;
2644 env->npc = (env->pc + 4);
2645 /* 5. return to kernel instructions */
2646 if (ka->sa_restorer) {
2647 env->regwptr[UREG_I7] = ka->sa_restorer;
2648 } else {
2649 uint32_t val32;
2650
2651 env->regwptr[UREG_I7] = sf_addr +
2652 offsetof(struct target_signal_frame, insns) - 2 * 4;
2653
2654 /* mov __NR_sigreturn, %g1 */
2655 val32 = 0x821020d8;
2656 __put_user(val32, &sf->insns[0]);
2657
2658 /* t 0x10 */
2659 val32 = 0x91d02010;
2660 __put_user(val32, &sf->insns[1]);
2661 if (err)
2662 goto sigsegv;
2663
2664 /* Flush instruction space. */
2665 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2666 // tb_flush(env);
2667 }
2668 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2669 return;
2670 #if 0
2671 sigill_and_return:
2672 force_sig(TARGET_SIGILL);
2673 #endif
2674 sigsegv:
2675 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2676 force_sigsegv(sig);
2677 }
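/*
 * The two trampoline words written above decode (illustratively) as:
 *     0x821020d8    or  %g0, 0xd8, %g1    ! mov __NR_sigreturn, %g1
 *     0x91d02010    ta  0x10              ! enter the kernel
 * 0xd8 is 216, which appears to match the sparc32 __NR_sigreturn; the
 * hard-coded constant would need updating if that number ever differed.
 */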
2678
2679 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2680 target_siginfo_t *info,
2681 target_sigset_t *set, CPUSPARCState *env)
2682 {
2683 fprintf(stderr, "setup_rt_frame: not implemented\n");
2684 }
2685
2686 long do_sigreturn(CPUSPARCState *env)
2687 {
2688 abi_ulong sf_addr;
2689 struct target_signal_frame *sf;
2690 uint32_t up_psr, pc, npc;
2691 target_sigset_t set;
2692 sigset_t host_set;
2693 int err=0, i;
2694
2695 sf_addr = env->regwptr[UREG_FP];
2696 trace_user_do_sigreturn(env, sf_addr);
2697 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2698 goto segv_and_exit;
2699 }
2700
2701 /* 1. Make sure we are not getting garbage from the user */
2702
2703 if (sf_addr & 3)
2704 goto segv_and_exit;
2705
2706 __get_user(pc, &sf->info.si_regs.pc);
2707 __get_user(npc, &sf->info.si_regs.npc);
2708
2709 if ((pc | npc) & 3) {
2710 goto segv_and_exit;
2711 }
2712
2713 /* 2. Restore the state */
2714 __get_user(up_psr, &sf->info.si_regs.psr);
2715
2716 /* User can only change condition codes and FPU enabling in %psr. */
2717 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2718 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2719
2720 env->pc = pc;
2721 env->npc = npc;
2722 __get_user(env->y, &sf->info.si_regs.y);
2723 for (i=0; i < 8; i++) {
2724 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2725 }
2726 for (i=0; i < 8; i++) {
2727 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2728 }
2729
2730 /* FIXME: implement FPU save/restore:
2731 * __get_user(fpu_save, &sf->fpu_save);
2732 * if (fpu_save)
2733 * err |= restore_fpu_state(env, fpu_save);
2734 */
2735
2736 /* This is pretty much atomic; no amount of locking would prevent
2737 * the races that exist anyway.
2738 */
2739 __get_user(set.sig[0], &sf->info.si_mask);
2740 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2741 __get_user(set.sig[i], &sf->extramask[i - 1]);
2742 }
2743
2744 target_to_host_sigset_internal(&host_set, &set);
2745 set_sigmask(&host_set);
2746
2747 if (err) {
2748 goto segv_and_exit;
2749 }
2750 unlock_user_struct(sf, sf_addr, 0);
2751 return -TARGET_QEMU_ESIGRETURN;
2752
2753 segv_and_exit:
2754 unlock_user_struct(sf, sf_addr, 0);
2755 force_sig(TARGET_SIGSEGV);
2756 return -TARGET_QEMU_ESIGRETURN;
2757 }
2758
2759 long do_rt_sigreturn(CPUSPARCState *env)
2760 {
2761 trace_user_do_rt_sigreturn(env, 0);
2762 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2763 return -TARGET_ENOSYS;
2764 }
2765
2766 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2767 #define SPARC_MC_TSTATE 0
2768 #define SPARC_MC_PC 1
2769 #define SPARC_MC_NPC 2
2770 #define SPARC_MC_Y 3
2771 #define SPARC_MC_G1 4
2772 #define SPARC_MC_G2 5
2773 #define SPARC_MC_G3 6
2774 #define SPARC_MC_G4 7
2775 #define SPARC_MC_G5 8
2776 #define SPARC_MC_G6 9
2777 #define SPARC_MC_G7 10
2778 #define SPARC_MC_O0 11
2779 #define SPARC_MC_O1 12
2780 #define SPARC_MC_O2 13
2781 #define SPARC_MC_O3 14
2782 #define SPARC_MC_O4 15
2783 #define SPARC_MC_O5 16
2784 #define SPARC_MC_O6 17
2785 #define SPARC_MC_O7 18
2786 #define SPARC_MC_NGREG 19
2787
2788 typedef abi_ulong target_mc_greg_t;
2789 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
2790
2791 struct target_mc_fq {
2792 abi_ulong *mcfq_addr;
2793 uint32_t mcfq_insn;
2794 };
2795
2796 struct target_mc_fpu {
2797 union {
2798 uint32_t sregs[32];
2799 uint64_t dregs[32];
2800 //uint128_t qregs[16];
2801 } mcfpu_fregs;
2802 abi_ulong mcfpu_fsr;
2803 abi_ulong mcfpu_fprs;
2804 abi_ulong mcfpu_gsr;
2805 struct target_mc_fq *mcfpu_fq;
2806 unsigned char mcfpu_qcnt;
2807 unsigned char mcfpu_qentsz;
2808 unsigned char mcfpu_enab;
2809 };
2810 typedef struct target_mc_fpu target_mc_fpu_t;
2811
2812 typedef struct {
2813 target_mc_gregset_t mc_gregs;
2814 target_mc_greg_t mc_fp;
2815 target_mc_greg_t mc_i7;
2816 target_mc_fpu_t mc_fpregs;
2817 } target_mcontext_t;
2818
2819 struct target_ucontext {
2820 struct target_ucontext *tuc_link;
2821 abi_ulong tuc_flags;
2822 target_sigset_t tuc_sigmask;
2823 target_mcontext_t tuc_mcontext;
2824 };
2825
2826 /* A V9 register window */
2827 struct target_reg_window {
2828 abi_ulong locals[8];
2829 abi_ulong ins[8];
2830 };
2831
2832 #define TARGET_STACK_BIAS 2047
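/*
 * 2047 is the SPARC V9 ABI "stack bias": on 64-bit SPARC the register
 * save area actually lives at %sp + 2047, which is why the register
 * window address below is computed as TARGET_STACK_BIAS + regwptr[UREG_I6].
 */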
2833
2834 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2835 void sparc64_set_context(CPUSPARCState *env)
2836 {
2837 abi_ulong ucp_addr;
2838 struct target_ucontext *ucp;
2839 target_mc_gregset_t *grp;
2840 abi_ulong pc, npc, tstate;
2841 abi_ulong fp, i7, w_addr;
2842 unsigned int i;
2843
2844 ucp_addr = env->regwptr[UREG_I0];
2845 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2846 goto do_sigsegv;
2847 }
2848 grp = &ucp->tuc_mcontext.mc_gregs;
2849 __get_user(pc, &((*grp)[SPARC_MC_PC]));
2850 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
2851 if ((pc | npc) & 3) {
2852 goto do_sigsegv;
2853 }
2854 if (env->regwptr[UREG_I1]) {
2855 target_sigset_t target_set;
2856 sigset_t set;
2857
2858 if (TARGET_NSIG_WORDS == 1) {
2859 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2860 } else {
2861 abi_ulong *src, *dst;
2862 src = ucp->tuc_sigmask.sig;
2863 dst = target_set.sig;
2864 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2865 __get_user(*dst, src);
2866 }
2867 }
2868 target_to_host_sigset_internal(&set, &target_set);
2869 set_sigmask(&set);
2870 }
2871 env->pc = pc;
2872 env->npc = npc;
2873 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
2874 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
2875 env->asi = (tstate >> 24) & 0xff;
2876 cpu_put_ccr(env, tstate >> 32);
2877 cpu_put_cwp64(env, tstate & 0x1f);
2878 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
2879 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
2880 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
2881 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
2882 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
2883 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
2884 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
2885 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
2886 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
2887 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
2888 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
2889 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
2890 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
2891 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
2892 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
2893
2894 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2895 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2896
2897 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2898 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2899 abi_ulong) != 0) {
2900 goto do_sigsegv;
2901 }
2902 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2903 abi_ulong) != 0) {
2904 goto do_sigsegv;
2905 }
2906 /* FIXME this does not match how the kernel handles the FPU in
2907 * its sparc64_set_context implementation. In particular the FPU
2908 * is only restored if fenab is non-zero in:
2909 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2910 */
2911 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2912 {
2913 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2914 for (i = 0; i < 64; i++, src++) {
2915 if (i & 1) {
2916 __get_user(env->fpr[i/2].l.lower, src);
2917 } else {
2918 __get_user(env->fpr[i/2].l.upper, src);
2919 }
2920 }
2921 }
2922 __get_user(env->fsr,
2923 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2924 __get_user(env->gsr,
2925 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2926 unlock_user_struct(ucp, ucp_addr, 0);
2927 return;
2928 do_sigsegv:
2929 unlock_user_struct(ucp, ucp_addr, 0);
2930 force_sig(TARGET_SIGSEGV);
2931 }
2932
2933 void sparc64_get_context(CPUSPARCState *env)
2934 {
2935 abi_ulong ucp_addr;
2936 struct target_ucontext *ucp;
2937 target_mc_gregset_t *grp;
2938 target_mcontext_t *mcp;
2939 abi_ulong fp, i7, w_addr;
2940 int err;
2941 unsigned int i;
2942 target_sigset_t target_set;
2943 sigset_t set;
2944
2945 ucp_addr = env->regwptr[UREG_I0];
2946 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2947 goto do_sigsegv;
2948 }
2949
2950 mcp = &ucp->tuc_mcontext;
2951 grp = &mcp->mc_gregs;
2952
2953 /* Skip over the trap instruction, first. */
2954 env->pc = env->npc;
2955 env->npc += 4;
2956
2957 /* If we're only reading the signal mask then do_sigprocmask()
2958 * is guaranteed not to fail, which is important because we don't
2959 * have any way to signal a failure or restart this operation since
2960 * this is not a normal syscall.
2961 */
2962 err = do_sigprocmask(0, NULL, &set);
2963 assert(err == 0);
2964 host_to_target_sigset_internal(&target_set, &set);
2965 if (TARGET_NSIG_WORDS == 1) {
2966 __put_user(target_set.sig[0],
2967 (abi_ulong *)&ucp->tuc_sigmask);
2968 } else {
2969 abi_ulong *src, *dst;
2970 src = target_set.sig;
2971 dst = ucp->tuc_sigmask.sig;
2972 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2973 __put_user(*src, dst);
2974 }
2975 if (err)
2976 goto do_sigsegv;
2977 }
2978
2979 /* XXX: tstate must be saved properly */
2980 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
2981 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
2982 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
2983 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
2984 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
2985 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
2986 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
2987 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
2988 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
2989 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
2990 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
2991 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
2992 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
2993 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
2994 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
2995 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
2996 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
2997 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
2998 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
2999
3000 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
3001 fp = i7 = 0;
3002 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
3003 abi_ulong) != 0) {
3004 goto do_sigsegv;
3005 }
3006 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3007 abi_ulong) != 0) {
3008 goto do_sigsegv;
3009 }
3010 __put_user(fp, &(mcp->mc_fp));
3011 __put_user(i7, &(mcp->mc_i7));
3012
3013 {
3014 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3015 for (i = 0; i < 64; i++, dst++) {
3016 if (i & 1) {
3017 __put_user(env->fpr[i/2].l.lower, dst);
3018 } else {
3019 __put_user(env->fpr[i/2].l.upper, dst);
3020 }
3021 }
3022 }
3023 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3024 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3025 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3026
3027 if (err)
3028 goto do_sigsegv;
3029 unlock_user_struct(ucp, ucp_addr, 1);
3030 return;
3031 do_sigsegv:
3032 unlock_user_struct(ucp, ucp_addr, 1);
3033 force_sig(TARGET_SIGSEGV);
3034 }
3035 #endif
3036 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3037
3038 # if defined(TARGET_ABI_MIPSO32)
3039 struct target_sigcontext {
3040 uint32_t sc_regmask; /* Unused */
3041 uint32_t sc_status;
3042 uint64_t sc_pc;
3043 uint64_t sc_regs[32];
3044 uint64_t sc_fpregs[32];
3045 uint32_t sc_ownedfp; /* Unused */
3046 uint32_t sc_fpc_csr;
3047 uint32_t sc_fpc_eir; /* Unused */
3048 uint32_t sc_used_math;
3049 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
3050 uint32_t pad0;
3051 uint64_t sc_mdhi;
3052 uint64_t sc_mdlo;
3053 target_ulong sc_hi1; /* Was sc_cause */
3054 target_ulong sc_lo1; /* Was sc_badvaddr */
3055 target_ulong sc_hi2; /* Was sc_sigset[4] */
3056 target_ulong sc_lo2;
3057 target_ulong sc_hi3;
3058 target_ulong sc_lo3;
3059 };
3060 # else /* N32 || N64 */
3061 struct target_sigcontext {
3062 uint64_t sc_regs[32];
3063 uint64_t sc_fpregs[32];
3064 uint64_t sc_mdhi;
3065 uint64_t sc_hi1;
3066 uint64_t sc_hi2;
3067 uint64_t sc_hi3;
3068 uint64_t sc_mdlo;
3069 uint64_t sc_lo1;
3070 uint64_t sc_lo2;
3071 uint64_t sc_lo3;
3072 uint64_t sc_pc;
3073 uint32_t sc_fpc_csr;
3074 uint32_t sc_used_math;
3075 uint32_t sc_dsp;
3076 uint32_t sc_reserved;
3077 };
3078 # endif /* O32 */
3079
3080 struct sigframe {
3081 uint32_t sf_ass[4]; /* argument save space for o32 */
3082 uint32_t sf_code[2]; /* signal trampoline */
3083 struct target_sigcontext sf_sc;
3084 target_sigset_t sf_mask;
3085 };
3086
3087 struct target_ucontext {
3088 target_ulong tuc_flags;
3089 target_ulong tuc_link;
3090 target_stack_t tuc_stack;
3091 target_ulong pad0;
3092 struct target_sigcontext tuc_mcontext;
3093 target_sigset_t tuc_sigmask;
3094 };
3095
3096 struct target_rt_sigframe {
3097 uint32_t rs_ass[4]; /* argument save space for o32 */
3098 uint32_t rs_code[2]; /* signal trampoline */
3099 struct target_siginfo rs_info;
3100 struct target_ucontext rs_uc;
3101 };
3102
3103 /* Install trampoline to jump back from signal handler */
3104 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
3105 {
3106 int err = 0;
3107
3108 /*
3109 * Set up the return code ...
3110 *
3111 * li v0, __NR__foo_sigreturn
3112 * syscall
3113 */
3114
3115 __put_user(0x24020000 + syscall, tramp + 0);
3116 __put_user(0x0000000c , tramp + 1);
3117 return err;
3118 }
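/*
 * Decoded, the two words are (sketch):
 *     0x24020000 | nr    addiu $v0, $zero, nr     # the "li v0, nr" above
 *     0x0000000c         syscall
 * much like the trampoline older MIPS kernels placed on the user stack.
 */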
3119
3120 static inline void setup_sigcontext(CPUMIPSState *regs,
3121 struct target_sigcontext *sc)
3122 {
3123 int i;
3124
3125 __put_user(exception_resume_pc(regs), &sc->sc_pc);
3126 regs->hflags &= ~MIPS_HFLAG_BMASK;
3127
3128 __put_user(0, &sc->sc_regs[0]);
3129 for (i = 1; i < 32; ++i) {
3130 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3131 }
3132
3133 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3134 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3135
3136 /* Rather than checking for dsp existence, always copy. The storage
3137 would just be garbage otherwise. */
3138 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
3139 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
3140 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
3141 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
3142 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
3143 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
3144 {
3145 uint32_t dsp = cpu_rddsp(0x3ff, regs);
3146 __put_user(dsp, &sc->sc_dsp);
3147 }
3148
3149 __put_user(1, &sc->sc_used_math);
3150
3151 for (i = 0; i < 32; ++i) {
3152 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3153 }
3154 }
3155
3156 static inline void
3157 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
3158 {
3159 int i;
3160
3161 __get_user(regs->CP0_EPC, &sc->sc_pc);
3162
3163 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3164 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3165
3166 for (i = 1; i < 32; ++i) {
3167 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3168 }
3169
3170 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
3171 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
3172 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
3173 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
3174 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
3175 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
3176 {
3177 uint32_t dsp;
3178 __get_user(dsp, &sc->sc_dsp);
3179 cpu_wrdsp(dsp, 0x3ff, regs);
3180 }
3181
3182 for (i = 0; i < 32; ++i) {
3183 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3184 }
3185 }
3186
3187 /*
3188 * Determine which stack to use.
3189 */
3190 static inline abi_ulong
3191 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3192 {
3193 unsigned long sp;
3194
3195 /* Default to using normal stack */
3196 sp = regs->active_tc.gpr[29];
3197
3198 /*
3199 * FPU emulator may have its own trampoline active just
3200 * above the user stack, 16 bytes before the next lowest
3201 * 16-byte boundary. Try to avoid trashing it.
3202 */
3203 sp -= 32;
3204
3205 /* This is the X/Open sanctioned signal stack switching. */
3206 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3207 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3208 }
3209
3210 return (sp - frame_size) & ~7;
3211 }
3212
3213 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3214 {
3215 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3216 env->hflags &= ~MIPS_HFLAG_M16;
3217 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3218 env->active_tc.PC &= ~(target_ulong) 1;
3219 }
3220 }
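/*
 * As with the ARM Thumb bit, bit 0 of a MIPS16/microMIPS code address
 * carries the ISA mode; the helper above folds it into MIPS_HFLAG_M16 and
 * strips it from PC, so handler addresses with the low bit set enter the
 * compressed ISA.
 */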
3221
3222 # if defined(TARGET_ABI_MIPSO32)
3223 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
3224 static void setup_frame(int sig, struct target_sigaction * ka,
3225 target_sigset_t *set, CPUMIPSState *regs)
3226 {
3227 struct sigframe *frame;
3228 abi_ulong frame_addr;
3229 int i;
3230
3231 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3232 trace_user_setup_frame(regs, frame_addr);
3233 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3234 goto give_sigsegv;
3235 }
3236
3237 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3238
3239 setup_sigcontext(regs, &frame->sf_sc);
3240
3241 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3242 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3243 }
3244
3245 /*
3246 * Arguments to signal handler:
3247 *
3248 * a0 = signal number
3249 * a1 = 0 (should be cause)
3250 * a2 = pointer to struct sigcontext
3251 *
3252 * $25 and PC point to the signal handler, $29 points to the
3253 * struct sigframe.
3254 */
3255 regs->active_tc.gpr[ 4] = sig;
3256 regs->active_tc.gpr[ 5] = 0;
3257 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3258 regs->active_tc.gpr[29] = frame_addr;
3259 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3260 /* The original kernel code sets CP0_EPC to the handler,
3261 * since it returns to userland using eret;
3262 * we cannot do that here, so we must set PC directly */
3263 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3264 mips_set_hflags_isa_mode_from_pc(regs);
3265 unlock_user_struct(frame, frame_addr, 1);
3266 return;
3267
3268 give_sigsegv:
3269 force_sigsegv(sig);
3270 }
3271
3272 long do_sigreturn(CPUMIPSState *regs)
3273 {
3274 struct sigframe *frame;
3275 abi_ulong frame_addr;
3276 sigset_t blocked;
3277 target_sigset_t target_set;
3278 int i;
3279
3280 frame_addr = regs->active_tc.gpr[29];
3281 trace_user_do_sigreturn(regs, frame_addr);
3282 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3283 goto badframe;
3284
3285 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3286 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3287 }
3288
3289 target_to_host_sigset_internal(&blocked, &target_set);
3290 set_sigmask(&blocked);
3291
3292 restore_sigcontext(regs, &frame->sf_sc);
3293
3294 #if 0
3295 /*
3296 * Don't let your children do this ...
3297 */
3298 __asm__ __volatile__(
3299 "move\t$29, %0\n\t"
3300 "j\tsyscall_exit"
3301 :/* no outputs */
3302 :"r" (&regs));
3303 /* Unreached */
3304 #endif
3305
3306 regs->active_tc.PC = regs->CP0_EPC;
3307 mips_set_hflags_isa_mode_from_pc(regs);
3308 /* I am not sure this is right, but it seems to work;
3309 * maybe a problem with nested signals? */
3310 regs->CP0_EPC = 0;
3311 return -TARGET_QEMU_ESIGRETURN;
3312
3313 badframe:
3314 force_sig(TARGET_SIGSEGV);
3315 return -TARGET_QEMU_ESIGRETURN;
3316 }
3317 # endif /* O32 */
3318
3319 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3320 target_siginfo_t *info,
3321 target_sigset_t *set, CPUMIPSState *env)
3322 {
3323 struct target_rt_sigframe *frame;
3324 abi_ulong frame_addr;
3325 int i;
3326
3327 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3328 trace_user_setup_rt_frame(env, frame_addr);
3329 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3330 goto give_sigsegv;
3331 }
3332
3333 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3334
3335 tswap_siginfo(&frame->rs_info, info);
3336
3337 __put_user(0, &frame->rs_uc.tuc_flags);
3338 __put_user(0, &frame->rs_uc.tuc_link);
3339 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3340 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3341 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3342 &frame->rs_uc.tuc_stack.ss_flags);
3343
3344 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3345
3346 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3347 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3348 }
3349
3350 /*
3351 * Arguments to signal handler:
3352 *
3353 * a0 = signal number
3354 * a1 = pointer to siginfo_t
3355 * a2 = pointer to ucontext_t
3356 *
3357 * $25 and PC point to the signal handler, $29 points to the
3358 * struct sigframe.
3359 */
3360 env->active_tc.gpr[ 4] = sig;
3361 env->active_tc.gpr[ 5] = frame_addr
3362 + offsetof(struct target_rt_sigframe, rs_info);
3363 env->active_tc.gpr[ 6] = frame_addr
3364 + offsetof(struct target_rt_sigframe, rs_uc);
3365 env->active_tc.gpr[29] = frame_addr;
3366 env->active_tc.gpr[31] = frame_addr
3367 + offsetof(struct target_rt_sigframe, rs_code);
3368 /* The original kernel code sets CP0_EPC to the handler,
3369 * since it returns to userland using eret;
3370 * we cannot do that here, so we must set PC directly */
3371 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3372 mips_set_hflags_isa_mode_from_pc(env);
3373 unlock_user_struct(frame, frame_addr, 1);
3374 return;
3375
3376 give_sigsegv:
3377 unlock_user_struct(frame, frame_addr, 1);
3378 force_sigsegv(sig);
3379 }
3380
3381 long do_rt_sigreturn(CPUMIPSState *env)
3382 {
3383 struct target_rt_sigframe *frame;
3384 abi_ulong frame_addr;
3385 sigset_t blocked;
3386
3387 frame_addr = env->active_tc.gpr[29];
3388 trace_user_do_rt_sigreturn(env, frame_addr);
3389 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3390 goto badframe;
3391 }
3392
3393 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3394 set_sigmask(&blocked);
3395
3396 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3397
3398 if (do_sigaltstack(frame_addr +
3399 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3400 0, get_sp_from_cpustate(env)) == -EFAULT)
3401 goto badframe;
3402
3403 env->active_tc.PC = env->CP0_EPC;
3404 mips_set_hflags_isa_mode_from_pc(env);
3405 /* I am not sure this is right, but it seems to work;
3406 * maybe a problem with nested signals? */
3407 env->CP0_EPC = 0;
3408 return -TARGET_QEMU_ESIGRETURN;
3409
3410 badframe:
3411 force_sig(TARGET_SIGSEGV);
3412 return -TARGET_QEMU_ESIGRETURN;
3413 }
3414
3415 #elif defined(TARGET_SH4)
3416
3417 /*
3418 * code and data structures from linux kernel:
3419 * include/asm-sh/sigcontext.h
3420 * arch/sh/kernel/signal.c
3421 */
3422
3423 struct target_sigcontext {
3424 target_ulong oldmask;
3425
3426 /* CPU registers */
3427 target_ulong sc_gregs[16];
3428 target_ulong sc_pc;
3429 target_ulong sc_pr;
3430 target_ulong sc_sr;
3431 target_ulong sc_gbr;
3432 target_ulong sc_mach;
3433 target_ulong sc_macl;
3434
3435 /* FPU registers */
3436 target_ulong sc_fpregs[16];
3437 target_ulong sc_xfpregs[16];
3438 unsigned int sc_fpscr;
3439 unsigned int sc_fpul;
3440 unsigned int sc_ownedfp;
3441 };
3442
3443 struct target_sigframe
3444 {
3445 struct target_sigcontext sc;
3446 target_ulong extramask[TARGET_NSIG_WORDS-1];
3447 uint16_t retcode[3];
3448 };
3449
3450
3451 struct target_ucontext {
3452 target_ulong tuc_flags;
3453 struct target_ucontext *tuc_link;
3454 target_stack_t tuc_stack;
3455 struct target_sigcontext tuc_mcontext;
3456 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3457 };
3458
3459 struct target_rt_sigframe
3460 {
3461 struct target_siginfo info;
3462 struct target_ucontext uc;
3463 uint16_t retcode[3];
3464 };
3465
3466
3467 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3468 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
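/*
 * In the frames below these expand to roughly (illustrative decode):
 *     mov.w  @(0,pc), r3     ! MOVW(2): fetch the syscall number word
 *                            !          stored at retcode[2]
 *     trapa  #0x10           ! TRAP_NOARG: issue the syscall, NR in r3
 * The pc-relative load works because the SH pc reads as the instruction
 * address + 4, which lands exactly on retcode[2].
 */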
3469
3470 static abi_ulong get_sigframe(struct target_sigaction *ka,
3471 unsigned long sp, size_t frame_size)
3472 {
3473 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3474 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3475 }
3476
3477 return (sp - frame_size) & -8ul;
3478 }
3479
3480 /* Notice when we're in the middle of a gUSA region and reset.
3481 Note that this will only occur for !parallel_cpus, as we will
3482 translate such sequences differently in a parallel context. */
3483 static void unwind_gusa(CPUSH4State *regs)
3484 {
3485 /* If the stack pointer is sufficiently negative, and we haven't
3486 completed the sequence, then reset to the entry to the region. */
3487 /* ??? The SH4 kernel checks for an address above 0xC0000000.
3488 However, the page mappings in qemu linux-user aren't as restricted
3489 and we wind up with the normal stack mapped above 0xF0000000.
3490 That said, there is no reason why the kernel should be allowing
3491 a gUSA region that spans 1GB. Use a tighter check here, for what
3492 can actually be enabled by the immediate move. */
3493 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
3494 /* Reset the PC to before the gUSA region, as computed from
3495 R0 = region end, SP = -(region size), plus one more for the
3496 insn that actually initializes SP to the region size. */
3497 regs->pc = regs->gregs[0] + regs->gregs[15] - 2;
3498
3499 /* Reset the SP to the saved version in R1. */
3500 regs->gregs[15] = regs->gregs[1];
3501 }
3502 }
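/*
 * Background: gUSA ("user space atomicity") regions are the SH convention
 * for restartable atomic sequences on non-SMP kernels.  Userland sets r0 to
 * the end address of the critical sequence, r1 to the saved sp and r15 (sp)
 * to minus the sequence length; an interrupted sequence is restarted from
 * the top rather than resumed.  The helper above reproduces that restart
 * before a signal frame is built.
 */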
3503
3504 static void setup_sigcontext(struct target_sigcontext *sc,
3505 CPUSH4State *regs, unsigned long mask)
3506 {
3507 int i;
3508
3509 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3510 COPY(gregs[0]); COPY(gregs[1]);
3511 COPY(gregs[2]); COPY(gregs[3]);
3512 COPY(gregs[4]); COPY(gregs[5]);
3513 COPY(gregs[6]); COPY(gregs[7]);
3514 COPY(gregs[8]); COPY(gregs[9]);
3515 COPY(gregs[10]); COPY(gregs[11]);
3516 COPY(gregs[12]); COPY(gregs[13]);
3517 COPY(gregs[14]); COPY(gregs[15]);
3518 COPY(gbr); COPY(mach);
3519 COPY(macl); COPY(pr);
3520 COPY(sr); COPY(pc);
3521 #undef COPY
3522
3523 for (i=0; i<16; i++) {
3524 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3525 }
3526 __put_user(regs->fpscr, &sc->sc_fpscr);
3527 __put_user(regs->fpul, &sc->sc_fpul);
3528
3529 /* non-iBCS2 extensions. */
3530 __put_user(mask, &sc->oldmask);
3531 }
3532
3533 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3534 {
3535 int i;
3536
3537 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3538 COPY(gregs[0]); COPY(gregs[1]);
3539 COPY(gregs[2]); COPY(gregs[3]);
3540 COPY(gregs[4]); COPY(gregs[5]);
3541 COPY(gregs[6]); COPY(gregs[7]);
3542 COPY(gregs[8]); COPY(gregs[9]);
3543 COPY(gregs[10]); COPY(gregs[11]);
3544 COPY(gregs[12]); COPY(gregs[13]);
3545 COPY(gregs[14]); COPY(gregs[15]);
3546 COPY(gbr); COPY(mach);
3547 COPY(macl); COPY(pr);
3548 COPY(sr); COPY(pc);
3549 #undef COPY
3550
3551 for (i=0; i<16; i++) {
3552 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3553 }
3554 __get_user(regs->fpscr, &sc->sc_fpscr);
3555 __get_user(regs->fpul, &sc->sc_fpul);
3556
3557 regs->tra = -1; /* disable syscall checks */
3558 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3559 }
3560
3561 static void setup_frame(int sig, struct target_sigaction *ka,
3562 target_sigset_t *set, CPUSH4State *regs)
3563 {
3564 struct target_sigframe *frame;
3565 abi_ulong frame_addr;
3566 int i;
3567
3568 unwind_gusa(regs);
3569
3570 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3571 trace_user_setup_frame(regs, frame_addr);
3572 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3573 goto give_sigsegv;
3574 }
3575
3576 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3577
3578 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3579 __put_user(set->sig[i + 1], &frame->extramask[i]);
3580 }
3581
3582 /* Set up to return from userspace. If provided, use a stub
3583 already in userspace. */
3584 if (ka->sa_flags & TARGET_SA_RESTORER) {
3585 regs->pr = (unsigned long) ka->sa_restorer;
3586 } else {
3587 /* Generate return code (system call to sigreturn) */
3588 abi_ulong retcode_addr = frame_addr +
3589 offsetof(struct target_sigframe, retcode);
3590 __put_user(MOVW(2), &frame->retcode[0]);
3591 __put_user(TRAP_NOARG, &frame->retcode[1]);
3592 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3593 regs->pr = (unsigned long) retcode_addr;
3594 }
3595
3596 /* Set up registers for signal handler */
3597 regs->gregs[15] = frame_addr;
3598 regs->gregs[4] = sig; /* Arg for signal handler */
3599 regs->gregs[5] = 0;
3600 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3601 regs->pc = (unsigned long) ka->_sa_handler;
3602 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3603
3604 unlock_user_struct(frame, frame_addr, 1);
3605 return;
3606
3607 give_sigsegv:
3608 unlock_user_struct(frame, frame_addr, 1);
3609 force_sigsegv(sig);
3610 }
3611
3612 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3613 target_siginfo_t *info,
3614 target_sigset_t *set, CPUSH4State *regs)
3615 {
3616 struct target_rt_sigframe *frame;
3617 abi_ulong frame_addr;
3618 int i;
3619
3620 unwind_gusa(regs);
3621
3622 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3623 trace_user_setup_rt_frame(regs, frame_addr);
3624 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3625 goto give_sigsegv;
3626 }
3627
3628 tswap_siginfo(&frame->info, info);
3629
3630 /* Create the ucontext. */
3631 __put_user(0, &frame->uc.tuc_flags);
3632 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3633 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3634 &frame->uc.tuc_stack.ss_sp);
3635 __put_user(sas_ss_flags(regs->gregs[15]),
3636 &frame->uc.tuc_stack.ss_flags);
3637 __put_user(target_sigaltstack_used.ss_size,
3638 &frame->uc.tuc_stack.ss_size);
3639 setup_sigcontext(&frame->uc.tuc_mcontext,
3640 regs, set->sig[0]);
3641 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3642 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3643 }
3644
3645 /* Set up to return from userspace. If provided, use a stub
3646 already in userspace. */
3647 if (ka->sa_flags & TARGET_SA_RESTORER) {
3648 regs->pr = (unsigned long) ka->sa_restorer;
3649 } else {
3650 /* Generate return code (system call to sigreturn) */
3651 abi_ulong retcode_addr = frame_addr +
3652 offsetof(struct target_rt_sigframe, retcode);
3653 __put_user(MOVW(2), &frame->retcode[0]);
3654 __put_user(TRAP_NOARG, &frame->retcode[1]);
3655 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3656 regs->pr = (unsigned long) retcode_addr;
3657 }
3658
3659 /* Set up registers for signal handler */
3660 regs->gregs[15] = frame_addr;
3661 regs->gregs[4] = sig; /* Arg for signal handler */
3662 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3663 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3664 regs->pc = (unsigned long) ka->_sa_handler;
3665 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3666
3667 unlock_user_struct(frame, frame_addr, 1);
3668 return;
3669
3670 give_sigsegv:
3671 unlock_user_struct(frame, frame_addr, 1);
3672 force_sigsegv(sig);
3673 }
3674
3675 long do_sigreturn(CPUSH4State *regs)
3676 {
3677 struct target_sigframe *frame;
3678 abi_ulong frame_addr;
3679 sigset_t blocked;
3680 target_sigset_t target_set;
3681 int i;
3682 int err = 0;
3683
3684 frame_addr = regs->gregs[15];
3685 trace_user_do_sigreturn(regs, frame_addr);
3686 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3687 goto badframe;
3688 }
3689
3690 __get_user(target_set.sig[0], &frame->sc.oldmask);
3691 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3692 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3693 }
3694
3695 if (err)
3696 goto badframe;
3697
3698 target_to_host_sigset_internal(&blocked, &target_set);
3699 set_sigmask(&blocked);
3700
3701 restore_sigcontext(regs, &frame->sc);
3702
3703 unlock_user_struct(frame, frame_addr, 0);
3704 return -TARGET_QEMU_ESIGRETURN;
3705
3706 badframe:
3707 unlock_user_struct(frame, frame_addr, 0);
3708 force_sig(TARGET_SIGSEGV);
3709 return -TARGET_QEMU_ESIGRETURN;
3710 }
3711
3712 long do_rt_sigreturn(CPUSH4State *regs)
3713 {
3714 struct target_rt_sigframe *frame;
3715 abi_ulong frame_addr;
3716 sigset_t blocked;
3717
3718 frame_addr = regs->gregs[15];
3719 trace_user_do_rt_sigreturn(regs, frame_addr);
3720 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3721 goto badframe;
3722 }
3723
3724 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3725 set_sigmask(&blocked);
3726
3727 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3728
3729 if (do_sigaltstack(frame_addr +
3730 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3731 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3732 goto badframe;
3733 }
3734
3735 unlock_user_struct(frame, frame_addr, 0);
3736 return -TARGET_QEMU_ESIGRETURN;
3737
3738 badframe:
3739 unlock_user_struct(frame, frame_addr, 0);
3740 force_sig(TARGET_SIGSEGV);
3741 return -TARGET_QEMU_ESIGRETURN;
3742 }
3743 #elif defined(TARGET_MICROBLAZE)
3744
3745 struct target_sigcontext {
3746 struct target_pt_regs regs; /* needs to be first */
3747 uint32_t oldmask;
3748 };
3749
3750 struct target_stack_t {
3751 abi_ulong ss_sp;
3752 int ss_flags;
3753 unsigned int ss_size;
3754 };
3755
3756 struct target_ucontext {
3757 abi_ulong tuc_flags;
3758 abi_ulong tuc_link;
3759 struct target_stack_t tuc_stack;
3760 struct target_sigcontext tuc_mcontext;
3761 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3762 };
3763
3764 /* Signal frames. */
3765 struct target_signal_frame {
3766 struct target_ucontext uc;
3767 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3768 uint32_t tramp[2];
3769 };
3770
3771 struct rt_signal_frame {
3772 siginfo_t info;
3773 ucontext_t uc;
3774 uint32_t tramp[2];
3775 };
3776
3777 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3778 {
3779 __put_user(env->regs[0], &sc->regs.r0);
3780 __put_user(env->regs[1], &sc->regs.r1);
3781 __put_user(env->regs[2], &sc->regs.r2);
3782 __put_user(env->regs[3], &sc->regs.r3);
3783 __put_user(env->regs[4], &sc->regs.r4);
3784 __put_user(env->regs[5], &sc->regs.r5);
3785 __put_user(env->regs[6], &sc->regs.r6);
3786 __put_user(env->regs[7], &sc->regs.r7);
3787 __put_user(env->regs[8], &sc->regs.r8);
3788 __put_user(env->regs[9], &sc->regs.r9);
3789 __put_user(env->regs[10], &sc->regs.r10);
3790 __put_user(env->regs[11], &sc->regs.r11);
3791 __put_user(env->regs[12], &sc->regs.r12);
3792 __put_user(env->regs[13], &sc->regs.r13);
3793 __put_user(env->regs[14], &sc->regs.r14);
3794 __put_user(env->regs[15], &sc->regs.r15);
3795 __put_user(env->regs[16], &sc->regs.r16);
3796 __put_user(env->regs[17], &sc->regs.r17);
3797 __put_user(env->regs[18], &sc->regs.r18);
3798 __put_user(env->regs[19], &sc->regs.r19);
3799 __put_user(env->regs[20], &sc->regs.r20);
3800 __put_user(env->regs[21], &sc->regs.r21);
3801 __put_user(env->regs[22], &sc->regs.r22);
3802 __put_user(env->regs[23], &sc->regs.r23);
3803 __put_user(env->regs[24], &sc->regs.r24);
3804 __put_user(env->regs[25], &sc->regs.r25);
3805 __put_user(env->regs[26], &sc->regs.r26);
3806 __put_user(env->regs[27], &sc->regs.r27);
3807 __put_user(env->regs[28], &sc->regs.r28);
3808 __put_user(env->regs[29], &sc->regs.r29);
3809 __put_user(env->regs[30], &sc->regs.r30);
3810 __put_user(env->regs[31], &sc->regs.r31);
3811 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3812 }
3813
3814 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3815 {
3816 __get_user(env->regs[0], &sc->regs.r0);
3817 __get_user(env->regs[1], &sc->regs.r1);
3818 __get_user(env->regs[2], &sc->regs.r2);
3819 __get_user(env->regs[3], &sc->regs.r3);
3820 __get_user(env->regs[4], &sc->regs.r4);
3821 __get_user(env->regs[5], &sc->regs.r5);
3822 __get_user(env->regs[6], &sc->regs.r6);
3823 __get_user(env->regs[7], &sc->regs.r7);
3824 __get_user(env->regs[8], &sc->regs.r8);
3825 __get_user(env->regs[9], &sc->regs.r9);
3826 __get_user(env->regs[10], &sc->regs.r10);
3827 __get_user(env->regs[11], &sc->regs.r11);
3828 __get_user(env->regs[12], &sc->regs.r12);
3829 __get_user(env->regs[13], &sc->regs.r13);
3830 __get_user(env->regs[14], &sc->regs.r14);
3831 __get_user(env->regs[15], &sc->regs.r15);
3832 __get_user(env->regs[16], &sc->regs.r16);
3833 __get_user(env->regs[17], &sc->regs.r17);
3834 __get_user(env->regs[18], &sc->regs.r18);
3835 __get_user(env->regs[19], &sc->regs.r19);
3836 __get_user(env->regs[20], &sc->regs.r20);
3837 __get_user(env->regs[21], &sc->regs.r21);
3838 __get_user(env->regs[22], &sc->regs.r22);
3839 __get_user(env->regs[23], &sc->regs.r23);
3840 __get_user(env->regs[24], &sc->regs.r24);
3841 __get_user(env->regs[25], &sc->regs.r25);
3842 __get_user(env->regs[26], &sc->regs.r26);
3843 __get_user(env->regs[27], &sc->regs.r27);
3844 __get_user(env->regs[28], &sc->regs.r28);
3845 __get_user(env->regs[29], &sc->regs.r29);
3846 __get_user(env->regs[30], &sc->regs.r30);
3847 __get_user(env->regs[31], &sc->regs.r31);
3848 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3849 }
3850
3851 static abi_ulong get_sigframe(struct target_sigaction *ka,
3852 CPUMBState *env, int frame_size)
3853 {
3854 abi_ulong sp = env->regs[1];
3855
3856 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3857 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3858 }
3859
3860 return ((sp - frame_size) & -8UL);
3861 }
3862
3863 static void setup_frame(int sig, struct target_sigaction *ka,
3864 target_sigset_t *set, CPUMBState *env)
3865 {
3866 struct target_signal_frame *frame;
3867 abi_ulong frame_addr;
3868 int i;
3869
3870 frame_addr = get_sigframe(ka, env, sizeof *frame);
3871 trace_user_setup_frame(env, frame_addr);
3872 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3873 goto badframe;
3874
3875 /* Save the mask. */
3876 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3877
3878 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3879 __put_user(set->sig[i], &frame->extramask[i - 1]);
3880 }
3881
3882 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3883
3884 /* Set up to return from userspace. If provided, use a stub
3885 already in userspace. */
3886 /* The -8 compensates for the +8 applied by the "rtsd r15, 8" return. */
3887 if (ka->sa_flags & TARGET_SA_RESTORER) {
3888 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3889 } else {
3890 uint32_t t;
3891 /* Note, these encodings are _big endian_! */
3892 /* addi r12, r0, __NR_sigreturn */
3893 t = 0x31800000UL | TARGET_NR_sigreturn;
3894 __put_user(t, frame->tramp + 0);
3895 /* brki r14, 0x8 */
3896 t = 0xb9cc0008UL;
3897 __put_user(t, frame->tramp + 1);
3898
3899 /* Return from the sighandler will jump to the tramp.
3900 The -8 offset is because the return is rtsd r15, 8. */
3901 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3902 - 8;
3903 }
3904
3905 /* Set up registers for signal handler */
3906 env->regs[1] = frame_addr;
3907 /* Signal handler args: */
3908 env->regs[5] = sig; /* Arg 0: signum */
3909 env->regs[6] = 0;
3910 /* arg 1: sigcontext */
3911 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
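    /* Note that the "+=" above advances frame_addr to the embedded ucontext,
       so the unlock_user_struct() below is passed this advanced address rather
       than the frame base. */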
3912
3913 /* Offset of 4 to handle microblaze rtid r14, 0 */
3914 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3915
3916 unlock_user_struct(frame, frame_addr, 1);
3917 return;
3918 badframe:
3919 force_sigsegv(sig);
3920 }
3921
3922 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3923 target_siginfo_t *info,
3924 target_sigset_t *set, CPUMBState *env)
3925 {
3926 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3927 }
3928
3929 long do_sigreturn(CPUMBState *env)
3930 {
3931 struct target_signal_frame *frame;
3932 abi_ulong frame_addr;
3933 target_sigset_t target_set;
3934 sigset_t set;
3935 int i;
3936
3937 frame_addr = env->regs[R_SP];
3938 trace_user_do_sigreturn(env, frame_addr);
3939 /* Make sure the guest isn't playing games. */
3940 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3941 goto badframe;
3942
3943 /* Restore blocked signals */
3944 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3945 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3946 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3947 }
3948 target_to_host_sigset_internal(&set, &target_set);
3949 set_sigmask(&set);
3950
3951 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3952 /* We got here through a sigreturn syscall; our path back is via an
3953 rtb insn, so set up r14 for that. */
3954 env->regs[14] = env->sregs[SR_PC];
3955
3956 unlock_user_struct(frame, frame_addr, 0);
3957 return -TARGET_QEMU_ESIGRETURN;
3958 badframe:
3959 force_sig(TARGET_SIGSEGV);
3960 return -TARGET_QEMU_ESIGRETURN;
3961 }
3962
3963 long do_rt_sigreturn(CPUMBState *env)
3964 {
3965 trace_user_do_rt_sigreturn(env, 0);
3966 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3967 return -TARGET_ENOSYS;
3968 }
3969
3970 #elif defined(TARGET_CRIS)
3971
3972 struct target_sigcontext {
3973 struct target_pt_regs regs; /* needs to be first */
3974 uint32_t oldmask;
3975 uint32_t usp; /* usp before stacking this gunk on it */
3976 };
3977
3978 /* Signal frames. */
3979 struct target_signal_frame {
3980 struct target_sigcontext sc;
3981 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3982 uint16_t retcode[4]; /* Trampoline code. */
3983 };
3984
3985 struct rt_signal_frame {
3986 siginfo_t *pinfo;
3987 void *puc;
3988 siginfo_t info;
3989 ucontext_t uc;
3990 uint16_t retcode[4]; /* Trampoline code. */
3991 };
3992
3993 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3994 {
3995 __put_user(env->regs[0], &sc->regs.r0);
3996 __put_user(env->regs[1], &sc->regs.r1);
3997 __put_user(env->regs[2], &sc->regs.r2);
3998 __put_user(env->regs[3], &sc->regs.r3);
3999 __put_user(env->regs[4], &sc->regs.r4);
4000 __put_user(env->regs[5], &sc->regs.r5);
4001 __put_user(env->regs[6], &sc->regs.r6);
4002 __put_user(env->regs[7], &sc->regs.r7);
4003 __put_user(env->regs[8], &sc->regs.r8);
4004 __put_user(env->regs[9], &sc->regs.r9);
4005 __put_user(env->regs[10], &sc->regs.r10);
4006 __put_user(env->regs[11], &sc->regs.r11);
4007 __put_user(env->regs[12], &sc->regs.r12);
4008 __put_user(env->regs[13], &sc->regs.r13);
4009 __put_user(env->regs[14], &sc->usp);
4010 __put_user(env->regs[15], &sc->regs.acr);
4011 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
4012 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
4013 __put_user(env->pc, &sc->regs.erp);
4014 }
4015
4016 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
4017 {
4018 __get_user(env->regs[0], &sc->regs.r0);
4019 __get_user(env->regs[1], &sc->regs.r1);
4020 __get_user(env->regs[2], &sc->regs.r2);
4021 __get_user(env->regs[3], &sc->regs.r3);
4022 __get_user(env->regs[4], &sc->regs.r4);
4023 __get_user(env->regs[5], &sc->regs.r5);
4024 __get_user(env->regs[6], &sc->regs.r6);
4025 __get_user(env->regs[7], &sc->regs.r7);
4026 __get_user(env->regs[8], &sc->regs.r8);
4027 __get_user(env->regs[9], &sc->regs.r9);
4028 __get_user(env->regs[10], &sc->regs.r10);
4029 __get_user(env->regs[11], &sc->regs.r11);
4030 __get_user(env->regs[12], &sc->regs.r12);
4031 __get_user(env->regs[13], &sc->regs.r13);
4032 __get_user(env->regs[14], &sc->usp);
4033 __get_user(env->regs[15], &sc->regs.acr);
4034 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
4035 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
4036 __get_user(env->pc, &sc->regs.erp);
4037 }
4038
4039 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4040 {
4041 abi_ulong sp;
4042 /* Align the stack downwards to 4. */
4043 sp = (env->regs[R_SP] & ~3);
4044 return sp - framesize;
4045 }
4046
4047 static void setup_frame(int sig, struct target_sigaction *ka,
4048 target_sigset_t *set, CPUCRISState *env)
4049 {
4050 struct target_signal_frame *frame;
4051 abi_ulong frame_addr;
4052 int i;
4053
4054 frame_addr = get_sigframe(env, sizeof *frame);
4055 trace_user_setup_frame(env, frame_addr);
4056 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4057 goto badframe;
4058
4059 /*
4060 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
4061 * use this trampoline anymore but it sets it up for GDB.
4062 * In QEMU, using the trampoline simplifies things a bit so we use it.
4063 *
4064 * This is movu.w __NR_sigreturn, r9; break 13;
4065 */
4066 __put_user(0x9c5f, frame->retcode+0);
4067 __put_user(TARGET_NR_sigreturn,
4068 frame->retcode + 1);
4069 __put_user(0xe93d, frame->retcode + 2);
4070
4071 /* Save the mask. */
4072 __put_user(set->sig[0], &frame->sc.oldmask);
4073
4074 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4075 __put_user(set->sig[i], &frame->extramask[i - 1]);
4076 }
4077
4078 setup_sigcontext(&frame->sc, env);
4079
4080 /* Move the stack and setup the arguments for the handler. */
4081 env->regs[R_SP] = frame_addr;
4082 env->regs[10] = sig;
4083 env->pc = (unsigned long) ka->_sa_handler;
4084 /* Link SRP so the guest returns through the trampoline. */
4085 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
4086
4087 unlock_user_struct(frame, frame_addr, 1);
4088 return;
4089 badframe:
4090 force_sigsegv(sig);
4091 }
4092
4093 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4094 target_siginfo_t *info,
4095 target_sigset_t *set, CPUCRISState *env)
4096 {
4097 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
4098 }
4099
4100 long do_sigreturn(CPUCRISState *env)
4101 {
4102 struct target_signal_frame *frame;
4103 abi_ulong frame_addr;
4104 target_sigset_t target_set;
4105 sigset_t set;
4106 int i;
4107
4108 frame_addr = env->regs[R_SP];
4109 trace_user_do_sigreturn(env, frame_addr);
4110 /* Make sure the guest isn't playing games. */
4111 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
4112 goto badframe;
4113 }
4114
4115 /* Restore blocked signals */
4116 __get_user(target_set.sig[0], &frame->sc.oldmask);
4117 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4118 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4119 }
4120 target_to_host_sigset_internal(&set, &target_set);
4121 set_sigmask(&set);
4122
4123 restore_sigcontext(&frame->sc, env);
4124 unlock_user_struct(frame, frame_addr, 0);
4125 return -TARGET_QEMU_ESIGRETURN;
4126 badframe:
4127 force_sig(TARGET_SIGSEGV);
4128 return -TARGET_QEMU_ESIGRETURN;
4129 }
4130
4131 long do_rt_sigreturn(CPUCRISState *env)
4132 {
4133 trace_user_do_rt_sigreturn(env, 0);
4134 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
4135 return -TARGET_ENOSYS;
4136 }
4137
4138 #elif defined(TARGET_NIOS2)
4139
4140 #define MCONTEXT_VERSION 2
4141
4142 struct target_sigcontext {
4143 int version;
4144 unsigned long gregs[32];
4145 };
4146
4147 struct target_ucontext {
4148 abi_ulong tuc_flags;
4149 abi_ulong tuc_link;
4150 target_stack_t tuc_stack;
4151 struct target_sigcontext tuc_mcontext;
4152 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4153 };
4154
4155 struct target_rt_sigframe {
4156 struct target_siginfo info;
4157 struct target_ucontext uc;
4158 };
4159
4160 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4161 {
4162 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4163 #ifdef CONFIG_STACK_GROWSUP
4164 return target_sigaltstack_used.ss_sp;
4165 #else
4166 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4167 #endif
4168 }
4169 return sp;
4170 }
4171
4172 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
4173 {
4174 unsigned long *gregs = uc->tuc_mcontext.gregs;
4175
4176 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4177 __put_user(env->regs[1], &gregs[0]);
4178 __put_user(env->regs[2], &gregs[1]);
4179 __put_user(env->regs[3], &gregs[2]);
4180 __put_user(env->regs[4], &gregs[3]);
4181 __put_user(env->regs[5], &gregs[4]);
4182 __put_user(env->regs[6], &gregs[5]);
4183 __put_user(env->regs[7], &gregs[6]);
4184 __put_user(env->regs[8], &gregs[7]);
4185 __put_user(env->regs[9], &gregs[8]);
4186 __put_user(env->regs[10], &gregs[9]);
4187 __put_user(env->regs[11], &gregs[10]);
4188 __put_user(env->regs[12], &gregs[11]);
4189 __put_user(env->regs[13], &gregs[12]);
4190 __put_user(env->regs[14], &gregs[13]);
4191 __put_user(env->regs[15], &gregs[14]);
4192 __put_user(env->regs[16], &gregs[15]);
4193 __put_user(env->regs[17], &gregs[16]);
4194 __put_user(env->regs[18], &gregs[17]);
4195 __put_user(env->regs[19], &gregs[18]);
4196 __put_user(env->regs[20], &gregs[19]);
4197 __put_user(env->regs[21], &gregs[20]);
4198 __put_user(env->regs[22], &gregs[21]);
4199 __put_user(env->regs[23], &gregs[22]);
4200 __put_user(env->regs[R_RA], &gregs[23]);
4201 __put_user(env->regs[R_FP], &gregs[24]);
4202 __put_user(env->regs[R_GP], &gregs[25]);
4203 __put_user(env->regs[R_EA], &gregs[27]);
4204 __put_user(env->regs[R_SP], &gregs[28]);
4205
4206 return 0;
4207 }
4208
4209 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
4210 int *pr2)
4211 {
4212 int temp;
4213 abi_ulong off, frame_addr = env->regs[R_SP];
4214 unsigned long *gregs = uc->tuc_mcontext.gregs;
4215 int err;
4216
4217 /* Always make any pending restarted system calls return -EINTR */
4218 /* current->restart_block.fn = do_no_restart_syscall; */
4219
4220 __get_user(temp, &uc->tuc_mcontext.version);
4221 if (temp != MCONTEXT_VERSION) {
4222 return 1;
4223 }
4224
4225 /* restore passed registers */
4226 __get_user(env->regs[1], &gregs[0]);
4227 __get_user(env->regs[2], &gregs[1]);
4228 __get_user(env->regs[3], &gregs[2]);
4229 __get_user(env->regs[4], &gregs[3]);
4230 __get_user(env->regs[5], &gregs[4]);
4231 __get_user(env->regs[6], &gregs[5]);
4232 __get_user(env->regs[7], &gregs[6]);
4233 __get_user(env->regs[8], &gregs[7]);
4234 __get_user(env->regs[9], &gregs[8]);
4235 __get_user(env->regs[10], &gregs[9]);
4236 __get_user(env->regs[11], &gregs[10]);
4237 __get_user(env->regs[12], &gregs[11]);
4238 __get_user(env->regs[13], &gregs[12]);
4239 __get_user(env->regs[14], &gregs[13]);
4240 __get_user(env->regs[15], &gregs[14]);
4241 __get_user(env->regs[16], &gregs[15]);
4242 __get_user(env->regs[17], &gregs[16]);
4243 __get_user(env->regs[18], &gregs[17]);
4244 __get_user(env->regs[19], &gregs[18]);
4245 __get_user(env->regs[20], &gregs[19]);
4246 __get_user(env->regs[21], &gregs[20]);
4247 __get_user(env->regs[22], &gregs[21]);
4248 __get_user(env->regs[23], &gregs[22]);
4249 /* gregs[23] is handled below */
4250 /* TODO: verify whether this should be settable */
4251 __get_user(env->regs[R_FP], &gregs[24]);
4252 /* TODO: verify whether this should be settable */
4253 __get_user(env->regs[R_GP], &gregs[25]);
4254 /* Not really necessary: no user-settable bits */
4255 __get_user(temp, &gregs[26]);
4256 __get_user(env->regs[R_EA], &gregs[27]);
4257
4258 __get_user(env->regs[R_RA], &gregs[23]);
4259 __get_user(env->regs[R_SP], &gregs[28]);
4260
4261 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
4262 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
4263 if (err == -EFAULT) {
4264 return 1;
4265 }
4266
4267 *pr2 = env->regs[2];
4268 return 0;
4269 }
4270
4271 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
4272 size_t frame_size)
4273 {
4274 unsigned long usp;
4275
4276 /* Default to using normal stack. */
4277 usp = env->regs[R_SP];
4278
4279 /* This is the X/Open sanctioned signal stack switching. */
4280 usp = sigsp(usp, ka);
4281
4282 /* TODO: verify whether 32- or 64-bit alignment is required */
4283 return (void *)((usp - frame_size) & -8UL);
4284 }
4285
4286 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4287 target_siginfo_t *info,
4288 target_sigset_t *set,
4289 CPUNios2State *env)
4290 {
4291 struct target_rt_sigframe *frame;
4292 int i, err = 0;
4293
4294 frame = get_sigframe(ka, env, sizeof(*frame));
4295
4296 if (ka->sa_flags & SA_SIGINFO) {
4297 tswap_siginfo(&frame->info, info);
4298 }
4299
4300 /* Create the ucontext. */
4301 __put_user(0, &frame->uc.tuc_flags);
4302 __put_user(0, &frame->uc.tuc_link);
4303 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4304 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4305 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4306 err |= rt_setup_ucontext(&frame->uc, env);
4307 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4308 __put_user((abi_ulong)set->sig[i],
4309 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4310 }
4311
4312 if (err) {
4313 goto give_sigsegv;
4314 }
4315
4316 /* Set up to return from userspace; jump to the fixed-address sigreturn
4317 trampoline on the kuser page. */
4318 env->regs[R_RA] = (unsigned long) (0x1044);
4319
4320 /* Set up registers for signal handler */
4321 env->regs[R_SP] = (unsigned long) frame;
4322 env->regs[4] = (unsigned long) sig;
4323 env->regs[5] = (unsigned long) &frame->info;
4324 env->regs[6] = (unsigned long) &frame->uc;
4325 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4326 return;
4327
4328 give_sigsegv:
4329 if (sig == TARGET_SIGSEGV) {
4330 ka->_sa_handler = TARGET_SIG_DFL;
4331 }
4332 force_sigsegv(sig);
4333 return;
4334 }
4335
4336 long do_sigreturn(CPUNios2State *env)
4337 {
4338 trace_user_do_sigreturn(env, 0);
4339 fprintf(stderr, "do_sigreturn: not implemented\n");
4340 return -TARGET_ENOSYS;
4341 }
4342
4343 long do_rt_sigreturn(CPUNios2State *env)
4344 {
4345 /* TODO: verify that we can follow the stack back */
4346 abi_ulong frame_addr = env->regs[R_SP];
4347 struct target_rt_sigframe *frame;
4348 sigset_t set;
4349 int rval;
4350
4351 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4352 goto badframe;
4353 }
4354
4355 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4356 do_sigprocmask(SIG_SETMASK, &set, NULL);
4357
4358 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4359 goto badframe;
4360 }
4361
4362 unlock_user_struct(frame, frame_addr, 0);
4363 return rval;
4364
4365 badframe:
4366 unlock_user_struct(frame, frame_addr, 0);
4367 force_sig(TARGET_SIGSEGV);
4368 return 0;
4369 }
4370 /* TARGET_NIOS2 */
4371
4372 #elif defined(TARGET_OPENRISC)
4373
4374 struct target_sigcontext {
4375 struct target_pt_regs regs;
4376 abi_ulong oldmask;
4377 abi_ulong usp;
4378 };
4379
4380 struct target_ucontext {
4381 abi_ulong tuc_flags;
4382 abi_ulong tuc_link;
4383 target_stack_t tuc_stack;
4384 struct target_sigcontext tuc_mcontext;
4385 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4386 };
4387
4388 struct target_rt_sigframe {
4389 abi_ulong pinfo;
4390 uint64_t puc;
4391 struct target_siginfo info;
4392 struct target_sigcontext sc;
4393 struct target_ucontext uc;
4394 unsigned char retcode[16]; /* trampoline code */
4395 };
4396
4397 /* This is the asm-generic/ucontext.h version */
4398 #if 0
4399 static int restore_sigcontext(CPUOpenRISCState *regs,
4400 struct target_sigcontext *sc)
4401 {
4402 unsigned int err = 0;
4403 unsigned long old_usp;
4404
4405 /* Always make any pending restarted system call return -EINTR */
4406 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4407
4408 /* restore the regs from &sc->regs (same as sc, since regs is first)
4409 * (sc is already checked for VERIFY_READ since the sigframe was
4410 * checked in sys_sigreturn previously)
4411 */
4412
4413 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4414 goto badframe;
4415 }
4416
4417 /* make sure the U-flag is set so user-mode cannot fool us */
4418
4419 regs->sr &= ~SR_SM;
4420
4421 /* restore the old USP as it was before we stacked the sc etc.
4422 * (we cannot just pop the sigcontext since we aligned the sp and
4423 * stuff after pushing it)
4424 */
4425
4426 __get_user(old_usp, &sc->usp);
4427 phx_signal("old_usp 0x%lx", old_usp);
4428
4429 __PHX__ REALLY /* ??? */
4430 wrusp(old_usp);
4431 regs->gpr[1] = old_usp;
4432
4433 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4434 * after this completes, but we don't use that mechanism. maybe we can
4435 * use it now ?
4436 */
4437
4438 return err;
4439
4440 badframe:
4441 return 1;
4442 }
4443 #endif
4444
4445 /* Set up a signal frame. */
4446
4447 static void setup_sigcontext(struct target_sigcontext *sc,
4448 CPUOpenRISCState *regs,
4449 unsigned long mask)
4450 {
4451 unsigned long usp = cpu_get_gpr(regs, 1);
4452
4453 /* copy the regs. they are first in sc so we can use sc directly */
4454
4455 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4456
4457 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4458 the signal handler. The frametype will be restored to its previous
4459 value in restore_sigcontext. */
4460 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4461
4462 /* then some other stuff */
4463 __put_user(mask, &sc->oldmask);
4464 __put_user(usp, &sc->usp);
4465 }
4466
4467 static inline unsigned long align_sigframe(unsigned long sp)
4468 {
4469 return sp & ~3UL;
4470 }
4471
4472 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4473 CPUOpenRISCState *regs,
4474 size_t frame_size)
4475 {
4476 unsigned long sp = cpu_get_gpr(regs, 1);
4477 int onsigstack = on_sig_stack(sp);
4478
4479 /* redzone */
4480 /* This is the X/Open sanctioned signal stack switching. */
4481 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4482 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4483 }
4484
4485 sp = align_sigframe(sp - frame_size);
4486
4487 /*
4488 * If we are on the alternate signal stack and would overflow it, don't.
4489 * Return an always-bogus address instead so we will die with SIGSEGV.
4490 */
4491
4492 if (onsigstack && !likely(on_sig_stack(sp))) {
4493 return -1L;
4494 }
4495
4496 return sp;
4497 }
4498
4499 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4500 target_siginfo_t *info,
4501 target_sigset_t *set, CPUOpenRISCState *env)
4502 {
4503 int err = 0;
4504 abi_ulong frame_addr;
4505 unsigned long return_ip;
4506 struct target_rt_sigframe *frame;
4507 abi_ulong info_addr, uc_addr;
4508
4509 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4510 trace_user_setup_rt_frame(env, frame_addr);
4511 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4512 goto give_sigsegv;
4513 }
4514
4515 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4516 __put_user(info_addr, &frame->pinfo);
4517 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4518 __put_user(uc_addr, &frame->puc);
4519
4520 if (ka->sa_flags & SA_SIGINFO) {
4521 tswap_siginfo(&frame->info, info);
4522 }
4523
4524 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4525 __put_user(0, &frame->uc.tuc_flags);
4526 __put_user(0, &frame->uc.tuc_link);
4527 __put_user(target_sigaltstack_used.ss_sp,
4528 &frame->uc.tuc_stack.ss_sp);
4529 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4530 &frame->uc.tuc_stack.ss_flags);
4531 __put_user(target_sigaltstack_used.ss_size,
4532 &frame->uc.tuc_stack.ss_size);
4533 setup_sigcontext(&frame->sc, env, set->sig[0]);
4534
4535 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4536
4537 /* trampoline - the desired return ip is the retcode itself */
4538 return_ip = (unsigned long)&frame->retcode;
4539 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; the last word is l.nop padding */
4540 __put_user(0xa960, (short *)(frame->retcode + 0));
4541 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4542 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4543 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4544
4545 if (err) {
4546 goto give_sigsegv;
4547 }
4548
4549 /* TODO what is the current->exec_domain stuff and invmap ? */
4550
4551 /* Set up registers for signal handler */
4552 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4553 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4554 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4555 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4556 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
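    /* FIXME: these pass host pointers for the siginfo and ucontext arguments,
       while info_addr/uc_addr above hold the corresponding guest addresses. */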
4557
4558 /* actually move the usp to reflect the stacked frame */
4559 cpu_set_gpr(env, 1, (unsigned long)frame);
4560
4561 return;
4562
4563 give_sigsegv:
4564 unlock_user_struct(frame, frame_addr, 1);
4565 force_sigsegv(sig);
4566 }
4567
4568 long do_sigreturn(CPUOpenRISCState *env)
4569 {
4570 trace_user_do_sigreturn(env, 0);
4571 fprintf(stderr, "do_sigreturn: not implemented\n");
4572 return -TARGET_ENOSYS;
4573 }
4574
4575 long do_rt_sigreturn(CPUOpenRISCState *env)
4576 {
4577 trace_user_do_rt_sigreturn(env, 0);
4578 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4579 return -TARGET_ENOSYS;
4580 }
4581 /* TARGET_OPENRISC */
4582
4583 #elif defined(TARGET_S390X)
4584
4585 #define __NUM_GPRS 16
4586 #define __NUM_FPRS 16
4587 #define __NUM_ACRS 16
4588
4589 #define S390_SYSCALL_SIZE 2
4590 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4591
4592 #define _SIGCONTEXT_NSIG 64
4593 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4594 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4595 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4596 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4597 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4598
4599 typedef struct {
4600 target_psw_t psw;
4601 target_ulong gprs[__NUM_GPRS];
4602 unsigned int acrs[__NUM_ACRS];
4603 } target_s390_regs_common;
4604
4605 typedef struct {
4606 unsigned int fpc;
4607 double fprs[__NUM_FPRS];
4608 } target_s390_fp_regs;
4609
4610 typedef struct {
4611 target_s390_regs_common regs;
4612 target_s390_fp_regs fpregs;
4613 } target_sigregs;
4614
4615 struct target_sigcontext {
4616 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4617 target_sigregs *sregs;
4618 };
4619
4620 typedef struct {
4621 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4622 struct target_sigcontext sc;
4623 target_sigregs sregs;
4624 int signo;
4625 uint8_t retcode[S390_SYSCALL_SIZE];
4626 } sigframe;
4627
4628 struct target_ucontext {
4629 target_ulong tuc_flags;
4630 struct target_ucontext *tuc_link;
4631 target_stack_t tuc_stack;
4632 target_sigregs tuc_mcontext;
4633 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4634 };
4635
4636 typedef struct {
4637 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4638 uint8_t retcode[S390_SYSCALL_SIZE];
4639 struct target_siginfo info;
4640 struct target_ucontext uc;
4641 } rt_sigframe;
4642
4643 static inline abi_ulong
4644 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4645 {
4646 abi_ulong sp;
4647
4648 /* Default to using normal stack */
4649 sp = env->regs[15];
4650
4651 /* This is the X/Open sanctioned signal stack switching. */
4652 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4653 if (!sas_ss_flags(sp)) {
4654 sp = target_sigaltstack_used.ss_sp +
4655 target_sigaltstack_used.ss_size;
4656 }
4657 }
4658
4659 /* This is the legacy signal stack switching. */
4660 else if (/* FIXME !user_mode(regs) */ 0 &&
4661 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4662 ka->sa_restorer) {
4663 sp = (abi_ulong) ka->sa_restorer;
4664 }
4665
4666 return (sp - frame_size) & -8ul;
4667 }
4668
4669 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4670 {
4671 int i;
4672 //save_access_regs(current->thread.acrs); FIXME
4673
4674 /* Copy a 'clean' PSW mask to the user to avoid leaking
4675 information about whether PER is currently on. */
4676 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4677 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4678 for (i = 0; i < 16; i++) {
4679 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4680 }
4681 for (i = 0; i < 16; i++) {
4682 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4683 }
4684 /*
4685 * We have to store the fp registers to current->thread.fp_regs
4686 * to merge them with the emulated registers.
4687 */
4688 //save_fp_regs(&current->thread.fp_regs); FIXME
4689 for (i = 0; i < 16; i++) {
4690 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4691 }
4692 }
4693
4694 static void setup_frame(int sig, struct target_sigaction *ka,
4695 target_sigset_t *set, CPUS390XState *env)
4696 {
4697 sigframe *frame;
4698 abi_ulong frame_addr;
4699
4700 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4701 trace_user_setup_frame(env, frame_addr);
4702 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4703 goto give_sigsegv;
4704 }
4705
4706 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4707
4708 save_sigregs(env, &frame->sregs);
4709
4710 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4711 (abi_ulong *)&frame->sc.sregs);
4712
4713 /* Set up to return from userspace. If provided, use a stub
4714 already in userspace. */
4715 if (ka->sa_flags & TARGET_SA_RESTORER) {
4716 env->regs[14] = (unsigned long)
4717 ka->sa_restorer | PSW_ADDR_AMODE;
4718 } else {
4719 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4720 | PSW_ADDR_AMODE;
4721 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4722 (uint16_t *)(frame->retcode));
4723 }
4724
4725 /* Set up backchain. */
4726 __put_user(env->regs[15], (abi_ulong *) frame);
4727
4728 /* Set up registers for signal handler */
4729 env->regs[15] = frame_addr;
4730 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4731
4732 env->regs[2] = sig; //map_signal(sig);
4733 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4734
4735 /* We forgot to include these in the sigcontext.
4736 To avoid breaking binary compatibility, they are passed as args. */
4737 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4738 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4739
4740 /* Place signal number on stack to allow backtrace from handler. */
4741 __put_user(env->regs[2], &frame->signo);
4742 unlock_user_struct(frame, frame_addr, 1);
4743 return;
4744
4745 give_sigsegv:
4746 force_sigsegv(sig);
4747 }
4748
4749 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4750 target_siginfo_t *info,
4751 target_sigset_t *set, CPUS390XState *env)
4752 {
4753 int i;
4754 rt_sigframe *frame;
4755 abi_ulong frame_addr;
4756
4757 frame_addr = get_sigframe(ka, env, sizeof *frame);
4758 trace_user_setup_rt_frame(env, frame_addr);
4759 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4760 goto give_sigsegv;
4761 }
4762
4763 tswap_siginfo(&frame->info, info);
4764
4765 /* Create the ucontext. */
4766 __put_user(0, &frame->uc.tuc_flags);
4767 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4768 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4769 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4770 &frame->uc.tuc_stack.ss_flags);
4771 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4772 save_sigregs(env, &frame->uc.tuc_mcontext);
4773 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4774 __put_user((abi_ulong)set->sig[i],
4775 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4776 }
4777
4778 /* Set up to return from userspace. If provided, use a stub
4779 already in userspace. */
4780 if (ka->sa_flags & TARGET_SA_RESTORER) {
4781 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4782 } else {
4783 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4784 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4785 (uint16_t *)(frame->retcode));
4786 }
4787
4788 /* Set up backchain. */
4789 __put_user(env->regs[15], (abi_ulong *) frame);
4790
4791 /* Set up registers for signal handler */
4792 env->regs[15] = frame_addr;
4793 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4794
4795 env->regs[2] = sig; //map_signal(sig);
4796 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4797 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4798 return;
4799
4800 give_sigsegv:
4801 force_sigsegv(sig);
4802 }
4803
4804 static int
4805 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4806 {
4807 int err = 0;
4808 int i;
4809
4810 for (i = 0; i < 16; i++) {
4811 __get_user(env->regs[i], &sc->regs.gprs[i]);
4812 }
4813
4814 __get_user(env->psw.mask, &sc->regs.psw.mask);
4815 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4816 (unsigned long long)env->psw.addr);
4817 __get_user(env->psw.addr, &sc->regs.psw.addr);
4818 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4819
4820 for (i = 0; i < 16; i++) {
4821 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4822 }
4823 for (i = 0; i < 16; i++) {
4824 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4825 }
4826
4827 return err;
4828 }
4829
4830 long do_sigreturn(CPUS390XState *env)
4831 {
4832 sigframe *frame;
4833 abi_ulong frame_addr = env->regs[15];
4834 target_sigset_t target_set;
4835 sigset_t set;
4836
4837 trace_user_do_sigreturn(env, frame_addr);
4838 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4839 goto badframe;
4840 }
4841 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4842
4843 target_to_host_sigset_internal(&set, &target_set);
4844 set_sigmask(&set); /* ~_BLOCKABLE? */
4845
4846 if (restore_sigregs(env, &frame->sregs)) {
4847 goto badframe;
4848 }
4849
4850 unlock_user_struct(frame, frame_addr, 0);
4851 return -TARGET_QEMU_ESIGRETURN;
4852
4853 badframe:
4854 force_sig(TARGET_SIGSEGV);
4855 return -TARGET_QEMU_ESIGRETURN;
4856 }
4857
4858 long do_rt_sigreturn(CPUS390XState *env)
4859 {
4860 rt_sigframe *frame;
4861 abi_ulong frame_addr = env->regs[15];
4862 sigset_t set;
4863
4864 trace_user_do_rt_sigreturn(env, frame_addr);
4865 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4866 goto badframe;
4867 }
4868 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4869
4870 set_sigmask(&set); /* ~_BLOCKABLE? */
4871
4872 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4873 goto badframe;
4874 }
4875
4876 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4877 get_sp_from_cpustate(env)) == -EFAULT) {
4878 goto badframe;
4879 }
4880 unlock_user_struct(frame, frame_addr, 0);
4881 return -TARGET_QEMU_ESIGRETURN;
4882
4883 badframe:
4884 unlock_user_struct(frame, frame_addr, 0);
4885 force_sig(TARGET_SIGSEGV);
4886 return -TARGET_QEMU_ESIGRETURN;
4887 }
4888
4889 #elif defined(TARGET_PPC)
4890
4891 /* Size of dummy stack frame allocated when calling signal handler.
4892 See arch/powerpc/include/asm/ptrace.h. */
4893 #if defined(TARGET_PPC64)
4894 #define SIGNAL_FRAMESIZE 128
4895 #else
4896 #define SIGNAL_FRAMESIZE 64
4897 #endif
4898
4899 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4900 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4901 struct target_mcontext {
4902 target_ulong mc_gregs[48];
4903 /* Includes fpscr. */
4904 uint64_t mc_fregs[33];
4905 #if defined(TARGET_PPC64)
4906 /* Pointer to the vector regs */
4907 target_ulong v_regs;
4908 #else
4909 target_ulong mc_pad[2];
4910 #endif
4911 /* We need to handle Altivec and SPE at the same time, which no
4912 kernel needs to do. Fortunately, the kernel defines this bit to
4913 be Altivec-register-large all the time, rather than trying to
4914 twiddle it based on the specific platform. */
4915 union {
4916 /* SPE vector registers. One extra for SPEFSCR. */
4917 uint32_t spe[33];
4918 /* Altivec vector registers. The packing of VSCR and VRSAVE
4919 varies depending on whether we're PPC64 or not: PPC64 splits
4920 them apart; PPC32 stuffs them together.
4921 We also need to account for the VSX registers on PPC64
4922 */
4923 #if defined(TARGET_PPC64)
4924 #define QEMU_NVRREG (34 + 16)
4925 /* On ppc64, this mcontext structure is naturally *unaligned*,
4926 * or rather it is aligned on an 8-byte boundary but not on a
4927 * 16-byte one. This pad fixes it up. This is also why the
4928 * vector regs are referenced by the v_regs pointer above, so
4929 * any amount of padding can be added here.
4930 */
4931 target_ulong pad;
4932 #else
4933 /* On ppc32, we are already aligned to 16 bytes */
4934 #define QEMU_NVRREG 33
4935 #endif
4936 /* We cannot use ppc_avr_t here as we do *not* want the implied
4937 * 16-byte alignment that would result from it. This would have
4938 * the effect of making the whole struct target_mcontext aligned,
4939 * which breaks the layout of struct target_ucontext on ppc64.
4940 */
4941 uint64_t altivec[QEMU_NVRREG][2];
4942 #undef QEMU_NVRREG
4943 } mc_vregs;
4944 };
4945
4946 /* See arch/powerpc/include/asm/sigcontext.h. */
4947 struct target_sigcontext {
4948 target_ulong _unused[4];
4949 int32_t signal;
4950 #if defined(TARGET_PPC64)
4951 int32_t pad0;
4952 #endif
4953 target_ulong handler;
4954 target_ulong oldmask;
4955 target_ulong regs; /* struct pt_regs __user * */
4956 #if defined(TARGET_PPC64)
4957 struct target_mcontext mcontext;
4958 #endif
4959 };
4960
4961 /* Indices for target_mcontext.mc_gregs, below.
4962 See arch/powerpc/include/asm/ptrace.h for details. */
4963 enum {
4964 TARGET_PT_R0 = 0,
4965 TARGET_PT_R1 = 1,
4966 TARGET_PT_R2 = 2,
4967 TARGET_PT_R3 = 3,
4968 TARGET_PT_R4 = 4,
4969 TARGET_PT_R5 = 5,
4970 TARGET_PT_R6 = 6,
4971 TARGET_PT_R7 = 7,
4972 TARGET_PT_R8 = 8,
4973 TARGET_PT_R9 = 9,
4974 TARGET_PT_R10 = 10,
4975 TARGET_PT_R11 = 11,
4976 TARGET_PT_R12 = 12,
4977 TARGET_PT_R13 = 13,
4978 TARGET_PT_R14 = 14,
4979 TARGET_PT_R15 = 15,
4980 TARGET_PT_R16 = 16,
4981 TARGET_PT_R17 = 17,
4982 TARGET_PT_R18 = 18,
4983 TARGET_PT_R19 = 19,
4984 TARGET_PT_R20 = 20,
4985 TARGET_PT_R21 = 21,
4986 TARGET_PT_R22 = 22,
4987 TARGET_PT_R23 = 23,
4988 TARGET_PT_R24 = 24,
4989 TARGET_PT_R25 = 25,
4990 TARGET_PT_R26 = 26,
4991 TARGET_PT_R27 = 27,
4992 TARGET_PT_R28 = 28,
4993 TARGET_PT_R29 = 29,
4994 TARGET_PT_R30 = 30,
4995 TARGET_PT_R31 = 31,
4996 TARGET_PT_NIP = 32,
4997 TARGET_PT_MSR = 33,
4998 TARGET_PT_ORIG_R3 = 34,
4999 TARGET_PT_CTR = 35,
5000 TARGET_PT_LNK = 36,
5001 TARGET_PT_XER = 37,
5002 TARGET_PT_CCR = 38,
5003 /* Yes, there are two registers with #39. One is 64-bit only. */
5004 TARGET_PT_MQ = 39,
5005 TARGET_PT_SOFTE = 39,
5006 TARGET_PT_TRAP = 40,
5007 TARGET_PT_DAR = 41,
5008 TARGET_PT_DSISR = 42,
5009 TARGET_PT_RESULT = 43,
5010 TARGET_PT_REGS_COUNT = 44
5011 };
5012
5013
5014 struct target_ucontext {
5015 target_ulong tuc_flags;
5016 target_ulong tuc_link; /* ucontext_t __user * */
5017 struct target_sigaltstack tuc_stack;
5018 #if !defined(TARGET_PPC64)
5019 int32_t tuc_pad[7];
5020 target_ulong tuc_regs; /* struct mcontext __user *
5021 points to uc_mcontext field */
5022 #endif
5023 target_sigset_t tuc_sigmask;
5024 #if defined(TARGET_PPC64)
5025 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
5026 struct target_sigcontext tuc_sigcontext;
5027 #else
5028 int32_t tuc_maskext[30];
5029 int32_t tuc_pad2[3];
5030 struct target_mcontext tuc_mcontext;
5031 #endif
5032 };
5033
5034 /* See arch/powerpc/kernel/signal_32.c. */
5035 struct target_sigframe {
5036 struct target_sigcontext sctx;
5037 struct target_mcontext mctx;
5038 int32_t abigap[56];
5039 };
5040
5041 #if defined(TARGET_PPC64)
5042
5043 #define TARGET_TRAMP_SIZE 6
5044
5045 struct target_rt_sigframe {
5046 /* sys_rt_sigreturn requires the ucontext be the first field */
5047 struct target_ucontext uc;
5048 target_ulong _unused[2];
5049 uint32_t trampoline[TARGET_TRAMP_SIZE];
5050 target_ulong pinfo; /* struct siginfo __user * */
5051 target_ulong puc; /* void __user * */
5052 struct target_siginfo info;
5053 /* The 64-bit ABI allows for 288 bytes below sp before decrementing it. */
5054 char abigap[288];
5055 } __attribute__((aligned(16)));
5056
5057 #else
5058
5059 struct target_rt_sigframe {
5060 struct target_siginfo info;
5061 struct target_ucontext uc;
5062 int32_t abigap[56];
5063 };
5064
5065 #endif
5066
5067 #if defined(TARGET_PPC64)
5068
5069 struct target_func_ptr {
5070 target_ulong entry;
5071 target_ulong toc;
5072 };
5073
5074 #endif
5075
5076 /* We use the mc_pad field for the signal return trampoline. */
5077 #define tramp mc_pad
5078
5079 /* See arch/powerpc/kernel/signal.c. */
5080 static target_ulong get_sigframe(struct target_sigaction *ka,
5081 CPUPPCState *env,
5082 int frame_size)
5083 {
5084 target_ulong oldsp;
5085
5086 oldsp = env->gpr[1];
5087
5088 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5089 (sas_ss_flags(oldsp) == 0)) {
5090 oldsp = (target_sigaltstack_used.ss_sp
5091 + target_sigaltstack_used.ss_size);
5092 }
5093
5094 return (oldsp - frame_size) & ~0xFUL;
5095 }
5096
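/* Index of the architecturally high/low doubleword within a host ppc_avr_t:
   when host and target endianness match the u64[] layout is direct, otherwise
   the two halves are swapped. Used below when copying Altivec registers. */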
5097 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5098 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5099 #define PPC_VEC_HI 0
5100 #define PPC_VEC_LO 1
5101 #else
5102 #define PPC_VEC_HI 1
5103 #define PPC_VEC_LO 0
5104 #endif
5105
5106
5107 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
5108 {
5109 target_ulong msr = env->msr;
5110 int i;
5111 target_ulong ccr = 0;
5112
5113 /* In general, the kernel attempts to be intelligent about what it
5114 needs to save for Altivec/FP/SPE registers. We don't care that
5115 much, so we just go ahead and save everything. */
5116
5117 /* Save general registers. */
5118 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5119 __put_user(env->gpr[i], &frame->mc_gregs[i]);
5120 }
5121 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5122 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5123 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5124 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5125
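    /* Pack the eight 4-bit CR fields into a single 32-bit CR image, CR0 in
       the most significant nibble. */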
5126 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5127 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
5128 }
5129 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5130
5131 /* Save Altivec registers if necessary. */
5132 if (env->insns_flags & PPC_ALTIVEC) {
5133 uint32_t *vrsave;
5134 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5135 ppc_avr_t *avr = &env->avr[i];
5136 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
5137
5138 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5139 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5140 }
5141 /* Set MSR_VR in the saved MSR value to indicate that
5142 frame->mc_vregs contains valid data. */
5143 msr |= MSR_VR;
5144 #if defined(TARGET_PPC64)
5145 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
5146 /* 64-bit needs to put a pointer to the vectors in the frame */
5147 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
5148 #else
5149 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
5150 #endif
5151 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
5152 }
5153
5154 /* Save VSX second halves */
5155 if (env->insns_flags2 & PPC2_VSX) {
5156 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5157 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5158 __put_user(env->vsr[i], &vsregs[i]);
5159 }
5160 }
5161
5162 /* Save floating point registers. */
5163 if (env->insns_flags & PPC_FLOAT) {
5164 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5165 __put_user(env->fpr[i], &frame->mc_fregs[i]);
5166 }
5167 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
5168 }
5169
5170 /* Save SPE registers. The kernel only saves the high half. */
5171 if (env->insns_flags & PPC_SPE) {
5172 #if defined(TARGET_PPC64)
5173 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5174 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
5175 }
5176 #else
5177 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5178 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5179 }
5180 #endif
5181 /* Set MSR_SPE in the saved MSR value to indicate that
5182 frame->mc_vregs contains valid data. */
5183 msr |= MSR_SPE;
5184 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5185 }
5186
5187 /* Store MSR. */
5188 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5189 }
5190
5191 static void encode_trampoline(int sigret, uint32_t *tramp)
5192 {
5193 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5194 if (sigret) {
5195 __put_user(0x38000000 | sigret, &tramp[0]);
5196 __put_user(0x44000002, &tramp[1]);
5197 }
5198 }
5199
5200 static void restore_user_regs(CPUPPCState *env,
5201 struct target_mcontext *frame, int sig)
5202 {
5203 target_ulong save_r2 = 0;
5204 target_ulong msr;
5205 target_ulong ccr;
5206
5207 int i;
5208
5209 if (!sig) {
5210 save_r2 = env->gpr[2];
5211 }
5212
5213 /* Restore general registers. */
5214 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5215 __get_user(env->gpr[i], &frame->mc_gregs[i]);
5216 }
5217 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5218 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5219 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5220 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5221 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5222
5223 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5224 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
5225 }
5226
5227 if (!sig) {
5228 env->gpr[2] = save_r2;
5229 }
5230 /* Restore MSR. */
5231 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5232
5233 /* If doing a signal return, restore the previous endian setting (MSR_LE). */
5234 if (sig)
5235 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
5236
5237 /* Restore Altivec registers if necessary. */
5238 if (env->insns_flags & PPC_ALTIVEC) {
5239 ppc_avr_t *v_regs;
5240 uint32_t *vrsave;
5241 #if defined(TARGET_PPC64)
5242 uint64_t v_addr;
5243 /* 64-bit needs to recover the pointer to the vectors from the frame */
5244 __get_user(v_addr, &frame->v_regs);
5245 v_regs = g2h(v_addr);
5246 #else
5247 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
5248 #endif
5249 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5250 ppc_avr_t *avr = &env->avr[i];
5251 ppc_avr_t *vreg = &v_regs[i];
5252
5253 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5254 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5255 }
5256 /* Recover VRSAVE; it is stored after the vector registers
5257 (index 33 on PPC64, 32 on PPC32). */
5258 #if defined(TARGET_PPC64)
5259 vrsave = (uint32_t *)&v_regs[33];
5260 #else
5261 vrsave = (uint32_t *)&v_regs[32];
5262 #endif
5263 __get_user(env->spr[SPR_VRSAVE], vrsave);
5264 }
5265
5266 /* Restore VSX second halves */
5267 if (env->insns_flags2 & PPC2_VSX) {
5268 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5269 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5270 __get_user(env->vsr[i], &vsregs[i]);
5271 }
5272 }
5273
5274 /* Restore floating point registers. */
5275 if (env->insns_flags & PPC_FLOAT) {
5276 uint64_t fpscr;
5277 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5278 __get_user(env->fpr[i], &frame->mc_fregs[i]);
5279 }
5280 __get_user(fpscr, &frame->mc_fregs[32]);
5281 env->fpscr = (uint32_t) fpscr;
5282 }
5283
5284 /* Restore SPE registers. The kernel only saves the high half. */
5285 if (env->insns_flags & PPC_SPE) {
5286 #if defined(TARGET_PPC64)
5287 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5288 uint32_t hi;
5289
5290 __get_user(hi, &frame->mc_vregs.spe[i]);
5291 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
5292 }
5293 #else
5294 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5295 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5296 }
5297 #endif
5298 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5299 }
5300 }
5301
5302 #if !defined(TARGET_PPC64)
5303 static void setup_frame(int sig, struct target_sigaction *ka,
5304 target_sigset_t *set, CPUPPCState *env)
5305 {
5306 struct target_sigframe *frame;
5307 struct target_sigcontext *sc;
5308 target_ulong frame_addr, newsp;
5309 int err = 0;
5310
5311 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5312 trace_user_setup_frame(env, frame_addr);
5313 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5314 goto sigsegv;
5315 sc = &frame->sctx;
5316
5317 __put_user(ka->_sa_handler, &sc->handler);
5318 __put_user(set->sig[0], &sc->oldmask);
5319 __put_user(set->sig[1], &sc->_unused[3]);
5320 __put_user(h2g(&frame->mctx), &sc->regs);
5321 __put_user(sig, &sc->signal);
5322
5323 /* Save user regs. */
5324 save_user_regs(env, &frame->mctx);
5325
5326 /* Construct the trampoline code on the stack. */
5327 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5328
5329 /* The kernel checks for the presence of a VDSO here. We don't
5330 emulate a vdso, so use a sigreturn system call. */
5331 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5332
5333 /* Turn off all fp exceptions. */
5334 env->fpscr = 0;
5335
5336 /* Create a stack frame for the caller of the handler. */
5337 newsp = frame_addr - SIGNAL_FRAMESIZE;
5338 err |= put_user(env->gpr[1], newsp, target_ulong);
5339
5340 if (err)
5341 goto sigsegv;
5342
5343 /* Set up registers for signal handler. */
5344 env->gpr[1] = newsp;
5345 env->gpr[3] = sig;
5346 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5347
5348 env->nip = (target_ulong) ka->_sa_handler;
5349
5350 /* Signal handlers are entered in big-endian mode. */
5351 env->msr &= ~(1ull << MSR_LE);
5352
5353 unlock_user_struct(frame, frame_addr, 1);
5354 return;
5355
5356 sigsegv:
5357 unlock_user_struct(frame, frame_addr, 1);
5358 force_sigsegv(sig);
5359 }
5360 #endif /* !defined(TARGET_PPC64) */
5361
5362 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5363 target_siginfo_t *info,
5364 target_sigset_t *set, CPUPPCState *env)
5365 {
5366 struct target_rt_sigframe *rt_sf;
5367 uint32_t *trampptr = 0;
5368 struct target_mcontext *mctx = 0;
5369 target_ulong rt_sf_addr, newsp = 0;
5370 int i, err = 0;
5371 #if defined(TARGET_PPC64)
5372 struct target_sigcontext *sc = 0;
5373 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5374 #endif
5375
5376 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5377 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5378 goto sigsegv;
5379
5380 tswap_siginfo(&rt_sf->info, info);
5381
5382 __put_user(0, &rt_sf->uc.tuc_flags);
5383 __put_user(0, &rt_sf->uc.tuc_link);
5384 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5385 &rt_sf->uc.tuc_stack.ss_sp);
5386 __put_user(sas_ss_flags(env->gpr[1]),
5387 &rt_sf->uc.tuc_stack.ss_flags);
5388 __put_user(target_sigaltstack_used.ss_size,
5389 &rt_sf->uc.tuc_stack.ss_size);
5390 #if !defined(TARGET_PPC64)
5391 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5392 &rt_sf->uc.tuc_regs);
5393 #endif
5394 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5395 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5396 }
5397
5398 #if defined(TARGET_PPC64)
5399 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
5400 trampptr = &rt_sf->trampoline[0];
5401
5402 sc = &rt_sf->uc.tuc_sigcontext;
5403 __put_user(h2g(mctx), &sc->regs);
5404 __put_user(sig, &sc->signal);
5405 #else
5406 mctx = &rt_sf->uc.tuc_mcontext;
5407 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5408 #endif
5409
5410 save_user_regs(env, mctx);
5411 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5412
5413 /* The kernel checks for the presence of a VDSO here. We don't
5414 emulate a vdso, so use a sigreturn system call. */
5415 env->lr = (target_ulong) h2g(trampptr);
5416
5417 /* Turn off all fp exceptions. */
5418 env->fpscr = 0;
5419
5420 /* Create a stack frame for the caller of the handler. */
5421 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5422 err |= put_user(env->gpr[1], newsp, target_ulong);
5423
5424 if (err)
5425 goto sigsegv;
5426
5427 /* Set up registers for signal handler. */
5428 env->gpr[1] = newsp;
5429 env->gpr[3] = (target_ulong) sig;
5430 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5431 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5432 env->gpr[6] = (target_ulong) h2g(rt_sf);
5433
5434 #if defined(TARGET_PPC64)
5435 if (get_ppc64_abi(image) < 2) {
5436 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5437 struct target_func_ptr *handler =
5438 (struct target_func_ptr *)g2h(ka->_sa_handler);
5439 env->nip = tswapl(handler->entry);
5440 env->gpr[2] = tswapl(handler->toc);
5441 } else {
5442 /* ELFv2 PPC64 function pointers are entry points, but R12
5443 * must also be set */
5444 env->nip = tswapl((target_ulong) ka->_sa_handler);
5445 env->gpr[12] = env->nip;
5446 }
5447 #else
5448 env->nip = (target_ulong) ka->_sa_handler;
5449 #endif
5450
5451 /* Signal handlers are entered in big-endian mode. */
5452 env->msr &= ~(1ull << MSR_LE);
5453
5454 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5455 return;
5456
5457 sigsegv:
5458 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5459 force_sigsegv(sig);
5460
5461 }
5462
5463 #if !defined(TARGET_PPC64)
5464 long do_sigreturn(CPUPPCState *env)
5465 {
5466 struct target_sigcontext *sc = NULL;
5467 struct target_mcontext *sr = NULL;
5468 target_ulong sr_addr = 0, sc_addr;
5469 sigset_t blocked;
5470 target_sigset_t set;
5471
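    /* setup_frame() placed the sigcontext immediately above the dummy
       SIGNAL_FRAMESIZE stack frame it pushed, so recover it from there. */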
5472 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5473 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5474 goto sigsegv;
5475
5476 #if defined(TARGET_PPC64)
5477 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5478 #else
5479 __get_user(set.sig[0], &sc->oldmask);
5480 __get_user(set.sig[1], &sc->_unused[3]);
5481 #endif
5482 target_to_host_sigset_internal(&blocked, &set);
5483 set_sigmask(&blocked);
5484
5485 __get_user(sr_addr, &sc->regs);
5486 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5487 goto sigsegv;
5488 restore_user_regs(env, sr, 1);
5489
5490 unlock_user_struct(sr, sr_addr, 1);
5491 unlock_user_struct(sc, sc_addr, 1);
5492 return -TARGET_QEMU_ESIGRETURN;
5493
5494 sigsegv:
5495 unlock_user_struct(sr, sr_addr, 1);
5496 unlock_user_struct(sc, sc_addr, 1);
5497 force_sig(TARGET_SIGSEGV);
5498 return -TARGET_QEMU_ESIGRETURN;
5499 }
5500 #endif /* !defined(TARGET_PPC64) */
5501
5502 /* See arch/powerpc/kernel/signal_32.c. */
5503 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5504 {
5505 struct target_mcontext *mcp;
5506 target_ulong mcp_addr;
5507 sigset_t blocked;
5508 target_sigset_t set;
5509
5510 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5511 sizeof (set)))
5512 return 1;
5513
5514 #if defined(TARGET_PPC64)
5515 mcp_addr = h2g(ucp) +
5516 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5517 #else
5518 __get_user(mcp_addr, &ucp->tuc_regs);
5519 #endif
5520
5521 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5522 return 1;
5523
5524 target_to_host_sigset_internal(&blocked, &set);
5525 set_sigmask(&blocked);
5526 restore_user_regs(env, mcp, sig);
5527
5528 unlock_user_struct(mcp, mcp_addr, 1);
5529 return 0;
5530 }
5531
5532 long do_rt_sigreturn(CPUPPCState *env)
5533 {
5534 struct target_rt_sigframe *rt_sf = NULL;
5535 target_ulong rt_sf_addr;
5536
5537 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5538 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5539 goto sigsegv;
5540
5541 if (do_setcontext(&rt_sf->uc, env, 1))
5542 goto sigsegv;
5543
5544 do_sigaltstack(rt_sf_addr
5545 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5546 0, env->gpr[1]);
5547
5548 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5549 return -TARGET_QEMU_ESIGRETURN;
5550
5551 sigsegv:
5552 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5553 force_sig(TARGET_SIGSEGV);
5554 return -TARGET_QEMU_ESIGRETURN;
5555 }
5556
5557 #elif defined(TARGET_M68K)
5558
5559 struct target_sigcontext {
5560 abi_ulong sc_mask;
5561 abi_ulong sc_usp;
5562 abi_ulong sc_d0;
5563 abi_ulong sc_d1;
5564 abi_ulong sc_a0;
5565 abi_ulong sc_a1;
5566 unsigned short sc_sr;
5567 abi_ulong sc_pc;
5568 };
5569
5570 struct target_sigframe
5571 {
5572 abi_ulong pretcode;
5573 int sig;
5574 int code;
5575 abi_ulong psc;
5576 char retcode[8];
5577 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5578 struct target_sigcontext sc;
5579 };
5580
5581 typedef int target_greg_t;
5582 #define TARGET_NGREG 18
5583 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5584
5585 typedef struct target_fpregset {
5586 int f_fpcntl[3];
5587 int f_fpregs[8*3];
5588 } target_fpregset_t;
5589
5590 struct target_mcontext {
5591 int version;
5592 target_gregset_t gregs;
5593 target_fpregset_t fpregs;
5594 };
5595
5596 #define TARGET_MCONTEXT_VERSION 2
5597
5598 struct target_ucontext {
5599 abi_ulong tuc_flags;
5600 abi_ulong tuc_link;
5601 target_stack_t tuc_stack;
5602 struct target_mcontext tuc_mcontext;
5603 abi_long tuc_filler[80];
5604 target_sigset_t tuc_sigmask;
5605 };
5606
5607 struct target_rt_sigframe
5608 {
5609 abi_ulong pretcode;
5610 int sig;
5611 abi_ulong pinfo;
5612 abi_ulong puc;
5613 char retcode[8];
5614 struct target_siginfo info;
5615 struct target_ucontext uc;
5616 };
5617
5618 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5619 abi_ulong mask)
5620 {
5621 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5622 __put_user(mask, &sc->sc_mask);
5623 __put_user(env->aregs[7], &sc->sc_usp);
5624 __put_user(env->dregs[0], &sc->sc_d0);
5625 __put_user(env->dregs[1], &sc->sc_d1);
5626 __put_user(env->aregs[0], &sc->sc_a0);
5627 __put_user(env->aregs[1], &sc->sc_a1);
5628 __put_user(sr, &sc->sc_sr);
5629 __put_user(env->pc, &sc->sc_pc);
5630 }
5631
5632 static void
5633 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5634 {
5635 int temp;
5636
5637 __get_user(env->aregs[7], &sc->sc_usp);
5638 __get_user(env->dregs[0], &sc->sc_d0);
5639 __get_user(env->dregs[1], &sc->sc_d1);
5640 __get_user(env->aregs[0], &sc->sc_a0);
5641 __get_user(env->aregs[1], &sc->sc_a1);
5642 __get_user(env->pc, &sc->sc_pc);
5643 __get_user(temp, &sc->sc_sr);
5644 cpu_m68k_set_ccr(env, temp);
5645 }
5646
5647 /*
5648 * Determine which stack to use..
5649 */
5650 static inline abi_ulong
5651 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5652 size_t frame_size)
5653 {
5654 unsigned long sp;
5655
5656 sp = regs->aregs[7];
5657
5658 /* This is the X/Open sanctioned signal stack switching. */
5659 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5660 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5661 }
5662
5663 return ((sp - frame_size) & -8UL);
5664 }
5665
5666 static void setup_frame(int sig, struct target_sigaction *ka,
5667 target_sigset_t *set, CPUM68KState *env)
5668 {
5669 struct target_sigframe *frame;
5670 abi_ulong frame_addr;
5671 abi_ulong retcode_addr;
5672 abi_ulong sc_addr;
5673 int i;
5674
5675 frame_addr = get_sigframe(ka, env, sizeof *frame);
5676 trace_user_setup_frame(env, frame_addr);
5677 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5678 goto give_sigsegv;
5679 }
5680
5681 __put_user(sig, &frame->sig);
5682
5683 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5684 __put_user(sc_addr, &frame->psc);
5685
5686 setup_sigcontext(&frame->sc, env, set->sig[0]);
5687
5688 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5689 __put_user(set->sig[i], &frame->extramask[i - 1]);
5690 }
5691
5692 /* Set up to return from userspace. */
5693
5694 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5695 __put_user(retcode_addr, &frame->pretcode);
5696
5697 /* moveq #,d0; trap #0 */
5698
5699 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5700 (uint32_t *)(frame->retcode));
5701
5702 /* Set up to return from userspace */
5703
5704 env->aregs[7] = frame_addr;
5705 env->pc = ka->_sa_handler;
5706
5707 unlock_user_struct(frame, frame_addr, 1);
5708 return;
5709
5710 give_sigsegv:
5711 force_sigsegv(sig);
5712 }
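/*
 * A small illustration of the retcode word written in setup_frame()
 * above, assuming the m68k value TARGET_NR_sigreturn == 119: the word
 * packs two big-endian 68k instructions, "moveq #nr,%d0" (0x70nn) in
 * the upper half and "trap #0" (0x4e40) in the lower half, giving
 * 0x70004e40 + (119 << 16) == 0x70774e40.  Sketch only, not used.
 */
static inline uint32_t m68k_sigreturn_retcode_sketch(int nr)
{
    /* moveq takes an 8-bit immediate; the sigreturn number fits. */
    return 0x70004e40u + ((uint32_t)(nr & 0xff) << 16);
}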
5713
5714 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
5715 CPUM68KState *env)
5716 {
5717 int i;
5718 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5719
5720 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
5721 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
5722 /* fpiar is not emulated */
5723
5724 for (i = 0; i < 8; i++) {
5725 uint32_t high = env->fregs[i].d.high << 16;
5726 __put_user(high, &fpregs->f_fpregs[i * 3]);
5727 __put_user(env->fregs[i].d.low,
5728 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5729 }
5730 }
5731
5732 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5733 CPUM68KState *env)
5734 {
5735 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5736 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5737
5738 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5739 __put_user(env->dregs[0], &gregs[0]);
5740 __put_user(env->dregs[1], &gregs[1]);
5741 __put_user(env->dregs[2], &gregs[2]);
5742 __put_user(env->dregs[3], &gregs[3]);
5743 __put_user(env->dregs[4], &gregs[4]);
5744 __put_user(env->dregs[5], &gregs[5]);
5745 __put_user(env->dregs[6], &gregs[6]);
5746 __put_user(env->dregs[7], &gregs[7]);
5747 __put_user(env->aregs[0], &gregs[8]);
5748 __put_user(env->aregs[1], &gregs[9]);
5749 __put_user(env->aregs[2], &gregs[10]);
5750 __put_user(env->aregs[3], &gregs[11]);
5751 __put_user(env->aregs[4], &gregs[12]);
5752 __put_user(env->aregs[5], &gregs[13]);
5753 __put_user(env->aregs[6], &gregs[14]);
5754 __put_user(env->aregs[7], &gregs[15]);
5755 __put_user(env->pc, &gregs[16]);
5756 __put_user(sr, &gregs[17]);
5757
5758 target_rt_save_fpu_state(uc, env);
5759
5760 return 0;
5761 }
5762
5763 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
5764 struct target_ucontext *uc)
5765 {
5766 int i;
5767 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5768 uint32_t fpcr;
5769
5770 __get_user(fpcr, &fpregs->f_fpcntl[0]);
5771 cpu_m68k_set_fpcr(env, fpcr);
5772 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
5773 /* fpiar is not emulated */
5774
5775 for (i = 0; i < 8; i++) {
5776 uint32_t high;
5777 __get_user(high, &fpregs->f_fpregs[i * 3]);
5778 env->fregs[i].d.high = high >> 16;
5779 __get_user(env->fregs[i].d.low,
5780 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5781 }
5782 }
5783
5784 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5785 struct target_ucontext *uc)
5786 {
5787 int temp;
5788 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5789
5790 __get_user(temp, &uc->tuc_mcontext.version);
5791 if (temp != TARGET_MCONTEXT_VERSION)
5792 goto badframe;
5793
5794 /* restore passed registers */
5795 __get_user(env->dregs[0], &gregs[0]);
5796 __get_user(env->dregs[1], &gregs[1]);
5797 __get_user(env->dregs[2], &gregs[2]);
5798 __get_user(env->dregs[3], &gregs[3]);
5799 __get_user(env->dregs[4], &gregs[4]);
5800 __get_user(env->dregs[5], &gregs[5]);
5801 __get_user(env->dregs[6], &gregs[6]);
5802 __get_user(env->dregs[7], &gregs[7]);
5803 __get_user(env->aregs[0], &gregs[8]);
5804 __get_user(env->aregs[1], &gregs[9]);
5805 __get_user(env->aregs[2], &gregs[10]);
5806 __get_user(env->aregs[3], &gregs[11]);
5807 __get_user(env->aregs[4], &gregs[12]);
5808 __get_user(env->aregs[5], &gregs[13]);
5809 __get_user(env->aregs[6], &gregs[14]);
5810 __get_user(env->aregs[7], &gregs[15]);
5811 __get_user(env->pc, &gregs[16]);
5812 __get_user(temp, &gregs[17]);
5813 cpu_m68k_set_ccr(env, temp);
5814
5815 target_rt_restore_fpu_state(env, uc);
5816
5817 return 0;
5818
5819 badframe:
5820 return 1;
5821 }
5822
5823 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5824 target_siginfo_t *info,
5825 target_sigset_t *set, CPUM68KState *env)
5826 {
5827 struct target_rt_sigframe *frame;
5828 abi_ulong frame_addr;
5829 abi_ulong retcode_addr;
5830 abi_ulong info_addr;
5831 abi_ulong uc_addr;
5832 int err = 0;
5833 int i;
5834
5835 frame_addr = get_sigframe(ka, env, sizeof *frame);
5836 trace_user_setup_rt_frame(env, frame_addr);
5837 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5838 goto give_sigsegv;
5839 }
5840
5841 __put_user(sig, &frame->sig);
5842
5843 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5844 __put_user(info_addr, &frame->pinfo);
5845
5846 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5847 __put_user(uc_addr, &frame->puc);
5848
5849 tswap_siginfo(&frame->info, info);
5850
5851 /* Create the ucontext */
5852
5853 __put_user(0, &frame->uc.tuc_flags);
5854 __put_user(0, &frame->uc.tuc_link);
5855 __put_user(target_sigaltstack_used.ss_sp,
5856 &frame->uc.tuc_stack.ss_sp);
5857 __put_user(sas_ss_flags(env->aregs[7]),
5858 &frame->uc.tuc_stack.ss_flags);
5859 __put_user(target_sigaltstack_used.ss_size,
5860 &frame->uc.tuc_stack.ss_size);
5861 err |= target_rt_setup_ucontext(&frame->uc, env);
5862
5863 if (err)
5864 goto give_sigsegv;
5865
5866 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5867 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5868 }
5869
5870 /* Set up to return from userspace. */
5871
5872 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5873 __put_user(retcode_addr, &frame->pretcode);
5874
5875 /* moveq #,d0; notb d0; trap #0 */
5876
5877 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5878 (uint32_t *)(frame->retcode + 0));
5879 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5880
5881 if (err)
5882 goto give_sigsegv;
5883
5884 /* Set up to return from userspace */
5885
5886 env->aregs[7] = frame_addr;
5887 env->pc = ka->_sa_handler;
5888
5889 unlock_user_struct(frame, frame_addr, 1);
5890 return;
5891
5892 give_sigsegv:
5893 unlock_user_struct(frame, frame_addr, 1);
5894 force_sigsegv(sig);
5895 }
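/*
 * Why the rt retcode above stores the complemented syscall number:
 * moveq sign-extends an 8-bit immediate, so a number above 127
 * (assuming the m68k value TARGET_NR_rt_sigreturn == 173) cannot be
 * loaded directly.  Storing nr ^ 0xff and executing "not.b %d0"
 * recovers it at runtime: 173 ^ 0xff == 82 fits in moveq, and not.b
 * turns 82 back into 173.  Illustrative sketch only, not used.
 */
static inline int m68k_rt_retcode_roundtrip_sketch(int nr)
{
    int moveq_imm = nr ^ 0xff;   /* value embedded in the moveq */
    return moveq_imm ^ 0xff;     /* what not.b %d0 yields at runtime */
}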
5896
5897 long do_sigreturn(CPUM68KState *env)
5898 {
5899 struct target_sigframe *frame;
5900 abi_ulong frame_addr = env->aregs[7] - 4;
5901 target_sigset_t target_set;
5902 sigset_t set;
5903 int i;
5904
5905 trace_user_do_sigreturn(env, frame_addr);
5906 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5907 goto badframe;
5908
5909 /* set blocked signals */
5910
5911 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5912
5913 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5914 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5915 }
5916
5917 target_to_host_sigset_internal(&set, &target_set);
5918 set_sigmask(&set);
5919
5920 /* restore registers */
5921
5922 restore_sigcontext(env, &frame->sc);
5923
5924 unlock_user_struct(frame, frame_addr, 0);
5925 return -TARGET_QEMU_ESIGRETURN;
5926
5927 badframe:
5928 force_sig(TARGET_SIGSEGV);
5929 return -TARGET_QEMU_ESIGRETURN;
5930 }
5931
5932 long do_rt_sigreturn(CPUM68KState *env)
5933 {
5934 struct target_rt_sigframe *frame;
5935 abi_ulong frame_addr = env->aregs[7] - 4;
5936 sigset_t set;
5937
5938 trace_user_do_rt_sigreturn(env, frame_addr);
5939 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5940 goto badframe;
5941
5942 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5943 set_sigmask(&set);
5944
5945 /* restore registers */
5946
5947 if (target_rt_restore_ucontext(env, &frame->uc))
5948 goto badframe;
5949
5950 if (do_sigaltstack(frame_addr +
5951 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5952 0, get_sp_from_cpustate(env)) == -EFAULT)
5953 goto badframe;
5954
5955 unlock_user_struct(frame, frame_addr, 0);
5956 return -TARGET_QEMU_ESIGRETURN;
5957
5958 badframe:
5959 unlock_user_struct(frame, frame_addr, 0);
5960 force_sig(TARGET_SIGSEGV);
5961 return -TARGET_QEMU_ESIGRETURN;
5962 }
5963
5964 #elif defined(TARGET_ALPHA)
5965
5966 struct target_sigcontext {
5967 abi_long sc_onstack;
5968 abi_long sc_mask;
5969 abi_long sc_pc;
5970 abi_long sc_ps;
5971 abi_long sc_regs[32];
5972 abi_long sc_ownedfp;
5973 abi_long sc_fpregs[32];
5974 abi_ulong sc_fpcr;
5975 abi_ulong sc_fp_control;
5976 abi_ulong sc_reserved1;
5977 abi_ulong sc_reserved2;
5978 abi_ulong sc_ssize;
5979 abi_ulong sc_sbase;
5980 abi_ulong sc_traparg_a0;
5981 abi_ulong sc_traparg_a1;
5982 abi_ulong sc_traparg_a2;
5983 abi_ulong sc_fp_trap_pc;
5984 abi_ulong sc_fp_trigger_sum;
5985 abi_ulong sc_fp_trigger_inst;
5986 };
5987
5988 struct target_ucontext {
5989 abi_ulong tuc_flags;
5990 abi_ulong tuc_link;
5991 abi_ulong tuc_osf_sigmask;
5992 target_stack_t tuc_stack;
5993 struct target_sigcontext tuc_mcontext;
5994 target_sigset_t tuc_sigmask;
5995 };
5996
5997 struct target_sigframe {
5998 struct target_sigcontext sc;
5999 unsigned int retcode[3];
6000 };
6001
6002 struct target_rt_sigframe {
6003 target_siginfo_t info;
6004 struct target_ucontext uc;
6005 unsigned int retcode[3];
6006 };
6007
6008 #define INSN_MOV_R30_R16 0x47fe0410
6009 #define INSN_LDI_R0 0x201f0000
6010 #define INSN_CALLSYS 0x00000083
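/*
 * A minimal sketch of the trampoline the two frame-setup routines
 * below emit when no sa_restorer is supplied: "mov $sp,$16" puts the
 * frame address (which sigreturn/rt_sigreturn read their context
 * from) into a0, "ldi $0,nr" selects the sigreturn flavour, and
 * "callsys" traps into the kernel.  The addition relies on the
 * syscall number fitting in the ldi displacement field, which holds
 * for the Alpha values used here.  Illustration only; the real code
 * stores these words with __put_user.
 */
static inline void alpha_retcode_sketch(uint32_t *retcode, int sigreturn_nr)
{
    retcode[0] = INSN_MOV_R30_R16;
    retcode[1] = INSN_LDI_R0 + sigreturn_nr;
    retcode[2] = INSN_CALLSYS;
}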
6011
6012 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6013 abi_ulong frame_addr, target_sigset_t *set)
6014 {
6015 int i;
6016
6017 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6018 __put_user(set->sig[0], &sc->sc_mask);
6019 __put_user(env->pc, &sc->sc_pc);
6020 __put_user(8, &sc->sc_ps);
6021
6022 for (i = 0; i < 31; ++i) {
6023 __put_user(env->ir[i], &sc->sc_regs[i]);
6024 }
6025 __put_user(0, &sc->sc_regs[31]);
6026
6027 for (i = 0; i < 31; ++i) {
6028 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6029 }
6030 __put_user(0, &sc->sc_fpregs[31]);
6031 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6032
6033 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6034 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6035 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
6036 }
6037
6038 static void restore_sigcontext(CPUAlphaState *env,
6039 struct target_sigcontext *sc)
6040 {
6041 uint64_t fpcr;
6042 int i;
6043
6044 __get_user(env->pc, &sc->sc_pc);
6045
6046 for (i = 0; i < 31; ++i) {
6047 __get_user(env->ir[i], &sc->sc_regs[i]);
6048 }
6049 for (i = 0; i < 31; ++i) {
6050 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6051 }
6052
6053 __get_user(fpcr, &sc->sc_fpcr);
6054 cpu_alpha_store_fpcr(env, fpcr);
6055 }
6056
6057 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6058 CPUAlphaState *env,
6059 unsigned long framesize)
6060 {
6061 abi_ulong sp = env->ir[IR_SP];
6062
6063 /* This is the X/Open sanctioned signal stack switching. */
6064 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6065 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6066 }
6067 return (sp - framesize) & -32;
6068 }
6069
6070 static void setup_frame(int sig, struct target_sigaction *ka,
6071 target_sigset_t *set, CPUAlphaState *env)
6072 {
6073 abi_ulong frame_addr, r26;
6074 struct target_sigframe *frame;
6075 int err = 0;
6076
6077 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6078 trace_user_setup_frame(env, frame_addr);
6079 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6080 goto give_sigsegv;
6081 }
6082
6083 setup_sigcontext(&frame->sc, env, frame_addr, set);
6084
6085 if (ka->sa_restorer) {
6086 r26 = ka->sa_restorer;
6087 } else {
6088 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6089 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6090 &frame->retcode[1]);
6091 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6092 /* imb() */
6093 r26 = frame_addr;
6094 }
6095
6096 unlock_user_struct(frame, frame_addr, 1);
6097
6098 if (err) {
6099 give_sigsegv:
6100 force_sigsegv(sig);
6101 return;
6102 }
6103
6104 env->ir[IR_RA] = r26;
6105 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6106 env->ir[IR_A0] = sig;
6107 env->ir[IR_A1] = 0;
6108 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6109 env->ir[IR_SP] = frame_addr;
6110 }
6111
6112 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6113 target_siginfo_t *info,
6114 target_sigset_t *set, CPUAlphaState *env)
6115 {
6116 abi_ulong frame_addr, r26;
6117 struct target_rt_sigframe *frame;
6118 int i, err = 0;
6119
6120 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6121 trace_user_setup_rt_frame(env, frame_addr);
6122 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6123 goto give_sigsegv;
6124 }
6125
6126 tswap_siginfo(&frame->info, info);
6127
6128 __put_user(0, &frame->uc.tuc_flags);
6129 __put_user(0, &frame->uc.tuc_link);
6130 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6131 __put_user(target_sigaltstack_used.ss_sp,
6132 &frame->uc.tuc_stack.ss_sp);
6133 __put_user(sas_ss_flags(env->ir[IR_SP]),
6134 &frame->uc.tuc_stack.ss_flags);
6135 __put_user(target_sigaltstack_used.ss_size,
6136 &frame->uc.tuc_stack.ss_size);
6137 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6138 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6139 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6140 }
6141
6142 if (ka->sa_restorer) {
6143 r26 = ka->sa_restorer;
6144 } else {
6145 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6146 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6147 &frame->retcode[1]);
6148 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6149 /* imb(); */
6150 r26 = frame_addr;
6151 }
6152
6153 if (err) {
6154 give_sigsegv:
6155 force_sigsegv(sig);
6156 return;
6157 }
6158
6159 env->ir[IR_RA] = r26;
6160 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6161 env->ir[IR_A0] = sig;
6162 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6163 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6164 env->ir[IR_SP] = frame_addr;
6165 }
6166
6167 long do_sigreturn(CPUAlphaState *env)
6168 {
6169 struct target_sigcontext *sc;
6170 abi_ulong sc_addr = env->ir[IR_A0];
6171 target_sigset_t target_set;
6172 sigset_t set;
6173
6174 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6175 goto badframe;
6176 }
6177
6178 target_sigemptyset(&target_set);
6179 __get_user(target_set.sig[0], &sc->sc_mask);
6180
6181 target_to_host_sigset_internal(&set, &target_set);
6182 set_sigmask(&set);
6183
6184 restore_sigcontext(env, sc);
6185 unlock_user_struct(sc, sc_addr, 0);
6186 return -TARGET_QEMU_ESIGRETURN;
6187
6188 badframe:
6189 force_sig(TARGET_SIGSEGV);
6190 return -TARGET_QEMU_ESIGRETURN;
6191 }
6192
6193 long do_rt_sigreturn(CPUAlphaState *env)
6194 {
6195 abi_ulong frame_addr = env->ir[IR_A0];
6196 struct target_rt_sigframe *frame;
6197 sigset_t set;
6198
6199 trace_user_do_rt_sigreturn(env, frame_addr);
6200 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6201 goto badframe;
6202 }
6203 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6204 set_sigmask(&set);
6205
6206 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6207 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6208 uc.tuc_stack),
6209 0, env->ir[IR_SP]) == -EFAULT) {
6210 goto badframe;
6211 }
6212
6213 unlock_user_struct(frame, frame_addr, 0);
6214 return -TARGET_QEMU_ESIGRETURN;
6215
6216
6217 badframe:
6218 unlock_user_struct(frame, frame_addr, 0);
6219 force_sig(TARGET_SIGSEGV);
6220 return -TARGET_QEMU_ESIGRETURN;
6221 }
6222
6223 #elif defined(TARGET_TILEGX)
6224
6225 struct target_sigcontext {
6226 union {
6227 /* General-purpose registers. */
6228 abi_ulong gregs[56];
6229 struct {
6230 abi_ulong __gregs[53];
6231 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
6232 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
6233 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
6234 };
6235 };
6236 abi_ulong pc; /* Program counter. */
6237 abi_ulong ics; /* In Interrupt Critical Section? */
6238 abi_ulong faultnum; /* Fault number. */
6239 abi_ulong pad[5];
6240 };
6241
6242 struct target_ucontext {
6243 abi_ulong tuc_flags;
6244 abi_ulong tuc_link;
6245 target_stack_t tuc_stack;
6246 struct target_sigcontext tuc_mcontext;
6247 target_sigset_t tuc_sigmask; /* mask last for extensibility */
6248 };
6249
6250 struct target_rt_sigframe {
6251 unsigned char save_area[16]; /* caller save area */
6252 struct target_siginfo info;
6253 struct target_ucontext uc;
6254 abi_ulong retcode[2];
6255 };
6256
6257 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6258 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
6259
6260
6261 static void setup_sigcontext(struct target_sigcontext *sc,
6262 CPUArchState *env, int signo)
6263 {
6264 int i;
6265
6266 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6267 __put_user(env->regs[i], &sc->gregs[i]);
6268 }
6269
6270 __put_user(env->pc, &sc->pc);
6271 __put_user(0, &sc->ics);
6272 __put_user(signo, &sc->faultnum);
6273 }
6274
6275 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6276 {
6277 int i;
6278
6279 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6280 __get_user(env->regs[i], &sc->gregs[i]);
6281 }
6282
6283 __get_user(env->pc, &sc->pc);
6284 }
6285
6286 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6287 size_t frame_size)
6288 {
6289 unsigned long sp = env->regs[TILEGX_R_SP];
6290
6291 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6292 return -1UL;
6293 }
6294
6295 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
6296 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6297 }
6298
6299 sp -= frame_size;
6300 sp &= -16UL;
6301 return sp;
6302 }
6303
6304 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6305 target_siginfo_t *info,
6306 target_sigset_t *set, CPUArchState *env)
6307 {
6308 abi_ulong frame_addr;
6309 struct target_rt_sigframe *frame;
6310 unsigned long restorer;
6311
6312 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6313 trace_user_setup_rt_frame(env, frame_addr);
6314 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6315 goto give_sigsegv;
6316 }
6317
6318 /* Always write at least the signal number for the stack backtracer. */
6319 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6320 /* At sigreturn time, restore the callee-save registers too. */
6321 tswap_siginfo(&frame->info, info);
6322 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
6323 } else {
6324 __put_user(info->si_signo, &frame->info.si_signo);
6325 }
6326
6327 /* Create the ucontext. */
6328 __put_user(0, &frame->uc.tuc_flags);
6329 __put_user(0, &frame->uc.tuc_link);
6330 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6331 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6332 &frame->uc.tuc_stack.ss_flags);
6333 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6334 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6335
6336 if (ka->sa_flags & TARGET_SA_RESTORER) {
6337 restorer = (unsigned long) ka->sa_restorer;
6338 } else {
6339 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6340 __put_user(INSN_SWINT1, &frame->retcode[1]);
6341 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6342 }
6343 env->pc = (unsigned long) ka->_sa_handler;
6344 env->regs[TILEGX_R_SP] = frame_addr;
6345 env->regs[TILEGX_R_LR] = restorer;
6346 env->regs[0] = (unsigned long) sig;
6347 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6348 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6349 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
6350
6351 unlock_user_struct(frame, frame_addr, 1);
6352 return;
6353
6354 give_sigsegv:
6355 force_sigsegv(sig);
6356 }
6357
6358 long do_rt_sigreturn(CPUTLGState *env)
6359 {
6360 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6361 struct target_rt_sigframe *frame;
6362 sigset_t set;
6363
6364 trace_user_do_rt_sigreturn(env, frame_addr);
6365 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6366 goto badframe;
6367 }
6368 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6369 set_sigmask(&set);
6370
6371 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6372 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6373 uc.tuc_stack),
6374 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6375 goto badframe;
6376 }
6377
6378 unlock_user_struct(frame, frame_addr, 0);
6379 return -TARGET_QEMU_ESIGRETURN;
6380
6381
6382 badframe:
6383 unlock_user_struct(frame, frame_addr, 0);
6384 force_sig(TARGET_SIGSEGV);
6385 return -TARGET_QEMU_ESIGRETURN;
6386 }
6387
6388 #elif defined(TARGET_RISCV)
6389
6390 /* Signal handler invocation must be transparent for the code being
6391 interrupted. Complete CPU (hart) state is saved on entry and restored
6392 before returning from the handler. Process sigmask is also saved to block
6393 signals while the handler is running. The handler gets its own stack,
6394 which also doubles as storage for the CPU state and sigmask.
6395
6396 The code below is a QEMU re-implementation of arch/riscv/kernel/signal.c. */
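/* From the guest's point of view the result is a normal rt signal
   delivery: a handler installed with SA_SIGINFO, e.g. (hypothetical
   guest code)

       void handler(int sig, siginfo_t *info, void *uc);

   is entered with a0 = sig, a1 = &frame->info, a2 = &frame->uc and
   ra pointing at the sigreturn trampoline, all of which are set up
   in setup_rt_frame() below. */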
6397
6398 struct target_sigcontext {
6399 abi_long pc;
6400 abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
6401 uint64_t fpr[32];
6402 uint32_t fcsr;
6403 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
6404
6405 struct target_ucontext {
6406 unsigned long uc_flags;
6407 struct target_ucontext *uc_link;
6408 target_stack_t uc_stack;
6409 struct target_sigcontext uc_mcontext;
6410 target_sigset_t uc_sigmask;
6411 };
6412
6413 struct target_rt_sigframe {
6414 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
6415 struct target_siginfo info;
6416 struct target_ucontext uc;
6417 };
6418
6419 static abi_ulong get_sigframe(struct target_sigaction *ka,
6420 CPURISCVState *regs, size_t framesize)
6421 {
6422 abi_ulong sp = regs->gpr[xSP];
6423 int onsigstack = on_sig_stack(sp);
6424
6425 /* redzone */
6426 /* This is the X/Open sanctioned signal stack switching. */
6427 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
6428 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6429 }
6430
6431 sp -= framesize;
6432 sp &= ~3UL; /* align sp on 4-byte boundary */
6433
6434 /* If we are on the alternate signal stack and would overflow it, don't.
6435 Return an always-bogus address instead so we will die with SIGSEGV. */
6436 if (onsigstack && !likely(on_sig_stack(sp))) {
6437 return -1L;
6438 }
6439
6440 return sp;
6441 }
6442
6443 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
6444 {
6445 int i;
6446
6447 __put_user(env->pc, &sc->pc);
6448
6449 for (i = 1; i < 32; i++) {
6450 __put_user(env->gpr[i], &sc->gpr[i - 1]);
6451 }
6452 for (i = 0; i < 32; i++) {
6453 __put_user(env->fpr[i], &sc->fpr[i]);
6454 }
6455
6456 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
6457 __put_user(fcsr, &sc->fcsr);
6458 }
6459
6460 static void setup_ucontext(struct target_ucontext *uc,
6461 CPURISCVState *env, target_sigset_t *set)
6462 {
6463 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
6464 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
6465 abi_ulong ss_size = target_sigaltstack_used.ss_size;
6466
6467 __put_user(0, &(uc->uc_flags));
6468 __put_user(0, &(uc->uc_link));
6469
6470 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
6471 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
6472 __put_user(ss_size, &(uc->uc_stack.ss_size));
6473
6474 int i;
6475 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6476 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
6477 }
6478
6479 setup_sigcontext(&uc->uc_mcontext, env);
6480 }
6481
6482 static inline void install_sigtramp(uint32_t *tramp)
6483 {
6484 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
6485 __put_user(0x00000073, tramp + 1); /* ecall */
6486 }
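/*
 * How the first word above decodes, as a sketch: "li a7, 139" is the
 * I-type instruction ADDI x17, x0, 139, i.e. (imm << 20) | (rs1 << 15)
 * | (funct3 << 12) | (rd << 7) | opcode = (139 << 20) | (17 << 7) | 0x13
 * = 0x08b00893, where 139 is the asm-generic __NR_rt_sigreturn.  The
 * second word, 0x00000073, is ECALL.  Illustration only, not used.
 */
static inline uint32_t riscv_li_a7_sketch(uint32_t imm12)
{
    return ((imm12 & 0xfff) << 20) | (17u << 7) | 0x13u;
}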
6487
6488 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6489 target_siginfo_t *info,
6490 target_sigset_t *set, CPURISCVState *env)
6491 {
6492 abi_ulong frame_addr;
6493 struct target_rt_sigframe *frame;
6494
6495 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6496 trace_user_setup_rt_frame(env, frame_addr);
6497
6498 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6499 goto badframe;
6500 }
6501
6502 setup_ucontext(&frame->uc, env, set);
6503 tswap_siginfo(&frame->info, info);
6504 install_sigtramp(frame->tramp);
6505
6506 env->pc = ka->_sa_handler;
6507 env->gpr[xSP] = frame_addr;
6508 env->gpr[xA0] = sig;
6509 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6510 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6511 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6512
6513 return;
6514
6515 badframe:
6516 unlock_user_struct(frame, frame_addr, 1);
6517 if (sig == TARGET_SIGSEGV) {
6518 ka->_sa_handler = TARGET_SIG_DFL;
6519 }
6520 force_sig(TARGET_SIGSEGV);
6521 }
6522
6523 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
6524 {
6525 int i;
6526
6527 __get_user(env->pc, &sc->pc);
6528
6529 for (i = 1; i < 32; ++i) {
6530 __get_user(env->gpr[i], &sc->gpr[i - 1]);
6531 }
6532 for (i = 0; i < 32; ++i) {
6533 __get_user(env->fpr[i], &sc->fpr[i]);
6534 }
6535
6536 uint32_t fcsr;
6537 __get_user(fcsr, &sc->fcsr);
6538 csr_write_helper(env, fcsr, CSR_FCSR);
6539 }
6540
6541 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
6542 {
6543 sigset_t blocked;
6544 target_sigset_t target_set;
6545 int i;
6546
6547 target_sigemptyset(&target_set);
6548 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6549 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
6550 }
6551
6552 target_to_host_sigset_internal(&blocked, &target_set);
6553 set_sigmask(&blocked);
6554
6555 restore_sigcontext(env, &uc->uc_mcontext);
6556 }
6557
6558 long do_rt_sigreturn(CPURISCVState *env)
6559 {
6560 struct target_rt_sigframe *frame;
6561 abi_ulong frame_addr;
6562
6563 frame_addr = env->gpr[xSP];
6564 trace_user_do_sigreturn(env, frame_addr);
6565 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6566 goto badframe;
6567 }
6568
6569 restore_ucontext(env, &frame->uc);
6570
6571 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6572 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
6573 goto badframe;
6574 }
6575
6576 unlock_user_struct(frame, frame_addr, 0);
6577 return -TARGET_QEMU_ESIGRETURN;
6578
6579 badframe:
6580 unlock_user_struct(frame, frame_addr, 0);
6581 force_sig(TARGET_SIGSEGV);
6582 return 0;
6583 }
6584
6585 #elif defined(TARGET_HPPA)
6586
6587 struct target_sigcontext {
6588 abi_ulong sc_flags;
6589 abi_ulong sc_gr[32];
6590 uint64_t sc_fr[32];
6591 abi_ulong sc_iasq[2];
6592 abi_ulong sc_iaoq[2];
6593 abi_ulong sc_sar;
6594 };
6595
6596 struct target_ucontext {
6597 abi_uint tuc_flags;
6598 abi_ulong tuc_link;
6599 target_stack_t tuc_stack;
6600 abi_uint pad[1];
6601 struct target_sigcontext tuc_mcontext;
6602 target_sigset_t tuc_sigmask;
6603 };
6604
6605 struct target_rt_sigframe {
6606 abi_uint tramp[9];
6607 target_siginfo_t info;
6608 struct target_ucontext uc;
6609 /* hidden location of upper halves of pa2.0 64-bit gregs */
6610 };
6611
6612 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6613 {
6614 int flags = 0;
6615 int i;
6616
6617 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6618
6619 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6620 /* In the gateway page, executing a syscall. */
6621 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6622 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6623 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6624 } else {
6625 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6626 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6627 }
6628 __put_user(0, &sc->sc_iasq[0]);
6629 __put_user(0, &sc->sc_iasq[1]);
6630 __put_user(flags, &sc->sc_flags);
6631
6632 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6633 for (i = 1; i < 32; ++i) {
6634 __put_user(env->gr[i], &sc->sc_gr[i]);
6635 }
6636
6637 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6638 for (i = 1; i < 32; ++i) {
6639 __put_user(env->fr[i], &sc->sc_fr[i]);
6640 }
6641
6642 __put_user(env->cr[CR_SAR], &sc->sc_sar);
6643 }
6644
6645 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6646 {
6647 target_ulong psw;
6648 int i;
6649
6650 __get_user(psw, &sc->sc_gr[0]);
6651 cpu_hppa_put_psw(env, psw);
6652
6653 for (i = 1; i < 32; ++i) {
6654 __get_user(env->gr[i], &sc->sc_gr[i]);
6655 }
6656 for (i = 0; i < 32; ++i) {
6657 __get_user(env->fr[i], &sc->sc_fr[i]);
6658 }
6659 cpu_hppa_loaded_fr0(env);
6660
6661 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6662 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6663 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6664 }
6665
6666 /* No, this doesn't look right, but it's copied straight from the kernel. */
6667 #define PARISC_RT_SIGFRAME_SIZE32 \
6668 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
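/*
 * Worked example of the macro above (numbers are illustrative,
 * assuming sizeof(struct target_rt_sigframe) came out to 520 bytes):
 * (520 + 48 + 64) & -64 == 632 & ~63 == 576.  Adding the 48 + 64
 * bytes of slop and then truncating to a multiple of 64 always
 * produces a 64-byte-aligned size at least as large as the frame
 * itself, which is all the odd-looking kernel formula needs.
 */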
6669
6670 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6671 target_siginfo_t *info,
6672 target_sigset_t *set, CPUArchState *env)
6673 {
6674 abi_ulong frame_addr, sp, haddr;
6675 struct target_rt_sigframe *frame;
6676 int i;
6677
6678 sp = env->gr[30];
6679 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6680 if (sas_ss_flags(sp) == 0) {
6681 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6682 }
6683 }
6684 frame_addr = QEMU_ALIGN_UP(sp, 64);
6685 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6686
6687 trace_user_setup_rt_frame(env, frame_addr);
6688
6689 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6690 goto give_sigsegv;
6691 }
6692
6693 tswap_siginfo(&frame->info, info);
6694 frame->uc.tuc_flags = 0;
6695 frame->uc.tuc_link = 0;
6696
6697 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6698 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6699 &frame->uc.tuc_stack.ss_flags);
6700 __put_user(target_sigaltstack_used.ss_size,
6701 &frame->uc.tuc_stack.ss_size);
6702
6703 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6704 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6705 }
6706
6707 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6708
6709 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6710 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6711 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6712 __put_user(0x08000240, frame->tramp + 3); /* nop */
6713
6714 unlock_user_struct(frame, frame_addr, 1);
6715
6716 env->gr[2] = h2g(frame->tramp);
6717 env->gr[30] = sp;
6718 env->gr[26] = sig;
6719 env->gr[25] = h2g(&frame->info);
6720 env->gr[24] = h2g(&frame->uc);
6721
6722 haddr = ka->_sa_handler;
6723 if (haddr & 2) {
6724 /* Function descriptor. */
6725 target_ulong *fdesc, dest;
6726
6727 haddr &= -4;
6728 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
6729 goto give_sigsegv;
6730 }
6731 __get_user(dest, fdesc);
6732 __get_user(env->gr[19], fdesc + 1);
6733 unlock_user_struct(fdesc, haddr, 1);
6734 haddr = dest;
6735 }
6736 env->iaoq_f = haddr;
6737 env->iaoq_b = haddr + 4;
6738 return;
6739
6740 give_sigsegv:
6741 force_sigsegv(sig);
6742 }
6743
6744 long do_rt_sigreturn(CPUArchState *env)
6745 {
6746 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
6747 struct target_rt_sigframe *frame;
6748 sigset_t set;
6749
6750 trace_user_do_rt_sigreturn(env, frame_addr);
6751 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6752 goto badframe;
6753 }
6754 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6755 set_sigmask(&set);
6756
6757 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6758 unlock_user_struct(frame, frame_addr, 0);
6759
6760 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6761 uc.tuc_stack),
6762 0, env->gr[30]) == -EFAULT) {
6763 goto badframe;
6764 }
6765
6766 unlock_user_struct(frame, frame_addr, 0);
6767 return -TARGET_QEMU_ESIGRETURN;
6768
6769 badframe:
6770 force_sig(TARGET_SIGSEGV);
6771 return -TARGET_QEMU_ESIGRETURN;
6772 }
6773
6774 #else
6775 #error Target needs to add support for signal handling
6776 #endif
6777
6778 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
6779 struct emulated_sigtable *k)
6780 {
6781 CPUState *cpu = ENV_GET_CPU(cpu_env);
6782 abi_ulong handler;
6783 sigset_t set;
6784 target_sigset_t target_old_set;
6785 struct target_sigaction *sa;
6786 TaskState *ts = cpu->opaque;
6787
6788 trace_user_handle_signal(cpu_env, sig);
6789 /* dequeue signal */
6790 k->pending = 0;
6791
6792 sig = gdb_handlesig(cpu, sig);
6793 if (!sig) {
6794 sa = NULL;
6795 handler = TARGET_SIG_IGN;
6796 } else {
6797 sa = &sigact_table[sig - 1];
6798 handler = sa->_sa_handler;
6799 }
6800
6801 if (do_strace) {
6802 print_taken_signal(sig, &k->info);
6803 }
6804
6805 if (handler == TARGET_SIG_DFL) {
6806 /* default handler: ignore some signals; the others are job control or fatal */
6807 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
6808 kill(getpid(),SIGSTOP);
6809 } else if (sig != TARGET_SIGCHLD &&
6810 sig != TARGET_SIGURG &&
6811 sig != TARGET_SIGWINCH &&
6812 sig != TARGET_SIGCONT) {
6813 dump_core_and_abort(sig);
6814 }
6815 } else if (handler == TARGET_SIG_IGN) {
6816 /* ignore sig */
6817 } else if (handler == TARGET_SIG_ERR) {
6818 dump_core_and_abort(sig);
6819 } else {
6820 /* compute the blocked signals during the handler execution */
6821 sigset_t *blocked_set;
6822
6823 target_to_host_sigset(&set, &sa->sa_mask);
6824 /* SA_NODEFER indicates that the current signal should not be
6825 blocked during the handler */
6826 if (!(sa->sa_flags & TARGET_SA_NODEFER))
6827 sigaddset(&set, target_to_host_signal(sig));
6828
6829 /* save the previous blocked signal state to restore it at the
6830 end of the signal execution (see do_sigreturn) */
6831 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
6832
6833 /* block signals in the handler */
6834 blocked_set = ts->in_sigsuspend ?
6835 &ts->sigsuspend_mask : &ts->signal_mask;
6836 sigorset(&ts->signal_mask, blocked_set, &set);
6837 ts->in_sigsuspend = 0;
6838
6839 /* if the CPU is in VM86 mode, we restore the 32 bit values */
6840 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
6841 {
6842 CPUX86State *env = cpu_env;
6843 if (env->eflags & VM_MASK)
6844 save_v86_state(env);
6845 }
6846 #endif
6847 /* prepare the stack frame of the virtual CPU */
6848 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
6849 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
6850 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
6851 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
6852 || defined(TARGET_RISCV)
6853 /* These targets do not have traditional signals. */
6854 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6855 #else
6856 if (sa->sa_flags & TARGET_SA_SIGINFO)
6857 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6858 else
6859 setup_frame(sig, sa, &target_old_set, cpu_env);
6860 #endif
6861 if (sa->sa_flags & TARGET_SA_RESETHAND) {
6862 sa->_sa_handler = TARGET_SIG_DFL;
6863 }
6864 }
6865 }
6866
6867 void process_pending_signals(CPUArchState *cpu_env)
6868 {
6869 CPUState *cpu = ENV_GET_CPU(cpu_env);
6870 int sig;
6871 TaskState *ts = cpu->opaque;
6872 sigset_t set;
6873 sigset_t *blocked_set;
6874
6875 while (atomic_read(&ts->signal_pending)) {
6876 /* FIXME: This is not threadsafe. */
6877 sigfillset(&set);
6878 sigprocmask(SIG_SETMASK, &set, 0);
6879
6880 restart_scan:
6881 sig = ts->sync_signal.pending;
6882 if (sig) {
6883 /* Synchronous signals are forced;
6884 * see force_sig_info() and its callers in Linux.
6885 * Note that not all of our queue_signal() calls in QEMU correspond
6886 * to force_sig_info() calls in Linux (some are send_sig_info()).
6887 * However, it seems like a kernel bug to allow the process
6888 * to block a synchronous signal, since it could then just end up
6889 * looping round and round indefinitely.
6890 */
6891 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
6892 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
6893 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
6894 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
6895 }
6896
6897 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
6898 }
6899
6900 for (sig = 1; sig <= TARGET_NSIG; sig++) {
6901 blocked_set = ts->in_sigsuspend ?
6902 &ts->sigsuspend_mask : &ts->signal_mask;
6903
6904 if (ts->sigtab[sig - 1].pending &&
6905 (!sigismember(blocked_set,
6906 target_to_host_signal_table[sig]))) {
6907 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
6908 /* Restart scan from the beginning, as handle_pending_signal
6909 * might have resulted in a new synchronous signal (eg SIGSEGV).
6910 */
6911 goto restart_scan;
6912 }
6913 }
6914
6915 /* if no signal is pending, unblock signals and recheck (the act
6916 * of unblocking might cause us to take another host signal which
6917 * will set signal_pending again).
6918 */
6919 atomic_set(&ts->signal_pending, 0);
6920 ts->in_sigsuspend = 0;
6921 set = ts->signal_mask;
6922 sigdelset(&set, SIGSEGV);
6923 sigdelset(&set, SIGBUS);
6924 sigprocmask(SIG_SETMASK, &set, 0);
6925 }
6926 ts->in_sigsuspend = 0;
6927 }