/* linux-user/signal.c — emulation of Linux signals for QEMU user mode */
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
/* The guest's current sigaltstack() settings.  Starts out disabled,
 * matching a freshly-created process with no alternate signal stack.
 */
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Guest view of each signal's sigaction, indexed by (target signal - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
39
/* Translation table from host signal numbers to target (guest) signal
 * numbers.  Entries left at 0 here are filled in as identity mappings
 * by signal_init(); target_to_host_signal_table below is derived as
 * the inverse mapping, also in signal_init().
 */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; populated by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
110
/* Clear every signal in a guest sigset (guest-format sigemptyset()). */
static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
/* Block all host signals for the current thread and mark the task as
 * having signal work pending.  Returns the previous value of
 * signal_pending (non-zero means a signal was already pending and the
 * caller typically needs to restart its syscall).
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    /* Atomic exchange so a racing host_signal_handler's pending flag
     * is not lost.
     */
    return atomic_xchg(&ts->signal_pending, 1);
}
208
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Report the old mask before any modification, as the kernel does. */
    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Serialize against signal delivery; may demand a restart. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            /* No sigandset-with-complement in libc: clear bit by bit. */
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
255
#if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
267
/* siginfo conversion */

/* Convert a host siginfo_t to the guest layout, without byte swapping
 * (tswap_siginfo() does that later).  As a side effect, a QEMU_SI_*
 * marker identifying the valid union member is stored in the top 16
 * bits of tinfo->si_code.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            /* Exit status needs its own host-to-guest conversion. */
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    /* Stash the union-member marker in the high half of si_code. */
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
346
/* Byte-swap a guest-layout siginfo into guest byte order, using the
 * QEMU_SI_* marker placed in the top 16 bits of si_code by
 * host_to_target_siginfo_noswap() to pick the valid union member.
 * The marker is stripped (and the low 16 bits sign-extended) before
 * si_code reaches the guest.
 */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        /* Marker was set by us, so anything else is a programming error. */
        g_assert_not_reached();
    }
}
404
405 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
406 {
407 target_siginfo_t tgt_tmp;
408 host_to_target_siginfo_noswap(&tgt_tmp, info);
409 tswap_siginfo(tinfo, &tgt_tmp);
410 }
411
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert a guest siginfo to host form.  Only used by rt_sigqueueinfo,
 * hence only the _rt union member is converted.
 */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    /* Guest pointer width may differ from host; widen via long. */
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
429
430 static int fatal_signal (int sig)
431 {
432 switch (sig) {
433 case TARGET_SIGCHLD:
434 case TARGET_SIGURG:
435 case TARGET_SIGWINCH:
436 /* Ignored by default. */
437 return 0;
438 case TARGET_SIGCONT:
439 case TARGET_SIGSTOP:
440 case TARGET_SIGTSTP:
441 case TARGET_SIGTTIN:
442 case TARGET_SIGTTOU:
443 /* Job control signals. */
444 return 0;
445 default:
446 return 1;
447 }
448 }
449
450 /* returns 1 if given signal should dump core if not handled */
451 static int core_dump_signal(int sig)
452 {
453 switch (sig) {
454 case TARGET_SIGABRT:
455 case TARGET_SIGFPE:
456 case TARGET_SIGILL:
457 case TARGET_SIGQUIT:
458 case TARGET_SIGSEGV:
459 case TARGET_SIGTRAP:
460 case TARGET_SIGBUS:
461 return (1);
462 default:
463 return (0);
464 }
465 }
466
/* One-time signal subsystem setup: build the host<->target signal
 * translation tables, seed the guest signal mask from the host's, and
 * install host_signal_handler for every default-fatal signal so that
 * exceptions and terminations can be routed to the guest.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record the disposition we inherited (IGN/DFL) so the guest
         * sees it via sigaction().
         */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
513
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
static void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    /* Fabricate a kernel-originated siginfo for the signal. */
    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
531
/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
static void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif
549
/* abort execution with signal */
/* Terminate QEMU the way the guest would have terminated: optionally
 * dump a guest-format core, then die from the (host) signal itself so
 * the parent observes the correct wait status.  Never returns.
 */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
603
/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
/* @si_type is a QEMU_SI_* marker identifying which siginfo union member
 * is valid; it is stored in the top 16 bits of info->si_code for
 * tswap_siginfo() to consume later.  Always returns 1 (signal queued).
 */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
622
#ifndef HAVE_SAFE_SYSCALL
/* Default version for hosts without safe-syscall support: never rewind
 * the PC out of a syscall on signal delivery.
 */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
629
/* Host-side SA_SIGINFO handler installed for all default-fatal signals.
 * Forwards CPU exceptions (SIGSEGV/SIGBUS) to the CPU emulation, and
 * records everything else in the per-task pending-signal table for
 * delivery to the guest by process_pending_signals().
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    /* Record the signal in the pending table for later delivery. */
    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
685
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
/* Implement the guest sigaltstack() syscall.  @uss_addr/@uoss_addr are
 * guest addresses of the new/old stack descriptors (either may be 0);
 * @sp is the guest stack pointer, used to reject changes while running
 * on the alternate stack and to report SS_ONSTACK.
 */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    /* Snapshot the old settings first, before they can be overwritten. */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the alternate stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
758
/* do_sigaction() return target values and host errnos */
/* Implement the guest sigaction() syscall: report and/or update the
 * guest disposition for @sig in sigact_table, and mirror the change in
 * the host's handler so interrupted-syscall and ignore semantics match.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    /* KILL and STOP dispositions are immutable, as in the kernel. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS handlers stay ours: the emulator needs them. */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
821
#if defined(TARGET_I386)

/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */

/* One x87 register in the legacy FSAVE layout. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* One x87 register in the FXSAVE layout (padded to 16 bytes). */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

/* One 128-bit SSE register. */
struct target_xmmreg {
    uint32_t element[4];
};

struct target_fpstate_32 {
    /* Regular FPU environment */
    uint32_t cw;
    uint32_t sw;
    uint32_t tag;
    uint32_t ipoff;
    uint32_t cssel;
    uint32_t dataoff;
    uint32_t datasel;
    struct target_fpreg st[8];
    uint16_t status;
    uint16_t magic;   /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    uint32_t _fxsr_env[6];   /* FXSR FPU env is ignored */
    uint32_t mxcsr;
    uint32_t reserved;
    struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg xmm[8];
    uint32_t padding[56];
};

struct target_fpstate_64 {
    /* FXSAVE format */
    uint16_t cw;
    uint16_t sw;
    uint16_t twd;
    uint16_t fop;
    uint64_t rip;
    uint64_t rdp;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint32_t st_space[32];
    uint32_t xmm_space[64];
    uint32_t reserved[24];
};

#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
#else
# define target_fpstate target_fpstate_64
#endif

struct target_sigcontext_32 {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    uint32_t edi;
    uint32_t esi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t ebx;
    uint32_t edx;
    uint32_t ecx;
    uint32_t eax;
    uint32_t trapno;
    uint32_t err;
    uint32_t eip;
    uint16_t cs, __csh;
    uint32_t eflags;
    uint32_t esp_at_signal;
    uint16_t ss, __ssh;
    uint32_t fpstate; /* pointer */
    uint32_t oldmask;
    uint32_t cr2;
};

struct target_sigcontext_64 {
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rdi;
    uint64_t rsi;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rdx;
    uint64_t rax;
    uint64_t rcx;
    uint64_t rsp;
    uint64_t rip;

    uint64_t eflags;

    uint16_t cs;
    uint16_t gs;
    uint16_t fs;
    uint16_t ss;

    uint64_t err;
    uint64_t trapno;
    uint64_t oldmask;
    uint64_t cr2;

    uint64_t fpstate; /* pointer */
    uint64_t padding[8];
};

#ifndef TARGET_X86_64
# define target_sigcontext target_sigcontext_32
#else
# define target_sigcontext target_sigcontext_64
#endif

/* see Linux/include/uapi/asm-generic/ucontext.h */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

#ifndef TARGET_X86_64
/* Layout of the classic (non-RT) i386 signal frame on the guest stack. */
struct sigframe {
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

/* Layout of the i386 RT signal frame on the guest stack. */
struct rt_sigframe {
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

#else

/* Layout of the x86-64 RT signal frame on the guest stack. */
struct rt_sigframe {
    abi_ulong pretcode;
    struct target_ucontext uc;
    struct target_siginfo info;
    struct target_fpstate fpstate;
};

#endif
990
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
/* Fill in the guest sigcontext (and attached FPU state at
 * @fpstate_addr) from the current CPU state.  @mask is the signal mask
 * to record for restoration at sigreturn time.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
#ifndef TARGET_X86_64
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    /* Legacy FSAVE image; magic 0xffff marks "regular FPU data only". */
    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
#else
    __put_user(env->regs[R_EDI], &sc->rdi);
    __put_user(env->regs[R_ESI], &sc->rsi);
    __put_user(env->regs[R_EBP], &sc->rbp);
    __put_user(env->regs[R_ESP], &sc->rsp);
    __put_user(env->regs[R_EBX], &sc->rbx);
    __put_user(env->regs[R_EDX], &sc->rdx);
    __put_user(env->regs[R_ECX], &sc->rcx);
    __put_user(env->regs[R_EAX], &sc->rax);

    __put_user(env->regs[8], &sc->r8);
    __put_user(env->regs[9], &sc->r9);
    __put_user(env->regs[10], &sc->r10);
    __put_user(env->regs[11], &sc->r11);
    __put_user(env->regs[12], &sc->r12);
    __put_user(env->regs[13], &sc->r13);
    __put_user(env->regs[14], &sc->r14);
    __put_user(env->regs[15], &sc->r15);

    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->rip);

    __put_user(env->eflags, &sc->eflags);
    __put_user(env->segs[R_CS].selector, &sc->cs);
    __put_user((uint16_t)0, &sc->gs);
    __put_user((uint16_t)0, &sc->fs);
    __put_user(env->segs[R_SS].selector, &sc->ss);

    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);

    /* fpstate_addr must be 16 byte aligned for fxsave */
    assert(!(fpstate_addr & 0xf));

    cpu_x86_fxsave(env, fpstate_addr);
    __put_user(fpstate_addr, &sc->fpstate);
#endif
}
1073
/*
 * Determine which stack to use..
 *
 * Returns the guest address at which to build a signal frame of
 * frame_size bytes.  Honours SA_ONSTACK (switch to the alternate signal
 * stack when not already executing on it) and, on i386 only, the legacy
 * sa_restorer-based stack switching.  The result is 8-byte aligned on
 * i386; on x86-64 it is aligned to 16 bytes minus 8 (the stack state at
 * function entry after a CALL) and skips the 128-byte red zone.
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
#ifdef TARGET_X86_64
    esp -= 128; /* this is the redzone */
#endif

    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {
#ifndef TARGET_X86_64
        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
                !(ka->sa_flags & TARGET_SA_RESTORER) &&
                ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
#endif
    }

#ifndef TARGET_X86_64
    return (esp - frame_size) & -8ul;
#else
    return ((esp - frame_size) & (~15ul)) - 8;
#endif
}
1111
1112 #ifndef TARGET_X86_64
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/*
 * Build a non-RT (legacy) i386 signal frame on the guest stack and
 * redirect the CPU to the handler.  The frame carries the signal
 * number, a sigcontext + FP state, the extra words of the blocked-set
 * mask, and a return trampoline (either ka->sa_restorer or an inline
 * popl/movl/int $0x80 stub written into frame->retcode).
 * On any failure to map the frame, raises SIGSEGV via force_sigsegv().
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    /* Saved CPU and FPU state; fpstate lives inside this same frame. */
    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* Words 1..N of the blocked signal mask (word 0 is in sc.oldmask). */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    /* Reset segments to flat user-mode values and clear the trap flag. */
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1170 #endif
1171
/* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
/*
 * Build an RT signal frame (siginfo + ucontext) on the guest stack for
 * both i386 and x86-64, and redirect the CPU to the handler.
 * On i386 the frame additionally carries the signal number and pointers
 * to info/uc, plus an inline trampoline if no sa_restorer was given;
 * on x86-64 the handler arguments go in registers and sa_restorer is
 * required by the ABI.  Raises SIGSEGV on failure to map the frame.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
#ifndef TARGET_X86_64
    abi_ulong addr;
#endif
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* These fields are only in rt_sigframe on 32 bit */
#ifndef TARGET_X86_64
    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
#endif
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        tswap_siginfo(&frame->info, info);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    /* Full blocked signal mask goes into the ucontext. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
#ifndef TARGET_X86_64
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }
#else
    /* XXX: Would be slightly better to return -EFAULT here if test fails
       assert(ka->sa_flags & TARGET_SA_RESTORER); */
    __put_user(ka->sa_restorer, &frame->pretcode);
#endif

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

#ifndef TARGET_X86_64
    /* i386 passes sig/info/uc on the stack; registers mirror them. */
    env->regs[R_EAX] = sig;
    env->regs[R_EDX] = (unsigned long)&frame->info;
    env->regs[R_ECX] = (unsigned long)&frame->uc;
#else
    /* x86-64 SysV calling convention: rdi, rsi, rdx carry the args. */
    env->regs[R_EAX] = 0;
    env->regs[R_EDI] = sig;
    env->regs[R_ESI] = (unsigned long)&frame->info;
    env->regs[R_EDX] = (unsigned long)&frame->uc;
#endif

    /* Reset segments to flat user-mode values and clear the trap flag. */
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1266
/*
 * Restore CPU state from a guest sigcontext (sigreturn path).
 * Reloads general-purpose registers, EIP/RIP, the user-visible parts of
 * EFLAGS, CS/SS (forced to CPL 3), and the FPU state if sc->fpstate is
 * non-NULL.  Returns 0 on success, non-zero if the fpstate area is not
 * readable.
 */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

#ifndef TARGET_X86_64
    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);

    env->eip = tswapl(sc->eip);
#else
    env->regs[8] = tswapl(sc->r8);
    env->regs[9] = tswapl(sc->r9);
    env->regs[10] = tswapl(sc->r10);
    env->regs[11] = tswapl(sc->r11);
    env->regs[12] = tswapl(sc->r12);
    env->regs[13] = tswapl(sc->r13);
    env->regs[14] = tswapl(sc->r14);
    env->regs[15] = tswapl(sc->r15);

    env->regs[R_EDI] = tswapl(sc->rdi);
    env->regs[R_ESI] = tswapl(sc->rsi);
    env->regs[R_EBP] = tswapl(sc->rbp);
    env->regs[R_EBX] = tswapl(sc->rbx);
    env->regs[R_EDX] = tswapl(sc->rdx);
    env->regs[R_EAX] = tswapl(sc->rax);
    env->regs[R_ECX] = tswapl(sc->rcx);
    env->regs[R_ESP] = tswapl(sc->rsp);

    env->eip = tswapl(sc->rip);
#endif

    /* OR with 3 forces user privilege (RPL 3) on the reloaded selectors. */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Only let the frame modify the user-controllable EFLAGS bits. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    //          regs->orig_eax = -1;            /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
#ifndef TARGET_X86_64
        cpu_x86_frstor(env, fpstate_addr, 1);
#else
        cpu_x86_fxrstor(env, fpstate_addr);
#endif
    }

    return err;
badframe:
    return 1;
}
1335
1336 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1337 #ifndef TARGET_X86_64
/*
 * Handle the legacy i386 sigreturn syscall: locate the sigframe that
 * setup_frame() built, restore the blocked signal mask and CPU state
 * from it, and resume at the interrupted context.  Raises SIGSEGV if
 * the frame is unreadable or inconsistent.
 */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    /* -8 accounts for stack words the return trampoline has already
     * consumed before reaching the sigreturn syscall. */
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1369 #endif
1370
/*
 * Handle the x86 rt_sigreturn syscall: locate the rt_sigframe built by
 * setup_rt_frame(), restore the signal mask, CPU state and sigaltstack
 * settings from the saved ucontext, and resume.  Raises SIGSEGV on a
 * bad frame.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    /* One word has been popped by the return to the trampoline. */
    frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Re-install the sigaltstack configuration saved in the ucontext. */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1401
1402 #elif defined(TARGET_AARCH64)
1403
/* Guest view of the AArch64 sigcontext; layout follows the Linux
 * arm64 signal ABI.  Do not reorder fields. */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

/* FP/SIMD register state record; always present in the frame. */
struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

#define TARGET_EXTRA_MAGIC  0x45585401

/* Advertises additional record space beyond the 4K __reserved area. */
struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size; /* size in bytes of the extra space */
    uint32_t reserved[3];
};

#define TARGET_SVE_MAGIC    0x53564501

struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual SVE data immediately follows.  It is layed out
     * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
     * the original struct pointer.
     */
};

/* One SVE vector quadword (the unit the vector length is counted in). */
#define TARGET_SVE_VQ_BYTES  16

#define TARGET_SVE_SIG_ZREG_SIZE(VQ)  ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ)  ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

/* Offsets of the Z registers, P registers and FFR within the SVE
 * record, all relative to the start of target_sve_context. */
#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))

/* The guest-visible rt signal frame proper. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};

/* Not a real record within the frame: the unwinder's fp/lr pair plus,
 * when no sa_restorer is provided, the sigreturn trampoline (on a real
 * system the trampoline would live in the VDSO). */
struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
1493
/*
 * Fill in the parts of the signal frame common to every signal:
 * ucontext flags/link, the sigaltstack description, the general
 * purpose registers, SP/PC/PSTATE, the fault address, and the signal
 * mask that was in force when the signal was delivered.
 */
static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    /* Describe the current sigaltstack state to the handler. */
    __put_user(target_sigaltstack_used.ss_sp, &sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]), &sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &sf->uc.tuc_stack.ss_size);

    /* x0..x30, then SP (xregs[31]), PC and PSTATE. */
    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}
1519
/*
 * Write the FPSIMD context record: magic/size header, FPSR/FPCR, and
 * the 32 128-bit vector registers (stored as pairs of 64-bit halves,
 * swapped on big-endian targets so the in-memory uint128 layout is
 * preserved).
 */
static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
1541
/*
 * Write an extra_context record advertising extra_size bytes of
 * additional record space at guest address datap.
 */
static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}
1550
/* Terminate a record list with the all-zero end marker. */
static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}
1556
/*
 * Write the SVE context record for vector length vq quadwords:
 * header, vl in bytes, then the 32 Z registers, the 16 P registers
 * and FFR (handled as a 17th predicate register by the offset macros).
 */
static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int vq, int size)
{
    int i, j;

    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    /* 16 predicate registers plus FFR at index 16. */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}
1584
/*
 * Restore the signal mask, general purpose registers, SP, PC and
 * PSTATE from the saved ucontext (sigreturn path).
 */
static void target_restore_general_frame(CPUARMState *env,
                                         struct target_rt_sigframe *sf)
{
    sigset_t set;
    uint64_t pstate;
    int i;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);
}
1604
/*
 * Restore FPSR/FPCR and the 32 vector registers from an FPSIMD record;
 * the 64-bit halves are swapped back on big-endian targets, mirroring
 * target_setup_fpsimd_record().
 */
static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
1627
/*
 * Restore the Z registers, P registers and FFR from an SVE record with
 * vector length vq quadwords, mirroring target_setup_sve_record().
 */
static void target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve, int vq)
{
    int i, j;

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    /* Reassemble each 64-bit predicate word from four 16-bit pieces. */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
}
1656
/*
 * Walk and validate the record list in the frame's __reserved area and
 * restore all saved state from it (general registers, FPSIMD, and SVE
 * if present).  An extra_context record redirects parsing once into
 * the extra space it points at.  Returns false (0) on success, true on
 * any malformed or unexpected record.
 */
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    struct target_aarch64_ctx *ctx, *extra = NULL;
    struct target_fpsimd_context *fpsimd = NULL;
    struct target_sve_context *sve = NULL;
    uint64_t extra_datap = 0;
    bool used_extra = false;
    bool err = false;
    int vq = 0, sve_size = 0;

    target_restore_general_frame(env, sf);

    ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
    while (ctx) {
        uint32_t magic, size, extra_size;

        __get_user(magic, &ctx->magic);
        __get_user(size, &ctx->size);
        switch (magic) {
        case 0:
            /* End marker: must have size 0.  On the first end marker,
             * continue into the extra space (if any); the second one
             * terminates the walk. */
            if (size != 0) {
                err = true;
                goto exit;
            }
            if (used_extra) {
                ctx = NULL;
            } else {
                ctx = extra;
                used_extra = true;
            }
            continue;

        case TARGET_FPSIMD_MAGIC:
            /* Exactly one FPSIMD record of the expected size. */
            if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
                err = true;
                goto exit;
            }
            fpsimd = (struct target_fpsimd_context *)ctx;
            break;

        case TARGET_SVE_MAGIC:
            /* Only accepted when the CPU has SVE and the record size
             * matches the current vector length exactly. */
            if (arm_feature(env, ARM_FEATURE_SVE)) {
                vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
                if (!sve && size == sve_size) {
                    sve = (struct target_sve_context *)ctx;
                    break;
                }
            }
            err = true;
            goto exit;

        case TARGET_EXTRA_MAGIC:
            /* Remember where the extra space is; we switch to it when
             * the standard space's end marker is reached. */
            if (extra || size != sizeof(struct target_extra_context)) {
                err = true;
                goto exit;
            }
            __get_user(extra_datap,
                       &((struct target_extra_context *)ctx)->datap);
            __get_user(extra_size,
                       &((struct target_extra_context *)ctx)->size);
            extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
            break;

        default:
            /* Unknown record -- we certainly didn't generate it.
             * Did we in fact get out of sync?
             */
            err = true;
            goto exit;
        }
        /* Advance to the next record using the header's size field. */
        ctx = (void *)ctx + size;
    }

    /* Require FPSIMD always.  */
    if (fpsimd) {
        target_restore_fpsimd_record(env, fpsimd);
    } else {
        err = true;
    }

    /* SVE data, if present, overwrites FPSIMD data.  */
    if (sve) {
        target_restore_sve_record(env, sve, vq);
    }

 exit:
    unlock_user(extra, extra_datap, 0);
    return err;
}
1748
1749 static abi_ulong get_sigframe(struct target_sigaction *ka,
1750 CPUARMState *env, int size)
1751 {
1752 abi_ulong sp;
1753
1754 sp = env->xregs[31];
1755
1756 /*
1757 * This is the X/Open sanctioned signal stack switching.
1758 */
1759 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1760 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1761 }
1762
1763 sp = (sp - size) & ~15;
1764
1765 return sp;
1766 }
1767
/* Running totals used while laying out an AArch64 signal frame. */
typedef struct {
    int total_size;    /* bytes reserved so far, including any extra space */
    int extra_base;    /* offset where the extra space begins, 0 if none */
    int extra_size;    /* bytes reserved within the extra space */
    int std_end_ofs;   /* offset of the end marker in the standard space */
    int extra_ofs;     /* offset of the extra_context record, 0 if none */
    int extra_end_ofs; /* offset of the end marker in the extra space */
} target_sigframe_layout;
1776
/*
 * Reserve this_size bytes in the frame layout l and return the offset
 * of the reservation.  Records go into the standard __reserved space
 * until it would overflow; from then on an extra_context record plus
 * the standard end marker are reserved, and this and all subsequent
 * allocations land in the "extra" space appended after the frame.
 */
static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
{
    /* Make sure there will always be space for the end marker.  */
    const int std_size = sizeof(struct target_rt_sigframe)
                         - sizeof(struct target_aarch64_ctx);
    int this_loc = l->total_size;

    if (l->extra_base) {
        /* Once we have begun an extra space, all allocations go there.  */
        l->extra_size += this_size;
    } else if (this_size + this_loc > std_size) {
        /* This allocation does not fit in the standard space.  */
        /* Allocate the extra record.  */
        l->extra_ofs = this_loc;
        l->total_size += sizeof(struct target_extra_context);

        /* Allocate the standard end record.  */
        l->std_end_ofs = l->total_size;
        l->total_size += sizeof(struct target_aarch64_ctx);

        /* Allocate the requested record.  */
        l->extra_base = this_loc = l->total_size;
        l->extra_size = this_size;
    }
    l->total_size += this_size;

    return this_loc;
}
1805
/*
 * Build a complete AArch64 rt signal frame on the guest stack and point
 * the CPU at the handler.  The layout is computed first (FPSIMD record,
 * optional SVE record, end markers, optional extra space), then the
 * records, the unwinder frame record and the return trampoline are
 * written, and finally the CPU registers are set up for the handler.
 * Raises SIGSEGV if the frame cannot be mapped.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space.  */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space.  */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (arm_feature(env, ARM_FEATURE_SVE)) {
        vq = (env->vfp.zcr_el[1] & 0xf) + 1;
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker.  The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because we cheat
         * std_size therein to reserve space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* We must always provide at least the standard 4K reserved space,
     * even if we don't use all of it (this is part of the ABI)
     */
    layout.total_size = MAX(layout.total_size,
                            sizeof(struct target_rt_sigframe));

    /* Reserve space for the return code.  On a real system this would
     * be within the VDSO.  So, despite the name this is not a "real"
     * record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);

    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Write all the records at the offsets computed above. */
    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
    }

    /* Set up the stack frame for unwinding.  */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /*
         * mov x8,#__NR_rt_sigreturn; svc #0
         * Since these are instructions they need to be put as little-endian
         * regardless of target default or current CPU endianness.
         */
        __put_user_e(0xd2801168, &fr->tramp[0], le);
        __put_user_e(0xd4000001, &fr->tramp[1], le);
        return_addr = frame_addr + fr_ofs
            + offsetof(struct target_rt_frame_record, tramp);
    }
    /* Handler arguments and entry state: x0 = signal, sp = frame,
     * fp chains to the frame record, lr returns to the trampoline. */
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = frame_addr + fr_ofs;
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        /* SA_SIGINFO-style handler: x1 = &info, x2 = &uc. */
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}
1914
/*
 * RT signal entry point: AArch64 has a single frame format, so this is
 * a thin wrapper around target_setup_frame() with siginfo attached.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}
1921
1922 static void setup_frame(int sig, struct target_sigaction *ka,
1923 target_sigset_t *set, CPUARMState *env)
1924 {
1925 target_setup_frame(sig, ka, 0, set, env);
1926 }
1927
/*
 * Handle the AArch64 rt_sigreturn syscall: the frame address is the
 * current SP (it must still be 16-byte aligned); restore all saved
 * state from the frame's records and re-install the sigaltstack
 * settings.  Raises SIGSEGV on a misaligned or malformed frame.
 */
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if  (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1960
/*
 * AArch64 has only one signal frame format, so plain sigreturn is
 * identical to rt_sigreturn.
 */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1965
1966 #elif defined(TARGET_ARM)
1967
/* Guest view of the 32-bit ARM sigcontext; layout follows the Linux
 * ARM signal ABI.  Do not reorder fields. */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

/* Old (pre-2.6.12-style) ucontext layout: sigmask last, no regspace. */
struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;       /* mask last for extensibility */
};

/* Current ucontext layout: padded sigmask plus coprocessor regspace. */
struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;       /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

/* VFP register file as exposed to the guest. */
struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

/* VFP exception registers. */
struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

/* Magic/size-tagged VFP record placed in tuc_regspace. */
struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

/* Magic/size-tagged iWMMXt record placed in tuc_regspace. */
struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

/* Old-style non-RT frame (ucontext v1 era). */
struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

/* Current non-RT frame: a full v2 ucontext plus the trampoline. */
struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

/* Old-style RT frame with explicit info/uc pointers. */
struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

/* Current RT frame. */
struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};
2072
#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN       (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN    (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Sigreturn trampolines, indexed by (SA_SIGINFO ? 2 : 0) + thumb;
 * see setup_return() below. */
static const abi_ulong retcodes[4] = {
        SWI_SYS_SIGRETURN,      SWI_THUMB_SIGRETURN,
        SWI_SYS_RT_SIGRETURN,   SWI_THUMB_RT_SIGRETURN
};
2092
2093
/*
 * Check user register state for validity.  The Linux kernel rejects
 * privileged PSR bits here; QEMU's user-mode emulation accepts
 * everything, so this always reports success.
 */
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
2098
/*
 * Fill a guest sigcontext from the current CPU state: r0-r15, CPSR,
 * and the saved signal mask.  trap_no/error_code/fault_address are
 * written as zero (QEMU does not track the thread fields the kernel
 * stores here).
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
2128
2129 static inline abi_ulong
2130 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
2131 {
2132 unsigned long sp = regs->regs[13];
2133
2134 /*
2135 * This is the X/Open sanctioned signal stack switching.
2136 */
2137 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
2138 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2139 }
2140 /*
2141 * ATPCS B01 mandates 8-byte alignment
2142 */
2143 return (sp - framesize) & ~7;
2144 }
2145
/*
 * Prepare the CPU to enter the signal handler: set the handler
 * argument (r0), the new stack pointer, a return address pointing at
 * the sigreturn trampoline, and the handler PC, adjusting CPSR for
 * ARM vs Thumb as indicated by bit 0 of the handler address.
 *
 * 'rc' is the host pointer to the frame's retcode slot and 'rc_addr'
 * its guest address; both are used only when no SA_RESTORER is set.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;        /* bit 0 set => Thumb handler */
    uint32_t cpsr = cpsr_read(env);

    /* Clear any IT state and make the T bit match the handler. */
    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        /* The application supplied its own return trampoline. */
        retcode = ka->sa_restorer;
    } else {
        /* Write a syscall trampoline into the frame; retcodes[] is
         * indexed by Thumb-ness and whether this is an rt frame. */
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        /* Keep bit 0 set so a Thumb return stays in Thumb state. */
        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;            /* first handler argument */
    env->regs[13] = frame_addr;     /* sp points at the frame */
    env->regs[14] = retcode;        /* lr returns via the trampoline */
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
2182
2183 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
2184 {
2185 int i;
2186 struct target_vfp_sigframe *vfpframe;
2187 vfpframe = (struct target_vfp_sigframe *)regspace;
2188 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
2189 __put_user(sizeof(*vfpframe), &vfpframe->size);
2190 for (i = 0; i < 32; i++) {
2191 __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
2192 }
2193 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
2194 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
2195 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2196 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2197 return (abi_ulong*)(vfpframe+1);
2198 }
2199
2200 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
2201 CPUARMState *env)
2202 {
2203 int i;
2204 struct target_iwmmxt_sigframe *iwmmxtframe;
2205 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2206 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
2207 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
2208 for (i = 0; i < 16; i++) {
2209 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2210 }
2211 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2212 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2213 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2214 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2215 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2216 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2217 return (abi_ulong*)(iwmmxtframe+1);
2218 }
2219
/*
 * Fill in a v2 (2.6.18+) ucontext: sigaltstack state, the basic
 * sigcontext, any coprocessor (VFP/iWMMXt) frames in tuc_regspace, a
 * terminating zero magic word, and finally the full signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    /* Record the current sigaltstack configuration. */
    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
2253
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/*
 * Build a legacy (pre-2.6.18) non-RT signal frame on the guest stack
 * and point the CPU at the handler.
 */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    /* sig[0] lives in the sigcontext; only the extra words go here. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
2281
2282 static void setup_frame_v2(int usig, struct target_sigaction *ka,
2283 target_sigset_t *set, CPUARMState *regs)
2284 {
2285 struct sigframe_v2 *frame;
2286 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2287
2288 trace_user_setup_frame(regs, frame_addr);
2289 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2290 goto sigsegv;
2291 }
2292
2293 setup_sigframe_v2(&frame->uc, set, regs);
2294
2295 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2296 frame_addr + offsetof(struct sigframe_v2, retcode));
2297
2298 unlock_user_struct(frame, frame_addr, 1);
2299 return;
2300 sigsegv:
2301 force_sigsegv(usig);
2302 }
2303
2304 static void setup_frame(int usig, struct target_sigaction *ka,
2305 target_sigset_t *set, CPUARMState *regs)
2306 {
2307 if (get_osversion() >= 0x020612) {
2308 setup_frame_v2(usig, ka, set, regs);
2309 } else {
2310 setup_frame_v1(usig, ka, set, regs);
2311 }
2312 }
2313
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/*
 * Build a legacy (pre-2.6.18) RT signal frame: explicit pinfo/puc
 * pointers, siginfo, a ucontext with sigaltstack info and sigcontext,
 * then enter the handler with r1 = &info and r2 = &uc.
 */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    /* Guest addresses the handler dereferences to reach the data. */
    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    /* Record the current sigaltstack configuration. */
    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    /* Arguments 2 and 3 for an SA_SIGINFO handler. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
2361
2362 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2363 target_siginfo_t *info,
2364 target_sigset_t *set, CPUARMState *env)
2365 {
2366 struct rt_sigframe_v2 *frame;
2367 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2368 abi_ulong info_addr, uc_addr;
2369
2370 trace_user_setup_rt_frame(env, frame_addr);
2371 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2372 goto sigsegv;
2373 }
2374
2375 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2376 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2377 tswap_siginfo(&frame->info, info);
2378
2379 setup_sigframe_v2(&frame->uc, set, env);
2380
2381 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2382 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2383
2384 env->regs[1] = info_addr;
2385 env->regs[2] = uc_addr;
2386
2387 unlock_user_struct(frame, frame_addr, 1);
2388 return;
2389 sigsegv:
2390 force_sigsegv(usig);
2391 }
2392
2393 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2394 target_siginfo_t *info,
2395 target_sigset_t *set, CPUARMState *env)
2396 {
2397 if (get_osversion() >= 0x020612) {
2398 setup_rt_frame_v2(usig, ka, info, set, env);
2399 } else {
2400 setup_rt_frame_v1(usig, ka, info, set, env);
2401 }
2402 }
2403
/*
 * Load CPU state back from a target_sigcontext: r0-r15 and (32-bit
 * configurations) the user-writable CPSR bits.  Returns non-zero if
 * the resulting register state is not acceptable.
 */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only user-writable flag/state bits may be written back. */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
2435
2436 static long do_sigreturn_v1(CPUARMState *env)
2437 {
2438 abi_ulong frame_addr;
2439 struct sigframe_v1 *frame = NULL;
2440 target_sigset_t set;
2441 sigset_t host_set;
2442 int i;
2443
2444 /*
2445 * Since we stacked the signal on a 64-bit boundary,
2446 * then 'sp' should be word aligned here. If it's
2447 * not, then the user is trying to mess with us.
2448 */
2449 frame_addr = env->regs[13];
2450 trace_user_do_sigreturn(env, frame_addr);
2451 if (frame_addr & 7) {
2452 goto badframe;
2453 }
2454
2455 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2456 goto badframe;
2457 }
2458
2459 __get_user(set.sig[0], &frame->sc.oldmask);
2460 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2461 __get_user(set.sig[i], &frame->extramask[i - 1]);
2462 }
2463
2464 target_to_host_sigset_internal(&host_set, &set);
2465 set_sigmask(&host_set);
2466
2467 if (restore_sigcontext(env, &frame->sc)) {
2468 goto badframe;
2469 }
2470
2471 #if 0
2472 /* Send SIGTRAP if we're single-stepping */
2473 if (ptrace_cancel_bpt(current))
2474 send_sig(SIGTRAP, current, 1);
2475 #endif
2476 unlock_user_struct(frame, frame_addr, 0);
2477 return -TARGET_QEMU_ESIGRETURN;
2478
2479 badframe:
2480 force_sig(TARGET_SIGSEGV);
2481 return -TARGET_QEMU_ESIGRETURN;
2482 }
2483
2484 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
2485 {
2486 int i;
2487 abi_ulong magic, sz;
2488 uint32_t fpscr, fpexc;
2489 struct target_vfp_sigframe *vfpframe;
2490 vfpframe = (struct target_vfp_sigframe *)regspace;
2491
2492 __get_user(magic, &vfpframe->magic);
2493 __get_user(sz, &vfpframe->size);
2494 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
2495 return 0;
2496 }
2497 for (i = 0; i < 32; i++) {
2498 __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
2499 }
2500 __get_user(fpscr, &vfpframe->ufp.fpscr);
2501 vfp_set_fpscr(env, fpscr);
2502 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2503 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2504 * and the exception flag is cleared
2505 */
2506 fpexc |= (1 << 30);
2507 fpexc &= ~((1 << 31) | (1 << 28));
2508 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2509 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2510 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2511 return (abi_ulong*)(vfpframe + 1);
2512 }
2513
2514 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2515 abi_ulong *regspace)
2516 {
2517 int i;
2518 abi_ulong magic, sz;
2519 struct target_iwmmxt_sigframe *iwmmxtframe;
2520 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2521
2522 __get_user(magic, &iwmmxtframe->magic);
2523 __get_user(sz, &iwmmxtframe->size);
2524 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2525 return 0;
2526 }
2527 for (i = 0; i < 16; i++) {
2528 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2529 }
2530 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2531 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2532 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2533 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2534 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2535 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2536 return (abi_ulong*)(iwmmxtframe + 1);
2537 }
2538
2539 static int do_sigframe_return_v2(CPUARMState *env,
2540 target_ulong context_addr,
2541 struct target_ucontext_v2 *uc)
2542 {
2543 sigset_t host_set;
2544 abi_ulong *regspace;
2545
2546 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2547 set_sigmask(&host_set);
2548
2549 if (restore_sigcontext(env, &uc->tuc_mcontext))
2550 return 1;
2551
2552 /* Restore coprocessor signal frame */
2553 regspace = uc->tuc_regspace;
2554 if (arm_feature(env, ARM_FEATURE_VFP)) {
2555 regspace = restore_sigframe_v2_vfp(env, regspace);
2556 if (!regspace) {
2557 return 1;
2558 }
2559 }
2560 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2561 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2562 if (!regspace) {
2563 return 1;
2564 }
2565 }
2566
2567 if (do_sigaltstack(context_addr
2568 + offsetof(struct target_ucontext_v2, tuc_stack),
2569 0, get_sp_from_cpustate(env)) == -EFAULT) {
2570 return 1;
2571 }
2572
2573 #if 0
2574 /* Send SIGTRAP if we're single-stepping */
2575 if (ptrace_cancel_bpt(current))
2576 send_sig(SIGTRAP, current, 1);
2577 #endif
2578
2579 return 0;
2580 }
2581
/*
 * Handle sigreturn for a v2 (2.6.18+) non-RT frame; the restore work
 * is shared with rt_sigreturn via do_sigframe_return_v2().
 */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env,
                              frame_addr
                              + offsetof(struct sigframe_v2, uc),
                              &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2617
2618 long do_sigreturn(CPUARMState *env)
2619 {
2620 if (get_osversion() >= 0x020612) {
2621 return do_sigreturn_v2(env);
2622 } else {
2623 return do_sigreturn_v1(env);
2624 }
2625 }
2626
2627 static long do_rt_sigreturn_v1(CPUARMState *env)
2628 {
2629 abi_ulong frame_addr;
2630 struct rt_sigframe_v1 *frame = NULL;
2631 sigset_t host_set;
2632
2633 /*
2634 * Since we stacked the signal on a 64-bit boundary,
2635 * then 'sp' should be word aligned here. If it's
2636 * not, then the user is trying to mess with us.
2637 */
2638 frame_addr = env->regs[13];
2639 trace_user_do_rt_sigreturn(env, frame_addr);
2640 if (frame_addr & 7) {
2641 goto badframe;
2642 }
2643
2644 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2645 goto badframe;
2646 }
2647
2648 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2649 set_sigmask(&host_set);
2650
2651 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2652 goto badframe;
2653 }
2654
2655 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2656 goto badframe;
2657
2658 #if 0
2659 /* Send SIGTRAP if we're single-stepping */
2660 if (ptrace_cancel_bpt(current))
2661 send_sig(SIGTRAP, current, 1);
2662 #endif
2663 unlock_user_struct(frame, frame_addr, 0);
2664 return -TARGET_QEMU_ESIGRETURN;
2665
2666 badframe:
2667 unlock_user_struct(frame, frame_addr, 0);
2668 force_sig(TARGET_SIGSEGV);
2669 return -TARGET_QEMU_ESIGRETURN;
2670 }
2671
2672 static long do_rt_sigreturn_v2(CPUARMState *env)
2673 {
2674 abi_ulong frame_addr;
2675 struct rt_sigframe_v2 *frame = NULL;
2676
2677 /*
2678 * Since we stacked the signal on a 64-bit boundary,
2679 * then 'sp' should be word aligned here. If it's
2680 * not, then the user is trying to mess with us.
2681 */
2682 frame_addr = env->regs[13];
2683 trace_user_do_rt_sigreturn(env, frame_addr);
2684 if (frame_addr & 7) {
2685 goto badframe;
2686 }
2687
2688 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2689 goto badframe;
2690 }
2691
2692 if (do_sigframe_return_v2(env,
2693 frame_addr
2694 + offsetof(struct rt_sigframe_v2, uc),
2695 &frame->uc)) {
2696 goto badframe;
2697 }
2698
2699 unlock_user_struct(frame, frame_addr, 0);
2700 return -TARGET_QEMU_ESIGRETURN;
2701
2702 badframe:
2703 unlock_user_struct(frame, frame_addr, 0);
2704 force_sig(TARGET_SIGSEGV);
2705 return -TARGET_QEMU_ESIGRETURN;
2706 }
2707
2708 long do_rt_sigreturn(CPUARMState *env)
2709 {
2710 if (get_osversion() >= 0x020612) {
2711 return do_rt_sigreturn_v2(env);
2712 } else {
2713 return do_rt_sigreturn_v1(env);
2714 }
2715 }
2716
2717 #elif defined(TARGET_SPARC)
2718
/* Maximum number of on-stack register windows a SunOS frame can hold. */
#define __SUNOS_MAXWIN 31

/* This is what SunOS does, so shall I. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;          /* aggregate-return pointer slot */
    abi_ulong xargs[6];       /* outgoing argument spill slots */
    abi_ulong xxargs[1];      /* start of extended argument area */
};
2758
/* Register snapshot stored in the SPARC signal frame. */
typedef struct {
    struct {
        abi_ulong psr;          /* processor state register */
        abi_ulong pc;
        abi_ulong npc;          /* next PC (delay-slot architecture) */
        abi_ulong y;            /* multiply/divide Y register */
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;                /* first word of the signal mask */
} __siginfo_t;

/* FPU state as stored in the frame (save/restore not yet wired up). */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;       /* floating-point state register */
    unsigned long si_fpqdepth;  /* entries used in si_fpqueue */
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;
2779
2780
/* Layout of a non-RT signal frame on the SPARC guest stack. */
struct target_signal_frame {
    struct sparc_stackf ss;     /* saved register window + stack frame */
    __siginfo_t info;           /* register snapshot + mask word 0 */
    abi_ulong fpu_save;         /* guest pointer to FPU state (unused) */
    abi_ulong insns[2] __attribute__ ((aligned (8))); /* trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS - 1]; /* mask words 1.. */
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
/* Layout of an RT signal frame (not yet used; see setup_rt_frame). */
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
2801
/*
 * Indices into env->regwptr for the current register window:
 * ins start at 0, locals at 8, outs at 16.  FP and SP are aliases
 * for %i6 and %o6 respectively.
 */
#define UREG_O0        16
#define UREG_O6        22
#define UREG_I0        0
#define UREG_I1        1
#define UREG_I2        2
#define UREG_I3        3
#define UREG_I4        4
#define UREG_I5        5
#define UREG_I6        6
#define UREG_I7        7
#define UREG_L0        8
#define UREG_FP        UREG_I6
#define UREG_SP        UREG_O6
2815
/*
 * Pick the guest stack address for a new signal frame of 'framesize'
 * bytes: the current frame pointer, or for SA_ONSTACK handlers the
 * top of the alternate signal stack.
 */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching. */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        /* NOTE(review): the second condition only uses the altstack
         * when its top happens to be 8-byte aligned; confirm against
         * the kernel's sparc get_sigframe(), which aligns instead. */
        if (!on_sig_stack(sp)
            && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
2833
/*
 * Save PSR, PC, NPC, Y, the eight global registers, the eight window
 * in-registers, and the first signal-mask word into the frame's
 * __siginfo_t.  'err' is never set, so this always returns 0.
 */
static int
setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
{
    int err = 0, i;

    __put_user(env->psr, &si->si_regs.psr);
    __put_user(env->pc, &si->si_regs.pc);
    __put_user(env->npc, &si->si_regs.npc);
    __put_user(env->y, &si->si_regs.y);
    for (i=0; i < 8; i++) {
        __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        /* u_regs[8..15] hold the window's %i registers. */
        __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
    }
    __put_user(mask, &si->si_mask);
    return err;
}
2852
#if 0
/* Unused SunOS-style sigcontext save; kept for reference only. */
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#endif
/* Signal frame size rounded up to the 8-byte stack alignment. */
#define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2872
/*
 * Build a SPARC non-RT signal frame on the guest stack: register
 * snapshot, signal mask, the saved register window, then redirect the
 * CPU to the handler with a sigreturn trampoline in %i7 (unless the
 * application registered its own restorer).
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Spill the current register window into the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
        offsetof(struct target_signal_frame, info);
    /* NOTE(review): %o2 is also pointed at 'info', mirroring the
     * kernel's setup_frame(); confirm against
     * linux/arch/sparc/kernel/signal_32.c if changing. */
    env->regwptr[UREG_I2] = sf_addr +
        offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->ka_restorer) {
        env->regwptr[UREG_I7] = ka->ka_restorer;
    } else {
        uint32_t val32;

        /* %i7 points 8 bytes before the trampoline so that the
         * handler's 'ret' (which adds 8) lands on insns[0]. */
        env->regwptr[UREG_I7] = sf_addr +
            offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sigsegv(sig);
}
2961
2962 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2963 target_siginfo_t *info,
2964 target_sigset_t *set, CPUSPARCState *env)
2965 {
2966 fprintf(stderr, "setup_rt_frame: not implemented\n");
2967 }
2968
/*
 * Handle sigreturn for a SPARC non-RT frame at the current frame
 * pointer: validate alignment, restore PSR (user-writable bits only),
 * PC/NPC, Y, globals and ins, then the signal mask.
 */
long do_sigreturn(CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    uint32_t up_psr, pc, npc;
    target_sigset_t set;
    sigset_t host_set;
    /* NOTE(review): 'err' is never set below, so the check before the
     * normal return is currently dead; it is kept for the FIXME'd FPU
     * restore path. */
    int err=0, i;

    sf_addr = env->regwptr[UREG_FP];
    trace_user_do_sigreturn(env, sf_addr);
    if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
        goto segv_and_exit;
    }

    /* 1. Make sure we are not getting garbage from the user */

    if (sf_addr & 3)
        goto segv_and_exit;

    __get_user(pc, &sf->info.si_regs.pc);
    __get_user(npc, &sf->info.si_regs.npc);

    if ((pc | npc) & 3) {
        goto segv_and_exit;
    }

    /* 2. Restore the state */
    __get_user(up_psr, &sf->info.si_regs.psr);

    /* User can only change condition codes and FPU enabling in %psr. */
    env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
        | (env->psr & ~(PSR_ICC /* | PSR_EF */));

    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &sf->info.si_regs.y);
    for (i=0; i < 8; i++) {
        __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
    }

    /* FIXME: implement FPU save/restore:
     * __get_user(fpu_save, &sf->fpu_save);
     * if (fpu_save)
     *     err |= restore_fpu_state(env, fpu_save);
     */

    /* This is pretty much atomic, no amount locking would prevent
     * the races which exist anyways.
     */
    __get_user(set.sig[0], &sf->info.si_mask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &sf->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (err) {
        goto segv_and_exit;
    }
    unlock_user_struct(sf, sf_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

segv_and_exit:
    unlock_user_struct(sf, sf_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
3041
3042 long do_rt_sigreturn(CPUSPARCState *env)
3043 {
3044 trace_user_do_rt_sigreturn(env, 0);
3045 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
3046 return -TARGET_ENOSYS;
3047 }
3048
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
/* Indices into the mc_gregs array of a 64-bit SPARC mcontext. */
#define SPARC_MC_TSTATE 0
#define SPARC_MC_PC 1
#define SPARC_MC_NPC 2
#define SPARC_MC_Y 3
#define SPARC_MC_G1 4
#define SPARC_MC_G2 5
#define SPARC_MC_G3 6
#define SPARC_MC_G4 7
#define SPARC_MC_G5 8
#define SPARC_MC_G6 9
#define SPARC_MC_G7 10
#define SPARC_MC_O0 11
#define SPARC_MC_O1 12
#define SPARC_MC_O2 13
#define SPARC_MC_O3 14
#define SPARC_MC_O4 15
#define SPARC_MC_O5 16
#define SPARC_MC_O6 17
#define SPARC_MC_O7 18
#define SPARC_MC_NGREG 19
3070
/* General register set of a 64-bit SPARC mcontext. */
typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];

/* One deferred-trap FPU queue entry. */
struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};

/* FPU portion of the mcontext. */
struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;            /* floating-point state register */
    abi_ulong mcfpu_fprs;           /* FP registers-in-use flags */
    abi_ulong mcfpu_gsr;            /* graphics status register */
    struct target_mc_fq *mcfpu_fq;  /* deferred-trap queue */
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;       /* non-zero if FPU state is valid */
};
typedef struct target_mc_fpu target_mc_fpu_t;

typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;         /* frame pointer (%i6) */
    target_mc_greg_t mc_i7;         /* return address (%i7) */
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;

struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};

/* A V9 register window */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

/* V9 ABI: stack pointers are offset by a constant bias. */
#define TARGET_STACK_BIAS 2047
3116
/* {set, get}context() needed for 64-bit SparcLinux userland. */
/*
 * Implement the guest setcontext trap: load PC/NPC, signal mask
 * (when %i1 is non-zero), general registers, the saved fp/i7 window
 * slots, and FPU state from the ucontext whose guest address is in
 * %i0.  Delivers SIGSEGV on any invalid address or misaligned PC.
 */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
        goto do_sigsegv;
    }
    grp = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[SPARC_MC_PC]));
    __get_user(npc, &((*grp)[SPARC_MC_NPC]));
    /* Both PCs must be 4-byte aligned. */
    if ((pc | npc) & 3) {
        goto do_sigsegv;
    }
    /* %i1 non-zero requests restoring the signal mask as well. */
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        set_sigmask(&set);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[SPARC_MC_Y]));
    /* TSTATE packs ASI, CCR and CWP fields; unpack each. */
    __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
    __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    /* Write fp/i7 back into the register window spilled on the
     * (biased) guest stack. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *   __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        /* Single-precision halves interleave into the fpr[] doubles. */
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i/2].l.lower, src);
            } else {
                __get_user(env->fpr[i/2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
3215
3216 void sparc64_get_context(CPUSPARCState *env)
3217 {
3218 abi_ulong ucp_addr;
3219 struct target_ucontext *ucp;
3220 target_mc_gregset_t *grp;
3221 target_mcontext_t *mcp;
3222 abi_ulong fp, i7, w_addr;
3223 int err;
3224 unsigned int i;
3225 target_sigset_t target_set;
3226 sigset_t set;
3227
3228 ucp_addr = env->regwptr[UREG_I0];
3229 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
3230 goto do_sigsegv;
3231 }
3232
3233 mcp = &ucp->tuc_mcontext;
3234 grp = &mcp->mc_gregs;
3235
3236 /* Skip over the trap instruction, first. */
3237 env->pc = env->npc;
3238 env->npc += 4;
3239
3240 /* If we're only reading the signal mask then do_sigprocmask()
3241 * is guaranteed not to fail, which is important because we don't
3242 * have any way to signal a failure or restart this operation since
3243 * this is not a normal syscall.
3244 */
3245 err = do_sigprocmask(0, NULL, &set);
3246 assert(err == 0);
3247 host_to_target_sigset_internal(&target_set, &set);
3248 if (TARGET_NSIG_WORDS == 1) {
3249 __put_user(target_set.sig[0],
3250 (abi_ulong *)&ucp->tuc_sigmask);
3251 } else {
3252 abi_ulong *src, *dst;
3253 src = target_set.sig;
3254 dst = ucp->tuc_sigmask.sig;
3255 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
3256 __put_user(*src, dst);
3257 }
3258 if (err)
3259 goto do_sigsegv;
3260 }
3261
3262 /* XXX: tstate must be saved properly */
3263 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
3264 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
3265 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
3266 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
3267 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
3268 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
3269 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
3270 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
3271 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
3272 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
3273 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
3274 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
3275 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
3276 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
3277 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
3278 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
3279 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
3280 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
3281 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
3282
3283 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
3284 fp = i7 = 0;
3285 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
3286 abi_ulong) != 0) {
3287 goto do_sigsegv;
3288 }
3289 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3290 abi_ulong) != 0) {
3291 goto do_sigsegv;
3292 }
3293 __put_user(fp, &(mcp->mc_fp));
3294 __put_user(i7, &(mcp->mc_i7));
3295
3296 {
3297 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3298 for (i = 0; i < 64; i++, dst++) {
3299 if (i & 1) {
3300 __put_user(env->fpr[i/2].l.lower, dst);
3301 } else {
3302 __put_user(env->fpr[i/2].l.upper, dst);
3303 }
3304 }
3305 }
3306 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3307 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3308 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3309
3310 if (err)
3311 goto do_sigsegv;
3312 unlock_user_struct(ucp, ucp_addr, 1);
3313 return;
3314 do_sigsegv:
3315 unlock_user_struct(ucp, ucp_addr, 1);
3316 force_sig(TARGET_SIGSEGV);
3317 }
3318 #endif
3319 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3320
3321 # if defined(TARGET_ABI_MIPSO32)
/* Guest-ABI layout of the MIPS O32 struct sigcontext, matching the
 * kernel uapi definition; field order, widths and padding are ABI and
 * must not change.
 */
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t   pad0;
    uint64_t   sc_mdhi;
    uint64_t   sc_mdlo;
    target_ulong   sc_hi1;     /* Was sc_cause */
    target_ulong   sc_lo1;     /* Was sc_badvaddr */
    target_ulong   sc_hi2;     /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
3343 # else /* N32 || N64 */
/* Guest-ABI layout of the MIPS N32/N64 struct sigcontext (all 64-bit
 * register slots, no O32 legacy fields); must match the kernel uapi.
 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
3361 # endif /* O32 */
3362
/* Frame pushed on the guest stack for non-RT MIPS signals; guest ABI,
 * compare the kernel's struct sigframe.
 */
struct sigframe {
    uint32_t sf_ass[4];                 /* argument save space for o32 */
    uint32_t sf_code[2];                /* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};
3369
/* Guest-ABI ucontext_t as seen by MIPS rt signal handlers. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};
3378
/* Frame pushed on the guest stack for MIPS rt signals. */
struct target_rt_sigframe {
    uint32_t rs_ass[4];                 /* argument save space for o32 */
    uint32_t rs_code[2];                /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
3385
/* Install trampoline to jump back from signal handler.
 *
 * @tramp:   host pointer into the (already locked and validated) guest
 *           frame where the two trampoline instructions are written
 * @syscall: target syscall number (sigreturn or rt_sigreturn)
 *
 * Returns 0; kept non-void for interface compatibility with callers.
 */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     */

    __put_user(0x24020000 + syscall, tramp + 0);
    __put_user(0x0000000c          , tramp + 1);
    /* The old "int err = 0; ... return err;" was a vestige: err was
     * never written, so simply return success.
     */
    return 0;
}
3402
/* Save the CPU state (PC, GPRs, HI/LO incl. DSP accumulators, DSP
 * control and FPU registers) into the guest sigcontext.  Inverse of
 * restore_sigcontext(); compare the kernel's setup_sigcontext().
 */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    /* exception_resume_pc() accounts for a pending branch-delay slot;
     * clear the branch hflags since we will resume at a precise PC.
     */
    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    /* $zero is architecturally 0, store it explicitly. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
3438
/* Restore CPU state from the guest sigcontext: resume PC (via CP0_EPC),
 * GPRs 1..31, HI/LO incl. DSP accumulators, DSP control and FPU
 * registers.  Inverse of setup_sigcontext().
 */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* $zero (index 0) is hardwired and deliberately skipped. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
3469
3470 /*
3471 * Determine which stack to use..
3472 */
3473 static inline abi_ulong
3474 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3475 {
3476 unsigned long sp;
3477
3478 /* Default to using normal stack */
3479 sp = regs->active_tc.gpr[29];
3480
3481 /*
3482 * FPU emulator may have its own trampoline active just
3483 * above the user stack, 16-bytes before the next lowest
3484 * 16 byte boundary. Try to avoid trashing it.
3485 */
3486 sp -= 32;
3487
3488 /* This is the X/Open sanctioned signal stack switching. */
3489 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3490 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3491 }
3492
3493 return (sp - frame_size) & ~7;
3494 }
3495
3496 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3497 {
3498 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3499 env->hflags &= ~MIPS_HFLAG_M16;
3500 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3501 env->active_tc.PC &= ~(target_ulong) 1;
3502 }
3503 }
3504
3505 # if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
/* Build a non-RT signal frame on the guest stack (trampoline, saved CPU
 * context, blocked-signal mask) and redirect the CPU to the handler.
 * On failure the guest receives a forced SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Trampoline that issues the sigreturn syscall when the handler
     * returns (handler's $ra points at it, see gpr[31] below).
     */
    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = 0 (should be cause)
    *   a2 = pointer to struct sigcontext
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}
3554
3555 long do_sigreturn(CPUMIPSState *regs)
3556 {
3557 struct sigframe *frame;
3558 abi_ulong frame_addr;
3559 sigset_t blocked;
3560 target_sigset_t target_set;
3561 int i;
3562
3563 frame_addr = regs->active_tc.gpr[29];
3564 trace_user_do_sigreturn(regs, frame_addr);
3565 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3566 goto badframe;
3567
3568 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3569 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3570 }
3571
3572 target_to_host_sigset_internal(&blocked, &target_set);
3573 set_sigmask(&blocked);
3574
3575 restore_sigcontext(regs, &frame->sf_sc);
3576
3577 #if 0
3578 /*
3579 * Don't let your children do this ...
3580 */
3581 __asm__ __volatile__(
3582 "move\t$29, %0\n\t"
3583 "j\tsyscall_exit"
3584 :/* no outputs */
3585 :"r" (&regs));
3586 /* Unreached */
3587 #endif
3588
3589 regs->active_tc.PC = regs->CP0_EPC;
3590 mips_set_hflags_isa_mode_from_pc(regs);
3591 /* I am not sure this is right, but it seems to work
3592 * maybe a problem with nested signals ? */
3593 regs->CP0_EPC = 0;
3594 return -TARGET_QEMU_ESIGRETURN;
3595
3596 badframe:
3597 force_sig(TARGET_SIGSEGV);
3598 return -TARGET_QEMU_ESIGRETURN;
3599 }
3600 # endif /* O32 */
3601
/* Build an rt signal frame on the guest stack (trampoline, siginfo,
 * full ucontext incl. sigaltstack description and blocked mask) and
 * redirect the CPU to the handler.  On failure the guest receives a
 * forced SIGSEGV.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Trampoline issuing the rt_sigreturn syscall on handler return. */
    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = pointer to siginfo_t
    *   a2 = pointer to ucontext_t
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
3663
3664 long do_rt_sigreturn(CPUMIPSState *env)
3665 {
3666 struct target_rt_sigframe *frame;
3667 abi_ulong frame_addr;
3668 sigset_t blocked;
3669
3670 frame_addr = env->active_tc.gpr[29];
3671 trace_user_do_rt_sigreturn(env, frame_addr);
3672 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3673 goto badframe;
3674 }
3675
3676 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3677 set_sigmask(&blocked);
3678
3679 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3680
3681 if (do_sigaltstack(frame_addr +
3682 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3683 0, get_sp_from_cpustate(env)) == -EFAULT)
3684 goto badframe;
3685
3686 env->active_tc.PC = env->CP0_EPC;
3687 mips_set_hflags_isa_mode_from_pc(env);
3688 /* I am not sure this is right, but it seems to work
3689 * maybe a problem with nested signals ? */
3690 env->CP0_EPC = 0;
3691 return -TARGET_QEMU_ESIGRETURN;
3692
3693 badframe:
3694 force_sig(TARGET_SIGSEGV);
3695 return -TARGET_QEMU_ESIGRETURN;
3696 }
3697
3698 #elif defined(TARGET_SH4)
3699
3700 /*
3701 * code and data structures from linux kernel:
3702 * include/asm-sh/sigcontext.h
3703 * arch/sh/kernel/signal.c
3704 */
3705
/* Guest-ABI sigcontext, from the kernel's include/asm-sh/sigcontext.h. */
struct target_sigcontext {
    target_ulong  oldmask;          /* first word of the blocked mask */

    /* CPU registers */
    target_ulong  sc_gregs[16];
    target_ulong  sc_pc;
    target_ulong  sc_pr;
    target_ulong  sc_sr;
    target_ulong  sc_gbr;
    target_ulong  sc_mach;
    target_ulong  sc_macl;

    /* FPU registers */
    target_ulong  sc_fpregs[16];
    target_ulong  sc_xfpregs[16];
    unsigned int sc_fpscr;
    unsigned int sc_fpul;
    unsigned int sc_ownedfp;
};
3725
/* Non-RT signal frame pushed on the guest stack. */
struct target_sigframe
{
    struct target_sigcontext sc;
    target_ulong extramask[TARGET_NSIG_WORDS-1]; /* rest of the mask */
    uint16_t retcode[3];                         /* sigreturn trampoline */
};
3732
3733
/* Guest-ABI ucontext for SH4 rt signal frames. */
struct target_ucontext {
    target_ulong tuc_flags;
    /* NOTE(review): this is a *host* pointer embedded in a guest-ABI
     * struct; on a 64-bit host it occupies 8 bytes where the 32-bit
     * guest expects 4, shifting every following field.  Other targets
     * in this file use target_ulong here -- confirm whether this should
     * be changed to match.
     */
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;        /* mask last for extensibility */
};
3741
/* RT signal frame pushed on the guest stack. */
struct target_rt_sigframe
{
    struct target_siginfo info;
    struct target_ucontext uc;
    uint16_t retcode[3];        /* rt_sigreturn trampoline */
};
3748
3749
3750 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3751 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3752
3753 static abi_ulong get_sigframe(struct target_sigaction *ka,
3754 unsigned long sp, size_t frame_size)
3755 {
3756 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3757 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3758 }
3759
3760 return (sp - frame_size) & -8ul;
3761 }
3762
/* Notice when we're in the middle of a gUSA region and reset.
   Note that this will only occur for !parallel_cpus, as we will
   translate such sequences differently in a parallel context.  */
static void unwind_gusa(CPUSH4State *regs)
{
    /* If the stack pointer is sufficiently negative, and we haven't
       completed the sequence, then reset to the entry to the region. */
    /* ??? The SH4 kernel checks for and address above 0xC0000000.
       However, the page mappings in qemu linux-user aren't as restricted
       and we wind up with the normal stack mapped above 0xF0000000.
       That said, there is no reason why the kernel should be allowing
       a gUSA region that spans 1GB.  Use a tighter check here, for what
       can actually be enabled by the immediate move.  */
    /* During a gUSA sequence SP (r15) holds the negated region size and
     * R0 holds the region end address, hence the two-part test below.
     */
    if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
        /* Reset the PC to before the gUSA region, as computed from
           R0 = region end, SP = -(region size), plus one more for the
           insn that actually initializes SP to the region size.  */
        regs->pc = regs->gregs[0] + regs->gregs[15] - 2;

        /* Reset the SP to the saved version in R1.  */
        regs->gregs[15] = regs->gregs[1];
    }
}
3786
/* Dump the CPU registers, FPU state and the first word of the blocked
 * mask into the guest sigcontext.  Inverse of restore_sigcontext().
 */
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUSH4State *regs, unsigned long mask)
{
    int i;

/* COPY stores regs->x into the matching sc_x guest field. */
#define COPY(x)         __put_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i=0; i<16; i++) {
        __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __put_user(regs->fpscr, &sc->sc_fpscr);
    __put_user(regs->fpul, &sc->sc_fpul);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
}
3815
/* Reload the CPU registers and FPU state from the guest sigcontext and
 * clear any pending delay-slot/gUSA execution state.  Inverse of
 * setup_sigcontext().
 */
static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
{
    int i;

/* COPY loads the guest sc_x field back into regs->x. */
#define COPY(x)         __get_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i=0; i<16; i++) {
        __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __get_user(regs->fpscr, &sc->sc_fpscr);
    __get_user(regs->fpul, &sc->sc_fpul);

    regs->tra = -1;         /* disable syscall checks */
    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
}
3843
3844 static void setup_frame(int sig, struct target_sigaction *ka,
3845 target_sigset_t *set, CPUSH4State *regs)
3846 {
3847 struct target_sigframe *frame;
3848 abi_ulong frame_addr;
3849 int i;
3850
3851 unwind_gusa(regs);
3852
3853 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3854 trace_user_setup_frame(regs, frame_addr);
3855 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3856 goto give_sigsegv;
3857 }
3858
3859 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3860
3861 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3862 __put_user(set->sig[i + 1], &frame->extramask[i]);
3863 }
3864
3865 /* Set up to return from userspace. If provided, use a stub
3866 already in userspace. */
3867 if (ka->sa_flags & TARGET_SA_RESTORER) {
3868 regs->pr = (unsigned long) ka->sa_restorer;
3869 } else {
3870 /* Generate return code (system call to sigreturn) */
3871 abi_ulong retcode_addr = frame_addr +
3872 offsetof(struct target_sigframe, retcode);
3873 __put_user(MOVW(2), &frame->retcode[0]);
3874 __put_user(TRAP_NOARG, &frame->retcode[1]);
3875 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3876 regs->pr = (unsigned long) retcode_addr;
3877 }
3878
3879 /* Set up registers for signal handler */
3880 regs->gregs[15] = frame_addr;
3881 regs->gregs[4] = sig; /* Arg for signal handler */
3882 regs->gregs[5] = 0;
3883 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3884 regs->pc = (unsigned long) ka->_sa_handler;
3885 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3886
3887 unlock_user_struct(frame, frame_addr, 1);
3888 return;
3889
3890 give_sigsegv:
3891 unlock_user_struct(frame, frame_addr, 1);
3892 force_sigsegv(sig);
3893 }
3894
/* Build an rt signal frame (siginfo + full ucontext) on the guest stack
 * and point the CPU at the handler; falls back to a forced SIGSEGV if
 * the frame cannot be written.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSH4State *regs)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    unwind_gusa(regs);

    frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
    trace_user_setup_rt_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, (unsigned long *)&frame->uc.tuc_link);
    __put_user((unsigned long)target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(regs->gregs[15]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext,
                     regs, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        regs->pr = (unsigned long) ka->sa_restorer;
    } else {
        /* Generate return code (system call to sigreturn) */
        abi_ulong retcode_addr = frame_addr +
                                 offsetof(struct target_rt_sigframe, retcode);
        __put_user(MOVW(2), &frame->retcode[0]);
        __put_user(TRAP_NOARG, &frame->retcode[1]);
        __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
        regs->pr = (unsigned long) retcode_addr;
    }

    /* Set up registers for signal handler */
    regs->gregs[15] = frame_addr;
    regs->gregs[4] = sig; /* Arg for signal handler */
    regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
    regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
    regs->pc = (unsigned long) ka->_sa_handler;
    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
3957
3958 long do_sigreturn(CPUSH4State *regs)
3959 {
3960 struct target_sigframe *frame;
3961 abi_ulong frame_addr;
3962 sigset_t blocked;
3963 target_sigset_t target_set;
3964 int i;
3965 int err = 0;
3966
3967 frame_addr = regs->gregs[15];
3968 trace_user_do_sigreturn(regs, frame_addr);
3969 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3970 goto badframe;
3971 }
3972
3973 __get_user(target_set.sig[0], &frame->sc.oldmask);
3974 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3975 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3976 }
3977
3978 if (err)
3979 goto badframe;
3980
3981 target_to_host_sigset_internal(&blocked, &target_set);
3982 set_sigmask(&blocked);
3983
3984 restore_sigcontext(regs, &frame->sc);
3985
3986 unlock_user_struct(frame, frame_addr, 0);
3987 return -TARGET_QEMU_ESIGRETURN;
3988
3989 badframe:
3990 unlock_user_struct(frame, frame_addr, 0);
3991 force_sig(TARGET_SIGSEGV);
3992 return -TARGET_QEMU_ESIGRETURN;
3993 }
3994
/* Handle the rt_sigreturn syscall: restore the blocked-signal mask, CPU
 * state and sigaltstack settings from the rt frame at the current stack
 * pointer (r15).
 */
long do_rt_sigreturn(CPUSH4State *regs)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = regs->gregs[15];
    trace_user_do_rt_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(regs)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
4026 #elif defined(TARGET_MICROBLAZE)
4027
/* Guest-ABI sigcontext: the full pt_regs dump plus the first word of
 * the blocked-signal mask.
 */
struct target_sigcontext {
    struct target_pt_regs regs;  /* needs to be first */
    uint32_t oldmask;
};
4032
/* Guest-ABI stack_t (sigaltstack description). */
struct target_stack_t {
    abi_ulong ss_sp;
    int ss_flags;
    unsigned int ss_size;
};
4038
/* Guest-ABI ucontext for microblaze signal frames. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    struct target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
};
4046
/* Signal frames. */
struct target_signal_frame {
    struct target_ucontext uc;
    uint32_t extramask[TARGET_NSIG_WORDS - 1]; /* rest of the mask */
    uint32_t tramp[2];                         /* sigreturn trampoline */
};
4053
/* NOTE(review): this uses *host* siginfo_t/ucontext_t rather than the
 * target_* guest layouts used everywhere else in this file.  It appears
 * harmless only while setup_rt_frame below remains unimplemented --
 * confirm before building real rt frames with it.
 */
struct rt_signal_frame {
    siginfo_t info;
    ucontext_t uc;
    uint32_t tramp[2];
};
4059
/* Copy all 32 general registers and the PC into the guest sigcontext.
 * Inverse of restore_sigcontext().
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
{
    __put_user(env->regs[0], &sc->regs.r0);
    __put_user(env->regs[1], &sc->regs.r1);
    __put_user(env->regs[2], &sc->regs.r2);
    __put_user(env->regs[3], &sc->regs.r3);
    __put_user(env->regs[4], &sc->regs.r4);
    __put_user(env->regs[5], &sc->regs.r5);
    __put_user(env->regs[6], &sc->regs.r6);
    __put_user(env->regs[7], &sc->regs.r7);
    __put_user(env->regs[8], &sc->regs.r8);
    __put_user(env->regs[9], &sc->regs.r9);
    __put_user(env->regs[10], &sc->regs.r10);
    __put_user(env->regs[11], &sc->regs.r11);
    __put_user(env->regs[12], &sc->regs.r12);
    __put_user(env->regs[13], &sc->regs.r13);
    __put_user(env->regs[14], &sc->regs.r14);
    __put_user(env->regs[15], &sc->regs.r15);
    __put_user(env->regs[16], &sc->regs.r16);
    __put_user(env->regs[17], &sc->regs.r17);
    __put_user(env->regs[18], &sc->regs.r18);
    __put_user(env->regs[19], &sc->regs.r19);
    __put_user(env->regs[20], &sc->regs.r20);
    __put_user(env->regs[21], &sc->regs.r21);
    __put_user(env->regs[22], &sc->regs.r22);
    __put_user(env->regs[23], &sc->regs.r23);
    __put_user(env->regs[24], &sc->regs.r24);
    __put_user(env->regs[25], &sc->regs.r25);
    __put_user(env->regs[26], &sc->regs.r26);
    __put_user(env->regs[27], &sc->regs.r27);
    __put_user(env->regs[28], &sc->regs.r28);
    __put_user(env->regs[29], &sc->regs.r29);
    __put_user(env->regs[30], &sc->regs.r30);
    __put_user(env->regs[31], &sc->regs.r31);
    __put_user(env->sregs[SR_PC], &sc->regs.pc);
}
4096
/* Reload all 32 general registers and the PC from the guest sigcontext.
 * Inverse of setup_sigcontext().
 */
static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
{
    __get_user(env->regs[0], &sc->regs.r0);
    __get_user(env->regs[1], &sc->regs.r1);
    __get_user(env->regs[2], &sc->regs.r2);
    __get_user(env->regs[3], &sc->regs.r3);
    __get_user(env->regs[4], &sc->regs.r4);
    __get_user(env->regs[5], &sc->regs.r5);
    __get_user(env->regs[6], &sc->regs.r6);
    __get_user(env->regs[7], &sc->regs.r7);
    __get_user(env->regs[8], &sc->regs.r8);
    __get_user(env->regs[9], &sc->regs.r9);
    __get_user(env->regs[10], &sc->regs.r10);
    __get_user(env->regs[11], &sc->regs.r11);
    __get_user(env->regs[12], &sc->regs.r12);
    __get_user(env->regs[13], &sc->regs.r13);
    __get_user(env->regs[14], &sc->regs.r14);
    __get_user(env->regs[15], &sc->regs.r15);
    __get_user(env->regs[16], &sc->regs.r16);
    __get_user(env->regs[17], &sc->regs.r17);
    __get_user(env->regs[18], &sc->regs.r18);
    __get_user(env->regs[19], &sc->regs.r19);
    __get_user(env->regs[20], &sc->regs.r20);
    __get_user(env->regs[21], &sc->regs.r21);
    __get_user(env->regs[22], &sc->regs.r22);
    __get_user(env->regs[23], &sc->regs.r23);
    __get_user(env->regs[24], &sc->regs.r24);
    __get_user(env->regs[25], &sc->regs.r25);
    __get_user(env->regs[26], &sc->regs.r26);
    __get_user(env->regs[27], &sc->regs.r27);
    __get_user(env->regs[28], &sc->regs.r28);
    __get_user(env->regs[29], &sc->regs.r29);
    __get_user(env->regs[30], &sc->regs.r30);
    __get_user(env->regs[31], &sc->regs.r31);
    __get_user(env->sregs[SR_PC], &sc->regs.pc);
}
4133
4134 static abi_ulong get_sigframe(struct target_sigaction *ka,
4135 CPUMBState *env, int frame_size)
4136 {
4137 abi_ulong sp = env->regs[1];
4138
4139 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
4140 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4141 }
4142
4143 return ((sp - frame_size) & -8UL);
4144 }
4145
4146 static void setup_frame(int sig, struct target_sigaction *ka,
4147 target_sigset_t *set, CPUMBState *env)
4148 {
4149 struct target_signal_frame *frame;
4150 abi_ulong frame_addr;
4151 int i;
4152
4153 frame_addr = get_sigframe(ka, env, sizeof *frame);
4154 trace_user_setup_frame(env, frame_addr);
4155 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4156 goto badframe;
4157
4158 /* Save the mask. */
4159 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
4160
4161 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4162 __put_user(set->sig[i], &frame->extramask[i - 1]);
4163 }
4164
4165 setup_sigcontext(&frame->uc.tuc_mcontext, env);
4166
4167 /* Set up to return from userspace. If provided, use a stub
4168 already in userspace. */
4169 /* minus 8 is offset to cater for "rtsd r15,8" offset */
4170 if (ka->sa_flags & TARGET_SA_RESTORER) {
4171 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
4172 } else {
4173 uint32_t t;
4174 /* Note, these encodings are _big endian_! */
4175 /* addi r12, r0, __NR_sigreturn */
4176 t = 0x31800000UL | TARGET_NR_sigreturn;
4177 __put_user(t, frame->tramp + 0);
4178 /* brki r14, 0x8 */
4179 t = 0xb9cc0008UL;
4180 __put_user(t, frame->tramp + 1);
4181
4182 /* Return from sighandler will jump to the tramp.
4183 Negative 8 offset because return is rtsd r15, 8 */
4184 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
4185 - 8;
4186 }
4187
4188 /* Set up registers for signal handler */
4189 env->regs[1] = frame_addr;
4190 /* Signal handler args: */
4191 env->regs[5] = sig; /* Arg 0: signum */
4192 env->regs[6] = 0;
4193 /* arg 1: sigcontext */
4194 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
4195
4196 /* Offset of 4 to handle microblaze rtid r14, 0 */
4197 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
4198
4199 unlock_user_struct(frame, frame_addr, 1);
4200 return;
4201 badframe:
4202 force_sigsegv(sig);
4203 }
4204
/* RT (SA_SIGINFO) signal frames are not implemented for MicroBlaze;
   signals requesting siginfo delivery are reported and dropped. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMBState *env)
{
    fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
}
4211
/*
 * Handle the MicroBlaze sigreturn syscall: restore the blocked-signal
 * mask and CPU state that setup_frame() saved on the guest stack
 * (frame address taken from the guest SP, r1).
 */
long do_sigreturn(CPUMBState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    frame_addr = env->regs[R_SP];
    trace_user_do_sigreturn(env, frame_addr);
    /* Make sure the guest isn't playing games. */
    /* NOTE(review): VERIFY_READ would suffice — the frame is only read. */
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto badframe;

    /* Restore blocked signals */
    __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }
    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(&frame->uc.tuc_mcontext, env);
    /* We got here through a sigreturn syscall, our path back is via an
       rtb insn so setup r14 for that. */
    env->regs[14] = env->sregs[SR_PC];

    unlock_user_struct(frame, frame_addr, 0);
    /* ESIGRETURN tells the main loop the registers are already final. */
    return -TARGET_QEMU_ESIGRETURN;
badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
4245
/* rt_sigreturn is unimplemented for MicroBlaze — setup_rt_frame never
   builds a frame, so there is nothing to restore. */
long do_rt_sigreturn(CPUMBState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4252
4253 #elif defined(TARGET_CRIS)
4254
/* CPU state saved across a CRIS signal delivery. */
struct target_sigcontext {
    struct target_pt_regs regs; /* needs to be first */
    uint32_t oldmask;           /* first word of the blocked-signal mask */
    uint32_t usp; /* usp before stacking this gunk on it */
};

/* Signal frames. */
struct target_signal_frame {
    struct target_sigcontext sc;
    uint32_t extramask[TARGET_NSIG_WORDS - 1]; /* rest of the mask */
    uint16_t retcode[4]; /* Trampoline code. */
};

/* NOTE(review): this uses the *host* siginfo_t/ucontext_t types.  It is
   only referenced by the unimplemented rt path, but would need target
   equivalents before that path can be implemented — confirm. */
struct rt_signal_frame {
    siginfo_t *pinfo;
    void *puc;
    siginfo_t info;
    ucontext_t uc;
    uint16_t retcode[4]; /* Trampoline code. */
};
4275
/*
 * Dump the guest CPU state into a sigcontext image.  r14 goes into the
 * separate usp slot (presumably the user SP on CRISv32 — confirm) and
 * the PC lands in ERP, the exception return pointer.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
{
    __put_user(env->regs[0], &sc->regs.r0);
    __put_user(env->regs[1], &sc->regs.r1);
    __put_user(env->regs[2], &sc->regs.r2);
    __put_user(env->regs[3], &sc->regs.r3);
    __put_user(env->regs[4], &sc->regs.r4);
    __put_user(env->regs[5], &sc->regs.r5);
    __put_user(env->regs[6], &sc->regs.r6);
    __put_user(env->regs[7], &sc->regs.r7);
    __put_user(env->regs[8], &sc->regs.r8);
    __put_user(env->regs[9], &sc->regs.r9);
    __put_user(env->regs[10], &sc->regs.r10);
    __put_user(env->regs[11], &sc->regs.r11);
    __put_user(env->regs[12], &sc->regs.r12);
    __put_user(env->regs[13], &sc->regs.r13);
    __put_user(env->regs[14], &sc->usp);
    __put_user(env->regs[15], &sc->regs.acr);
    __put_user(env->pregs[PR_MOF], &sc->regs.mof);
    __put_user(env->pregs[PR_SRP], &sc->regs.srp);
    __put_user(env->pc, &sc->regs.erp);
}
4298
/* Exact mirror of setup_sigcontext(): reload guest CPU state from a
   sigcontext image (r14 from usp, PC from ERP). */
static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
{
    __get_user(env->regs[0], &sc->regs.r0);
    __get_user(env->regs[1], &sc->regs.r1);
    __get_user(env->regs[2], &sc->regs.r2);
    __get_user(env->regs[3], &sc->regs.r3);
    __get_user(env->regs[4], &sc->regs.r4);
    __get_user(env->regs[5], &sc->regs.r5);
    __get_user(env->regs[6], &sc->regs.r6);
    __get_user(env->regs[7], &sc->regs.r7);
    __get_user(env->regs[8], &sc->regs.r8);
    __get_user(env->regs[9], &sc->regs.r9);
    __get_user(env->regs[10], &sc->regs.r10);
    __get_user(env->regs[11], &sc->regs.r11);
    __get_user(env->regs[12], &sc->regs.r12);
    __get_user(env->regs[13], &sc->regs.r13);
    __get_user(env->regs[14], &sc->usp);
    __get_user(env->regs[15], &sc->regs.acr);
    __get_user(env->pregs[PR_MOF], &sc->regs.mof);
    __get_user(env->pregs[PR_SRP], &sc->regs.srp);
    __get_user(env->pc, &sc->regs.erp);
}
4321
4322 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4323 {
4324 abi_ulong sp;
4325 /* Align the stack downwards to 4. */
4326 sp = (env->regs[R_SP] & ~3);
4327 return sp - framesize;
4328 }
4329
/*
 * Build a CRIS non-RT signal frame on the guest stack: trampoline,
 * saved signal mask and sigcontext, then redirect the CPU so the
 * handler runs and returns through the trampoline.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto badframe;

    /*
     * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
     * use this trampoline anymore but it sets it up for GDB.
     * In QEMU, using the trampoline simplifies things a bit so we use it.
     *
     * This is movu.w __NR_sigreturn, r9; break 13;
     */
    __put_user(0x9c5f, frame->retcode+0);
    __put_user(TARGET_NR_sigreturn,
               frame->retcode + 1);
    __put_user(0xe93d, frame->retcode + 2);

    /* Save the mask. */
    __put_user(set->sig[0], &frame->sc.oldmask);

    /* Words beyond the first go into extramask. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_sigcontext(&frame->sc, env);

    /* Move the stack and setup the arguments for the handler. */
    env->regs[R_SP] = frame_addr;
    env->regs[10] = sig;                      /* arg 1: signal number */
    env->pc = (unsigned long) ka->_sa_handler;
    /* Link SRP so the guest returns through the trampoline. */
    env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);

    unlock_user_struct(frame, frame_addr, 1);
    return;
badframe:
    force_sigsegv(sig);
}
4375
/* RT (SA_SIGINFO) signal frames are not implemented for CRIS. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUCRISState *env)
{
    fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
}
4382
/*
 * Handle the CRIS sigreturn syscall: restore the signal mask and CPU
 * state saved by setup_frame() from the frame at the guest SP.
 */
long do_sigreturn(CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    frame_addr = env->regs[R_SP];
    trace_user_do_sigreturn(env, frame_addr);
    /* Make sure the guest isn't playing games. */
    /* NOTE(review): VERIFY_READ would suffice — the frame is only read. */
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
        goto badframe;
    }

    /* Restore blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }
    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(&frame->sc, env);
    unlock_user_struct(frame, frame_addr, 0);
    /* ESIGRETURN tells the main loop the registers are already final. */
    return -TARGET_QEMU_ESIGRETURN;
badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
4413
/* rt_sigreturn is unimplemented for CRIS — no rt frames are built. */
long do_rt_sigreturn(CPUCRISState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4420
4421 #elif defined(TARGET_NIOS2)
4422
4423 #define MCONTEXT_VERSION 2
4424
/* Versioned register dump; version checked in rt_restore_ucontext(). */
struct target_sigcontext {
    int version;            /* must equal MCONTEXT_VERSION */
    unsigned long gregs[32];
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

/* Nios2 is rt-only: siginfo followed by the ucontext. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};
4442
4443 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4444 {
4445 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4446 #ifdef CONFIG_STACK_GROWSUP
4447 return target_sigaltstack_used.ss_sp;
4448 #else
4449 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4450 #endif
4451 }
4452 return sp;
4453 }
4454
/*
 * Fill the guest ucontext from CPU state.  gregs[] follows the kernel's
 * nios2 mcontext layout: r1..r23 in gregs[0..22], then ra/fp/gp, ea at
 * gregs[27] and sp at gregs[28].  gregs[26] is deliberately not written
 * (presumably a status slot with no user-settable bits — confirm
 * against the kernel's struct definition).  Always returns 0.
 */
static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
{
    unsigned long *gregs = uc->tuc_mcontext.gregs;

    __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
    __put_user(env->regs[1], &gregs[0]);
    __put_user(env->regs[2], &gregs[1]);
    __put_user(env->regs[3], &gregs[2]);
    __put_user(env->regs[4], &gregs[3]);
    __put_user(env->regs[5], &gregs[4]);
    __put_user(env->regs[6], &gregs[5]);
    __put_user(env->regs[7], &gregs[6]);
    __put_user(env->regs[8], &gregs[7]);
    __put_user(env->regs[9], &gregs[8]);
    __put_user(env->regs[10], &gregs[9]);
    __put_user(env->regs[11], &gregs[10]);
    __put_user(env->regs[12], &gregs[11]);
    __put_user(env->regs[13], &gregs[12]);
    __put_user(env->regs[14], &gregs[13]);
    __put_user(env->regs[15], &gregs[14]);
    __put_user(env->regs[16], &gregs[15]);
    __put_user(env->regs[17], &gregs[16]);
    __put_user(env->regs[18], &gregs[17]);
    __put_user(env->regs[19], &gregs[18]);
    __put_user(env->regs[20], &gregs[19]);
    __put_user(env->regs[21], &gregs[20]);
    __put_user(env->regs[22], &gregs[21]);
    __put_user(env->regs[23], &gregs[22]);
    __put_user(env->regs[R_RA], &gregs[23]);
    __put_user(env->regs[R_FP], &gregs[24]);
    __put_user(env->regs[R_GP], &gregs[25]);
    __put_user(env->regs[R_EA], &gregs[27]);
    __put_user(env->regs[R_SP], &gregs[28]);

    return 0;
}
4491
/*
 * Restore CPU state from an rt signal frame's ucontext.
 * Returns 0 on success, 1 if the frame is invalid (bad mcontext
 * version or sigaltstack fault).  *pr2 receives the guest r2 value the
 * sigreturn syscall should report back.
 */
static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
                               int *pr2)
{
    int temp;
    abi_ulong off, frame_addr = env->regs[R_SP];
    unsigned long *gregs = uc->tuc_mcontext.gregs;
    int err;

    /* Always make any pending restarted system calls return -EINTR */
    /* current->restart_block.fn = do_no_restart_syscall; */

    /* Reject frames written by a different mcontext layout. */
    __get_user(temp, &uc->tuc_mcontext.version);
    if (temp != MCONTEXT_VERSION) {
        return 1;
    }

    /* restore passed registers */
    __get_user(env->regs[1], &gregs[0]);
    __get_user(env->regs[2], &gregs[1]);
    __get_user(env->regs[3], &gregs[2]);
    __get_user(env->regs[4], &gregs[3]);
    __get_user(env->regs[5], &gregs[4]);
    __get_user(env->regs[6], &gregs[5]);
    __get_user(env->regs[7], &gregs[6]);
    __get_user(env->regs[8], &gregs[7]);
    __get_user(env->regs[9], &gregs[8]);
    __get_user(env->regs[10], &gregs[9]);
    __get_user(env->regs[11], &gregs[10]);
    __get_user(env->regs[12], &gregs[11]);
    __get_user(env->regs[13], &gregs[12]);
    __get_user(env->regs[14], &gregs[13]);
    __get_user(env->regs[15], &gregs[14]);
    __get_user(env->regs[16], &gregs[15]);
    __get_user(env->regs[17], &gregs[16]);
    __get_user(env->regs[18], &gregs[17]);
    __get_user(env->regs[19], &gregs[18]);
    __get_user(env->regs[20], &gregs[19]);
    __get_user(env->regs[21], &gregs[20]);
    __get_user(env->regs[22], &gregs[21]);
    __get_user(env->regs[23], &gregs[22]);
    /* gregs[23] is handled below */
    /* Verify, should this be settable */
    __get_user(env->regs[R_FP], &gregs[24]);
    /* Verify, should this be settable */
    __get_user(env->regs[R_GP], &gregs[25]);
    /* Not really necessary no user settable bits */
    __get_user(temp, &gregs[26]);
    __get_user(env->regs[R_EA], &gregs[27]);

    __get_user(env->regs[R_RA], &gregs[23]);
    __get_user(env->regs[R_SP], &gregs[28]);

    /* Restore the alternate-signal-stack settings stored in the frame. */
    off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
    err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
    if (err == -EFAULT) {
        return 1;
    }

    *pr2 = env->regs[2];
    return 0;
}
4553
/*
 * Compute the address for a new Nios2 signal frame: current sp (or the
 * alternate signal stack), minus the frame size, 8-byte aligned.
 * NOTE(review): the returned value is a *guest* address cast to a host
 * void* — callers must translate it (e.g. via lock_user_struct) before
 * dereferencing; verify every caller does so.
 */
static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
                          size_t frame_size)
{
    unsigned long usp;

    /* Default to using normal stack. */
    usp = env->regs[R_SP];

    /* This is the X/Open sanctioned signal stack switching. */
    usp = sigsp(usp, ka);

    /* Verify, is it 32 or 64 bit aligned */
    return (void *)((usp - frame_size) & -8UL);
}
4568
4569 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4570 target_siginfo_t *info,
4571 target_sigset_t *set,
4572 CPUNios2State *env)
4573 {
4574 struct target_rt_sigframe *frame;
4575 int i, err = 0;
4576
4577 frame = get_sigframe(ka, env, sizeof(*frame));
4578
4579 if (ka->sa_flags & SA_SIGINFO) {
4580 tswap_siginfo(&frame->info, info);
4581 }
4582
4583 /* Create the ucontext. */
4584 __put_user(0, &frame->uc.tuc_flags);
4585 __put_user(0, &frame->uc.tuc_link);
4586 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4587 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4588 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4589 err |= rt_setup_ucontext(&frame->uc, env);
4590 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4591 __put_user((abi_ulong)set->sig[i],
4592 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4593 }
4594
4595 if (err) {
4596 goto give_sigsegv;
4597 }
4598
4599 /* Set up to return from userspace; jump to fixed address sigreturn
4600 trampoline on kuser page. */
4601 env->regs[R_RA] = (unsigned long) (0x1044);
4602
4603 /* Set up registers for signal handler */
4604 env->regs[R_SP] = (unsigned long) frame;
4605 env->regs[4] = (unsigned long) sig;
4606 env->regs[5] = (unsigned long) &frame->info;
4607 env->regs[6] = (unsigned long) &frame->uc;
4608 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4609 return;
4610
4611 give_sigsegv:
4612 if (sig == TARGET_SIGSEGV) {
4613 ka->_sa_handler = TARGET_SIG_DFL;
4614 }
4615 force_sigsegv(sig);
4616 return;
4617 }
4618
/* Non-RT sigreturn is unimplemented for Nios2 (the target only builds
   rt frames, see setup_rt_frame). */
long do_sigreturn(CPUNios2State *env)
{
    trace_user_do_sigreturn(env, 0);
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4625
/*
 * Handle the Nios2 rt_sigreturn syscall: restore the signal mask and
 * CPU state from the rt frame at the guest SP.  On success returns the
 * guest r2 value saved in the frame.
 */
long do_rt_sigreturn(CPUNios2State *env)
{
    /* Verify, can we follow the stack back */
    abi_ulong frame_addr = env->regs[R_SP];
    struct target_rt_sigframe *frame;
    sigset_t set;
    int rval;

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    if (rt_restore_ucontext(env, &frame->uc, &rval)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return rval;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
4653 /* TARGET_NIOS2 */
4654
4655 #elif defined(TARGET_OPENRISC)
4656
/* Register dump plus the pre-signal mask and user stack pointer. */
struct target_sigcontext {
    struct target_pt_regs regs;
    abi_ulong oldmask;
    abi_ulong usp;          /* user sp before the frame was pushed */
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

/* OpenRISC is rt-only: pointers to info/uc, the data, then the
   sigreturn trampoline. */
struct target_rt_sigframe {
    abi_ulong pinfo;        /* guest address of info below */
    uint64_t puc;           /* guest address of uc below */
    struct target_siginfo info;
    struct target_sigcontext sc;
    struct target_ucontext uc;
    unsigned char retcode[16];  /* trampoline code */
};
4679
/* This is the asm-generic/ucontext.h version */
/* NOTE(review): dead reference code, compiled out — it still contains
   kernel-side calls (wrusp, phx_signal, current_thread_info) that do
   not exist in QEMU and would not build if enabled. */
#if 0
static int restore_sigcontext(CPUOpenRISCState *regs,
                              struct target_sigcontext *sc)
{
    unsigned int err = 0;
    unsigned long old_usp;

    /* Alwys make any pending restarted system call return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    /* restore the regs from &sc->regs (same as sc, since regs is first)
     * (sc is already checked for VERIFY_READ since the sigframe was
     *  checked in sys_sigreturn previously)
     */

    if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
        goto badframe;
    }

    /* make sure the U-flag is set so user-mode cannot fool us */

    regs->sr &= ~SR_SM;

    /* restore the old USP as it was before we stacked the sc etc.
     * (we cannot just pop the sigcontext since we aligned the sp and
     *  stuff after pushing it)
     */

    __get_user(old_usp, &sc->usp);
    phx_signal("old_usp 0x%lx", old_usp);

    __PHX__ REALLY /* ??? */
    wrusp(old_usp);
    regs->gpr[1] = old_usp;

    /* TODO: the other ports use regs->orig_XX to disable syscall checks
     * after this completes, but we don't use that mechanism. maybe we can
     * use it now ?
     */

    return err;

badframe:
    return 1;
}
#endif
4727
4728 /* Set up a signal frame. */
4729
/* Set up a signal frame. */

/*
 * Partially fill the guest sigcontext: only oldmask and the saved user
 * stack pointer are written; the register dump copy is still a TODO
 * (see the commented-out copy_to_user below).
 */
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUOpenRISCState *regs,
                             unsigned long mask)
{
    unsigned long usp = cpu_get_gpr(regs, 1);

    /* copy the regs. they are first in sc so we can use sc directly */

    /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/

    /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
       the signal handler. The frametype will be restored to its previous
       value in restore_sigcontext. */
    /*regs->frametype = CRIS_FRAME_NORMAL;*/

    /* then some other stuff */
    __put_user(mask, &sc->oldmask);
    __put_user(usp, &sc->usp);
}
4749
/* Round a stack pointer down to the 4-byte boundary required for an
   OpenRISC signal frame. */
static inline unsigned long align_sigframe(unsigned long sp)
{
    return sp - (sp & 3UL);
}
4754
/*
 * Compute the guest address for a new OpenRISC signal frame.
 * Honours SA_ONSTACK, aligns down to 4 bytes, and returns -1 (a bogus
 * address that will fault as SIGSEGV) if the frame would overflow the
 * alternate signal stack we were already running on.
 */
static inline abi_ulong get_sigframe(struct target_sigaction *ka,
                                     CPUOpenRISCState *regs,
                                     size_t frame_size)
{
    unsigned long sp = cpu_get_gpr(regs, 1);
    int onsigstack = on_sig_stack(sp);

    /* redzone */
    /* This is the X/Open sanctioned signal stack switching. */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = align_sigframe(sp - frame_size);

    /*
     * If we are on the alternate signal stack and would overflow it, don't.
     * Return an always-bogus address instead so we will die with SIGSEGV.
     */

    if (onsigstack && !likely(on_sig_stack(sp))) {
        return -1L;
    }

    return sp;
}
4781
4782 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4783 target_siginfo_t *info,
4784 target_sigset_t *set, CPUOpenRISCState *env)
4785 {
4786 int err = 0;
4787 abi_ulong frame_addr;
4788 unsigned long return_ip;
4789 struct target_rt_sigframe *frame;
4790 abi_ulong info_addr, uc_addr;
4791
4792 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4793 trace_user_setup_rt_frame(env, frame_addr);
4794 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4795 goto give_sigsegv;
4796 }
4797
4798 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4799 __put_user(info_addr, &frame->pinfo);
4800 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4801 __put_user(uc_addr, &frame->puc);
4802
4803 if (ka->sa_flags & SA_SIGINFO) {
4804 tswap_siginfo(&frame->info, info);
4805 }
4806
4807 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4808 __put_user(0, &frame->uc.tuc_flags);
4809 __put_user(0, &frame->uc.tuc_link);
4810 __put_user(target_sigaltstack_used.ss_sp,
4811 &frame->uc.tuc_stack.ss_sp);
4812 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4813 &frame->uc.tuc_stack.ss_flags);
4814 __put_user(target_sigaltstack_used.ss_size,
4815 &frame->uc.tuc_stack.ss_size);
4816 setup_sigcontext(&frame->sc, env, set->sig[0]);
4817
4818 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4819
4820 /* trampoline - the desired return ip is the retcode itself */
4821 return_ip = (unsigned long)&frame->retcode;
4822 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4823 __put_user(0xa960, (short *)(frame->retcode + 0));
4824 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4825 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4826 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4827
4828 if (err) {
4829 goto give_sigsegv;
4830 }
4831
4832 /* TODO what is the current->exec_domain stuff and invmap ? */
4833
4834 /* Set up registers for signal handler */
4835 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4836 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4837 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4838 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4839 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
4840
4841 /* actually move the usp to reflect the stacked frame */
4842 cpu_set_gpr(env, 1, (unsigned long)frame);
4843
4844 return;
4845
4846 give_sigsegv:
4847 unlock_user_struct(frame, frame_addr, 1);
4848 force_sigsegv(sig);
4849 }
4850
/* Non-RT sigreturn is unimplemented for OpenRISC (rt frames only). */
long do_sigreturn(CPUOpenRISCState *env)
{
    trace_user_do_sigreturn(env, 0);
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4857
/* rt_sigreturn is not yet implemented for OpenRISC, so a handler that
   returns through the trampoline gets ENOSYS. */
long do_rt_sigreturn(CPUOpenRISCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4864 /* TARGET_OPENRISC */
4865
4866 #elif defined(TARGET_S390X)
4867
4868 #define __NUM_GPRS 16
4869 #define __NUM_FPRS 16
4870 #define __NUM_ACRS 16
4871
4872 #define S390_SYSCALL_SIZE 2
4873 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4874
4875 #define _SIGCONTEXT_NSIG 64
4876 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4877 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4878 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4879 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4880 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4881
/* PSW plus general and access registers, as seen by the guest. */
typedef struct {
    target_psw_t psw;
    target_ulong gprs[__NUM_GPRS];
    unsigned int acrs[__NUM_ACRS];
} target_s390_regs_common;

typedef struct {
    unsigned int fpc;
    double fprs[__NUM_FPRS];
} target_s390_fp_regs;

/* Complete register save area stored in a frame. */
typedef struct {
    target_s390_regs_common regs;
    target_s390_fp_regs fpregs;
} target_sigregs;

/* First word of the mask plus a guest pointer to the sigregs area. */
struct target_sigcontext {
    target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
    target_sigregs *sregs;
};

/* Non-RT frame: callee save area, sigcontext, registers, signal number
   (for backtraces) and the sigreturn trampoline. */
typedef struct {
    uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
    struct target_sigcontext sc;
    target_sigregs sregs;
    int signo;
    uint8_t retcode[S390_SYSCALL_SIZE];
} sigframe;

struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    target_sigregs tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

/* RT frame: trampoline sits before info/uc in this layout. */
typedef struct {
    uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
    uint8_t retcode[S390_SYSCALL_SIZE];
    struct target_siginfo info;
    struct target_ucontext uc;
} rt_sigframe;
4925
/*
 * Choose the guest stack address for a new s390x signal frame:
 * r15 by default, the alternate stack for SA_ONSTACK, and an 8-byte
 * aligned slot of frame_size bytes below it.  The legacy sa_restorer
 * branch is dead (guarded by a hard-coded 0).
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
{
    abi_ulong sp;

    /* Default to using normal stack */
    sp = env->regs[15];

    /* This is the X/Open sanctioned signal stack switching. */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (!sas_ss_flags(sp)) {
            sp = target_sigaltstack_used.ss_sp +
                 target_sigaltstack_used.ss_size;
        }
    }

    /* This is the legacy signal stack switching. */
    else if (/* FIXME !user_mode(regs) */ 0 &&
             !(ka->sa_flags & TARGET_SA_RESTORER) &&
             ka->sa_restorer) {
        sp = (abi_ulong) ka->sa_restorer;
    }

    return (sp - frame_size) & -8ul;
}
4951
/* Dump PSW, GPRs, access registers and FPRs into a guest sigregs
   image. */
static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
{
    int i;
    //save_access_regs(current->thread.acrs); FIXME

    /* Copy a 'clean' PSW mask to the user to avoid leaking
       information about whether PER is currently on. */
    __put_user(env->psw.mask, &sregs->regs.psw.mask);
    __put_user(env->psw.addr, &sregs->regs.psw.addr);
    for (i = 0; i < 16; i++) {
        __put_user(env->regs[i], &sregs->regs.gprs[i]);
    }
    for (i = 0; i < 16; i++) {
        __put_user(env->aregs[i], &sregs->regs.acrs[i]);
    }
    /*
     * We have to store the fp registers to current->thread.fp_regs
     * to merge them with the emulated registers.
     */
    //save_fp_regs(&current->thread.fp_regs); FIXME
    for (i = 0; i < 16; i++) {
        __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
    }
}
4976
4977 static void setup_frame(int sig, struct target_sigaction *ka,
4978 target_sigset_t *set, CPUS390XState *env)
4979 {
4980 sigframe *frame;
4981 abi_ulong frame_addr;
4982
4983 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4984 trace_user_setup_frame(env, frame_addr);
4985 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4986 goto give_sigsegv;
4987 }
4988
4989 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4990
4991 save_sigregs(env, &frame->sregs);
4992
4993 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4994 (abi_ulong *)&frame->sc.sregs);
4995
4996 /* Set up to return from userspace. If provided, use a stub
4997 already in userspace. */
4998 if (ka->sa_flags & TARGET_SA_RESTORER) {
4999 env->regs[14] = (unsigned long)
5000 ka->sa_restorer | PSW_ADDR_AMODE;
5001 } else {
5002 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
5003 | PSW_ADDR_AMODE;
5004 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
5005 (uint16_t *)(frame->retcode));
5006 }
5007
5008 /* Set up backchain. */
5009 __put_user(env->regs[15], (abi_ulong *) frame);
5010
5011 /* Set up registers for signal handler */
5012 env->regs[15] = frame_addr;
5013 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
5014
5015 env->regs[2] = sig; //map_signal(sig);
5016 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
5017
5018 /* We forgot to include these in the sigcontext.
5019 To avoid breaking binary compatibility, they are passed as args. */
5020 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
5021 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
5022
5023 /* Place signal number on stack to allow backtrace from handler. */
5024 __put_user(env->regs[2], &frame->signo);
5025 unlock_user_struct(frame, frame_addr, 1);
5026 return;
5027
5028 give_sigsegv:
5029 force_sigsegv(sig);
5030 }
5031
5032 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5033 target_siginfo_t *info,
5034 target_sigset_t *set, CPUS390XState *env)
5035 {
5036 int i;
5037 rt_sigframe *frame;
5038 abi_ulong frame_addr;
5039
5040 frame_addr = get_sigframe(ka, env, sizeof *frame);
5041 trace_user_setup_rt_frame(env, frame_addr);
5042 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5043 goto give_sigsegv;
5044 }
5045
5046 tswap_siginfo(&frame->info, info);
5047
5048 /* Create the ucontext. */
5049 __put_user(0, &frame->uc.tuc_flags);
5050 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
5051 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5052 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
5053 &frame->uc.tuc_stack.ss_flags);
5054 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5055 save_sigregs(env, &frame->uc.tuc_mcontext);
5056 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5057 __put_user((abi_ulong)set->sig[i],
5058 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
5059 }
5060
5061 /* Set up to return from userspace. If provided, use a stub
5062 already in userspace. */
5063 if (ka->sa_flags & TARGET_SA_RESTORER) {
5064 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
5065 } else {
5066 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
5067 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
5068 (uint16_t *)(frame->retcode));
5069 }
5070
5071 /* Set up backchain. */
5072 __put_user(env->regs[15], (abi_ulong *) frame);
5073
5074 /* Set up registers for signal handler */
5075 env->regs[15] = frame_addr;
5076 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
5077
5078 env->regs[2] = sig; //map_signal(sig);
5079 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
5080 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
5081 return;
5082
5083 give_sigsegv:
5084 force_sigsegv(sig);
5085 }
5086
/* Reload PSW, GPRs, access registers and FPRs from a guest sigregs
   image.  Always returns 0 (err is never set). */
static int
restore_sigregs(CPUS390XState *env, target_sigregs *sc)
{
    int err = 0;
    int i;

    for (i = 0; i < 16; i++) {
        __get_user(env->regs[i], &sc->regs.gprs[i]);
    }

    __get_user(env->psw.mask, &sc->regs.psw.mask);
    trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
                                     (unsigned long long)env->psw.addr);
    __get_user(env->psw.addr, &sc->regs.psw.addr);
    /* FIXME: 31-bit -> | PSW_ADDR_AMODE */

    for (i = 0; i < 16; i++) {
        __get_user(env->aregs[i], &sc->regs.acrs[i]);
    }
    for (i = 0; i < 16; i++) {
        __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
    }

    return err;
}
5112
/*
 * Handle the s390x sigreturn syscall: restore the signal mask (only
 * sig[0] — the old sigcontext stores a single mask word) and the CPU
 * state saved by setup_frame() at the guest r15.
 */
long do_sigreturn(CPUS390XState *env)
{
    sigframe *frame;
    abi_ulong frame_addr = env->regs[15];
    target_sigset_t target_set;
    sigset_t set;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    __get_user(target_set.sig[0], &frame->sc.oldmask[0]);

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set); /* ~_BLOCKABLE? */

    if (restore_sigregs(env, &frame->sregs)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    /* ESIGRETURN tells the main loop the registers are already final. */
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
5140
/*
 * Handle the s390x rt_sigreturn syscall: restore the full signal mask,
 * the CPU state from the frame's ucontext, and the sigaltstack
 * settings saved in the frame.
 */
long do_rt_sigreturn(CPUS390XState *env)
{
    rt_sigframe *frame;
    abi_ulong frame_addr = env->regs[15];
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);

    set_sigmask(&set); /* ~_BLOCKABLE? */

    if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
5171
5172 #elif defined(TARGET_PPC)
5173
5174 /* Size of dummy stack frame allocated when calling signal handler.
5175 See arch/powerpc/include/asm/ptrace.h. */
5176 #if defined(TARGET_PPC64)
5177 #define SIGNAL_FRAMESIZE 128
5178 #else
5179 #define SIGNAL_FRAMESIZE 64
5180 #endif
5181
5182 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
5183 on 64-bit PPC, sigcontext and mcontext are one and the same. */
struct target_mcontext {
    /* Integer state: r0-r31 plus nip/msr/ctr/lnk/xer/ccr and friends,
       indexed by the TARGET_PT_* enum below. */
    target_ulong mc_gregs[48];
    /* Includes fpscr. */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    /* On 32-bit these pad words double as the signal-return
       trampoline (see "#define tramp mc_pad" further down). */
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do. Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform. */
    union {
        /* SPE vector registers. One extra for SPEFSCR. */
        uint32_t spe[33];
        /* Altivec vector registers. The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64
        */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};
5228
5229 /* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
    /* _unused[3] is also used to stash the second word of the signal
       mask (see setup_frame/do_sigreturn below). */
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs; /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    /* On ppc64 the mcontext is embedded directly in the sigcontext. */
    struct target_mcontext mcontext;
#endif
};
5243
5244 /* Indices for target_mcontext.mc_gregs, below.
5245 See arch/powerpc/include/asm/ptrace.h for details. */
enum {
    /* GPRs occupy slots 0-31, matching their register numbers. */
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    /* Special-purpose registers follow the GPR block. */
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39. One is 64-bit only. */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};
5295
5296
/* Guest ucontext; note the 32-bit and 64-bit layouts differ: ppc32
   carries a pointer (tuc_regs) to the mcontext, ppc64 embeds it via
   tuc_sigcontext. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* Non-RT (32-bit) signal frame.  See arch/powerpc/kernel/signal_32.c. */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};
5323
#if defined(TARGET_PPC64)

/* Number of 32-bit slots reserved for the signal-return trampoline. */
#define TARGET_TRAMP_SIZE 6

/* 64-bit RT signal frame. */
struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc; /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

/* 32-bit RT signal frame. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

/* ELFv1 function descriptor: code entry point plus its TOC pointer
   (see the OPD handling in setup_rt_frame). */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif
5358
5359 /* We use the mc_pad field for the signal return trampoline. */
5360 #define tramp mc_pad
5361
5362 /* See arch/powerpc/kernel/signal.c. */
5363 static target_ulong get_sigframe(struct target_sigaction *ka,
5364 CPUPPCState *env,
5365 int frame_size)
5366 {
5367 target_ulong oldsp;
5368
5369 oldsp = env->gpr[1];
5370
5371 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5372 (sas_ss_flags(oldsp) == 0)) {
5373 oldsp = (target_sigaltstack_used.ss_sp
5374 + target_sigaltstack_used.ss_size);
5375 }
5376
5377 return (oldsp - frame_size) & ~0xFUL;
5378 }
5379
5380 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5381 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5382 #define PPC_VEC_HI 0
5383 #define PPC_VEC_LO 1
5384 #else
5385 #define PPC_VEC_HI 1
5386 #define PPC_VEC_LO 0
5387 #endif
5388
5389
/* Dump the CPU's user-visible register state into @frame (the guest
   mcontext).  The MSR image stored in the frame is adjusted to
   advertise which optional register sets (Altivec/SPE) were saved. */
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    /* Pack the eight 4-bit CR fields into one 32-bit CCR word. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            /* Store the two 64-bit halves in target word order
               (see the PPC_VEC_HI/PPC_VEC_LO definitions above). */
            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= MSR_VR;
#if defined(TARGET_PPC64)
        /* VRSAVE follows the 33 vector slots on ppc64... */
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        /* ...and the 32 slots on ppc32. */
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __put_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __put_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        /* fpscr shares the last mc_fregs slot. */
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= MSR_SPE;
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}
5473
5474 static void encode_trampoline(int sigret, uint32_t *tramp)
5475 {
5476 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5477 if (sigret) {
5478 __put_user(0x38000000 | sigret, &tramp[0]);
5479 __put_user(0x44000002, &tramp[1]);
5480 }
5481 }
5482
/* Reload the CPU's user register state from @frame.  @sig is non-zero
   on the signal-return path: then the saved little-endian mode bit is
   honoured and r2 comes from the frame; when @sig is zero the current
   r2 is preserved across the restore. */
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Unpack the 32-bit CCR word into the eight 4-bit CR fields. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &v_regs[i];

            /* Word-order swap mirrors save_user_regs(). */
            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VEC in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __get_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            /* Merge the saved high word over the current low word. */
            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}
5584
5585 #if !defined(TARGET_PPC64)
/* Build a (non-RT) 32-bit signal frame on the guest stack and point
   the CPU at the handler.  A sigcontext + mcontext is written at
   frame_addr, with a back-chained dummy stack frame below it for the
   handler to run on. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    /* Second word of the mask is stashed in an _unused slot. */
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler: r3 = signo, r4 = &sctx.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
5643 #endif /* !defined(TARGET_PPC64) */
5644
/* Build an RT signal frame (siginfo + ucontext + trampoline) on the
   guest stack and enter the handler with (sig, &info, &uc) in r3-r5.
   On ppc64 the handler address is decoded according to the
   ELFv1/ELFv2 function-pointer convention. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    /* Fill in the ucontext: flags, link, sigaltstack state...  */
    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    __put_user((target_ulong)target_sigaltstack_used.ss_sp,
               &rt_sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]),
               &rt_sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &rt_sf->uc.tuc_stack.ss_size);
#if !defined(TARGET_PPC64)
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    /* ...and the blocked-signal mask. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    /* ppc64: the mcontext is embedded in the sigcontext and the
       trampoline has its own slot in the frame. */
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    /* ppc32: the trampoline reuses the mcontext pad words. */
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);

}
5745
5746 #if !defined(TARGET_PPC64)
5747 long do_sigreturn(CPUPPCState *env)
5748 {
5749 struct target_sigcontext *sc = NULL;
5750 struct target_mcontext *sr = NULL;
5751 target_ulong sr_addr = 0, sc_addr;
5752 sigset_t blocked;
5753 target_sigset_t set;
5754
5755 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5756 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5757 goto sigsegv;
5758
5759 #if defined(TARGET_PPC64)
5760 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5761 #else
5762 __get_user(set.sig[0], &sc->oldmask);
5763 __get_user(set.sig[1], &sc->_unused[3]);
5764 #endif
5765 target_to_host_sigset_internal(&blocked, &set);
5766 set_sigmask(&blocked);
5767
5768 __get_user(sr_addr, &sc->regs);
5769 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5770 goto sigsegv;
5771 restore_user_regs(env, sr, 1);
5772
5773 unlock_user_struct(sr, sr_addr, 1);
5774 unlock_user_struct(sc, sc_addr, 1);
5775 return -TARGET_QEMU_ESIGRETURN;
5776
5777 sigsegv:
5778 unlock_user_struct(sr, sr_addr, 1);
5779 unlock_user_struct(sc, sc_addr, 1);
5780 force_sig(TARGET_SIGSEGV);
5781 return -TARGET_QEMU_ESIGRETURN;
5782 }
5783 #endif /* !defined(TARGET_PPC64) */
5784
5785 /* See arch/powerpc/kernel/signal_32.c. */
/* Shared worker for rt_sigreturn-style restores: read the signal mask
   and register state out of the guest ucontext @ucp and install them.
   @sig is forwarded to restore_user_regs().  Returns 0 on success,
   non-zero on a guest-memory access failure. */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    /* ppc64 embeds the mcontext in the sigcontext... */
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    /* ...ppc32 keeps a guest pointer to it in tuc_regs. */
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
5814
/* Handle the PPC rt_sigreturn syscall.  The RT frame sits above the
   dummy stack frame created by setup_rt_frame(), hence the
   SIGNAL_FRAMESIZE + 16 offset from the current r1. */
long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    /* Restore the alternate signal stack; the result is ignored here. */
    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
5839
5840 #elif defined(TARGET_M68K)
5841
/* Classic (non-RT) m68k sigcontext: mask word, user stack pointer,
   the scratch register pairs d0/d1 and a0/a1, SR and PC. */
struct target_sigcontext {
    abi_ulong  sc_mask;
    abi_ulong  sc_usp;
    abi_ulong  sc_d0;
    abi_ulong  sc_d1;
    abi_ulong  sc_a0;
    abi_ulong  sc_a1;
    unsigned short sc_sr;
    abi_ulong  sc_pc;
};

/* Non-RT signal frame pushed on the guest stack. */
struct target_sigframe
{
    abi_ulong pretcode;   /* guest address of 'retcode' below */
    int sig;
    int code;
    abi_ulong psc;        /* guest address of 'sc' below */
    char retcode[8];      /* "moveq #,d0; trap #0" return stub */
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    struct target_sigcontext sc;
};

typedef int target_greg_t;
#define TARGET_NGREG 18
typedef target_greg_t target_gregset_t[TARGET_NGREG];

/* FP state: fpcr/fpsr (and an unused fpiar slot) plus eight FP
   registers stored as three 32-bit words each. */
typedef struct target_fpregset {
    int f_fpcntl[3];
    int f_fpregs[8*3];
} target_fpregset_t;

struct target_mcontext {
    int version;          /* must equal TARGET_MCONTEXT_VERSION */
    target_gregset_t gregs;
    target_fpregset_t fpregs;
};

#define TARGET_MCONTEXT_VERSION 2

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_mcontext tuc_mcontext;
    abi_long tuc_filler[80];
    target_sigset_t tuc_sigmask;
};

/* RT signal frame: carries full siginfo and a ucontext. */
struct target_rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    char retcode[8];
    struct target_siginfo info;
    struct target_ucontext uc;
};
5900
5901 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5902 abi_ulong mask)
5903 {
5904 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5905 __put_user(mask, &sc->sc_mask);
5906 __put_user(env->aregs[7], &sc->sc_usp);
5907 __put_user(env->dregs[0], &sc->sc_d0);
5908 __put_user(env->dregs[1], &sc->sc_d1);
5909 __put_user(env->aregs[0], &sc->sc_a0);
5910 __put_user(env->aregs[1], &sc->sc_a1);
5911 __put_user(sr, &sc->sc_sr);
5912 __put_user(env->pc, &sc->sc_pc);
5913 }
5914
5915 static void
5916 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5917 {
5918 int temp;
5919
5920 __get_user(env->aregs[7], &sc->sc_usp);
5921 __get_user(env->dregs[0], &sc->sc_d0);
5922 __get_user(env->dregs[1], &sc->sc_d1);
5923 __get_user(env->aregs[0], &sc->sc_a0);
5924 __get_user(env->aregs[1], &sc->sc_a1);
5925 __get_user(env->pc, &sc->sc_pc);
5926 __get_user(temp, &sc->sc_sr);
5927 cpu_m68k_set_ccr(env, temp);
5928 }
5929
5930 /*
5931 * Determine which stack to use..
5932 */
5933 static inline abi_ulong
5934 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5935 size_t frame_size)
5936 {
5937 unsigned long sp;
5938
5939 sp = regs->aregs[7];
5940
5941 /* This is the X/Open sanctioned signal stack switching. */
5942 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5943 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5944 }
5945
5946 return ((sp - frame_size) & -8UL);
5947 }
5948
/* Build a classic (non-RT) m68k signal frame on the guest stack and
   point the CPU at the handler.  The handler returns through the
   'retcode' stub, which issues the sigreturn syscall. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    abi_ulong retcode_addr;
    abi_ulong sc_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(sig, &frame->sig);

    sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
    __put_user(sc_addr, &frame->psc);

    setup_sigcontext(&frame->sc, env, set->sig[0]);

    /* Mask words 1..N go into extramask; word 0 went into sc_mask. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace. */

    retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; trap #0 */

    __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
               (uint32_t *)(frame->retcode));

    /* Set up to return from userspace */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}
5996
/* Copy the FPU control registers and the eight FP data registers into
   the RT ucontext.  Each register is stored as the d.high half
   shifted into the top 16 bits of one 32-bit word, followed by the
   64-bit d.low part spread over the next two words. */
static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
                                            CPUM68KState *env)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;

    __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
    __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high = env->fregs[i].d.high << 16;
        __put_user(high, &fpregs->f_fpregs[i * 3]);
        __put_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}
6014
6015 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
6016 CPUM68KState *env)
6017 {
6018 target_greg_t *gregs = uc->tuc_mcontext.gregs;
6019 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
6020
6021 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
6022 __put_user(env->dregs[0], &gregs[0]);
6023 __put_user(env->dregs[1], &gregs[1]);
6024 __put_user(env->dregs[2], &gregs[2]);
6025 __put_user(env->dregs[3], &gregs[3]);
6026 __put_user(env->dregs[4], &gregs[4]);
6027 __put_user(env->dregs[5], &gregs[5]);
6028 __put_user(env->dregs[6], &gregs[6]);
6029 __put_user(env->dregs[7], &gregs[7]);
6030 __put_user(env->aregs[0], &gregs[8]);
6031 __put_user(env->aregs[1], &gregs[9]);
6032 __put_user(env->aregs[2], &gregs[10]);
6033 __put_user(env->aregs[3], &gregs[11]);
6034 __put_user(env->aregs[4], &gregs[12]);
6035 __put_user(env->aregs[5], &gregs[13]);
6036 __put_user(env->aregs[6], &gregs[14]);
6037 __put_user(env->aregs[7], &gregs[15]);
6038 __put_user(env->pc, &gregs[16]);
6039 __put_user(sr, &gregs[17]);
6040
6041 target_rt_save_fpu_state(uc, env);
6042
6043 return 0;
6044 }
6045
/* Restore FPU control registers and the eight FP data registers from
   the RT ucontext, undoing the packing done by
   target_rt_save_fpu_state(). */
static inline void target_rt_restore_fpu_state(CPUM68KState *env,
                                               struct target_ucontext *uc)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
    uint32_t fpcr;

    __get_user(fpcr, &fpregs->f_fpcntl[0]);
    cpu_m68k_set_fpcr(env, fpcr);
    __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high;
        /* d.high was stored in the top 16 bits of the first word. */
        __get_user(high, &fpregs->f_fpregs[i * 3]);
        env->fregs[i].d.high = high >> 16;
        __get_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}
6066
6067 static inline int target_rt_restore_ucontext(CPUM68KState *env,
6068 struct target_ucontext *uc)
6069 {
6070 int temp;
6071 target_greg_t *gregs = uc->tuc_mcontext.gregs;
6072
6073 __get_user(temp, &uc->tuc_mcontext.version);
6074 if (temp != TARGET_MCONTEXT_VERSION)
6075 goto badframe;
6076
6077 /* restore passed registers */
6078 __get_user(env->dregs[0], &gregs[0]);
6079 __get_user(env->dregs[1], &gregs[1]);
6080 __get_user(env->dregs[2], &gregs[2]);
6081 __get_user(env->dregs[3], &gregs[3]);
6082 __get_user(env->dregs[4], &gregs[4]);
6083 __get_user(env->dregs[5], &gregs[5]);
6084 __get_user(env->dregs[6], &gregs[6]);
6085 __get_user(env->dregs[7], &gregs[7]);
6086 __get_user(env->aregs[0], &gregs[8]);
6087 __get_user(env->aregs[1], &gregs[9]);
6088 __get_user(env->aregs[2], &gregs[10]);
6089 __get_user(env->aregs[3], &gregs[11]);
6090 __get_user(env->aregs[4], &gregs[12]);
6091 __get_user(env->aregs[5], &gregs[13]);
6092 __get_user(env->aregs[6], &gregs[14]);
6093 __get_user(env->aregs[7], &gregs[15]);
6094 __get_user(env->pc, &gregs[16]);
6095 __get_user(temp, &gregs[17]);
6096 cpu_m68k_set_ccr(env, temp);
6097
6098 target_rt_restore_fpu_state(env, uc);
6099
6100 return 0;
6101
6102 badframe:
6103 return 1;
6104 }
6105
6106 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6107 target_siginfo_t *info,
6108 target_sigset_t *set, CPUM68KState *env)
6109 {
6110 struct target_rt_sigframe *frame;
6111 abi_ulong frame_addr;
6112 abi_ulong retcode_addr;
6113 abi_ulong info_addr;
6114 abi_ulong uc_addr;
6115 int err = 0;
6116 int i;
6117
6118 frame_addr = get_sigframe(ka, env, sizeof *frame);
6119 trace_user_setup_rt_frame(env, frame_addr);
6120 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6121 goto give_sigsegv;
6122 }
6123
6124 __put_user(sig, &frame->sig);
6125
6126 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
6127 __put_user(info_addr, &frame->pinfo);
6128
6129 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
6130 __put_user(uc_addr, &frame->puc);
6131
6132 tswap_siginfo(&frame->info, info);
6133
6134 /* Create the ucontext */
6135
6136 __put_user(0, &frame->uc.tuc_flags);
6137 __put_user(0, &frame->uc.tuc_link);
6138 __put_user(target_sigaltstack_used.ss_sp,
6139 &frame->uc.tuc_stack.ss_sp);
6140 __put_user(sas_ss_flags(env->aregs[7]),
6141 &frame->uc.tuc_stack.ss_flags);
6142 __put_user(target_sigaltstack_used.ss_size,
6143 &frame->uc.tuc_stack.ss_size);
6144 err |= target_rt_setup_ucontext(&frame->uc, env);
6145
6146 if (err)
6147 goto give_sigsegv;
6148
6149 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
6150 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6151 }
6152
6153 /* Set up to return from userspace. */
6154
6155 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
6156 __put_user(retcode_addr, &frame->pretcode);
6157
6158 /* moveq #,d0; notb d0; trap #0 */
6159
6160 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
6161 (uint32_t *)(frame->retcode + 0));
6162 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
6163
6164 if (err)
6165 goto give_sigsegv;
6166
6167 /* Set up to return from userspace */
6168
6169 env->aregs[7] = frame_addr;
6170 env->pc = ka->_sa_handler;
6171
6172 unlock_user_struct(frame, frame_addr, 1);
6173 return;
6174
6175 give_sigsegv:
6176 unlock_user_struct(frame, frame_addr, 1);
6177 force_sigsegv(sig);
6178 }
6179
/* Handle the classic m68k sigreturn syscall. */
long do_sigreturn(CPUM68KState *env)
{
    struct target_sigframe *frame;
    /* The pretcode word at the frame base has been consumed by the
       return into the retcode stub, so the frame starts 4 bytes
       below the current SP. */
    abi_ulong frame_addr = env->aregs[7] - 4;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* set blocked signals */

    __get_user(target_set.sig[0], &frame->sc.sc_mask);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */

    restore_sigcontext(env, &frame->sc);

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
6214
/* Handle the m68k rt_sigreturn syscall: restore the signal mask,
   registers and alternate signal stack from the RT frame. */
long do_rt_sigreturn(CPUM68KState *env)
{
    struct target_rt_sigframe *frame;
    /* As in do_sigreturn(): the pretcode word has already been
       consumed, so the frame is 4 bytes below the SP. */
    abi_ulong frame_addr = env->aregs[7] - 4;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    /* restore registers */

    if (target_rt_restore_ucontext(env, &frame->uc))
        goto badframe;

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
6246
6247 #elif defined(TARGET_ALPHA)
6248
/*
 * Guest view of the Alpha sigcontext.  Field order and widths must match
 * the guest kernel ABI; only the fields filled by setup_sigcontext()
 * below are actually emulated.
 */
struct target_sigcontext {
    abi_long sc_onstack;          /* non-zero when delivered on the alt stack */
    abi_long sc_mask;             /* first word of the blocked-signal mask */
    abi_long sc_pc;
    abi_long sc_ps;
    abi_long sc_regs[32];         /* integer registers; [31] written as 0 */
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];       /* FP registers; [31] written as 0 */
    abi_ulong sc_fpcr;
    abi_ulong sc_fp_control;
    abi_ulong sc_reserved1;
    abi_ulong sc_reserved2;
    abi_ulong sc_ssize;
    abi_ulong sc_sbase;
    abi_ulong sc_traparg_a0;      /* trap args: stored as 0, see FIXMEs below */
    abi_ulong sc_traparg_a1;
    abi_ulong sc_traparg_a2;
    abi_ulong sc_fp_trap_pc;
    abi_ulong sc_fp_trigger_sum;
    abi_ulong sc_fp_trigger_inst;
};

/* Guest ucontext for rt signals. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    abi_ulong tuc_osf_sigmask;    /* OSF-style copy of sig[0] */
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* Non-RT frame: context plus a 3-insn sigreturn trampoline. */
struct target_sigframe {
    struct target_sigcontext sc;
    unsigned int retcode[3];
};

/* RT frame: siginfo + ucontext plus the trampoline. */
struct target_rt_sigframe {
    target_siginfo_t info;
    struct target_ucontext uc;
    unsigned int retcode[3];
};

/* Trampoline opcodes (used as INSN_LDI_R0 + syscall number below). */
#define INSN_MOV_R30_R16 0x47fe0410
#define INSN_LDI_R0 0x201f0000
#define INSN_CALLSYS 0x00000083
6294
6295 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6296 abi_ulong frame_addr, target_sigset_t *set)
6297 {
6298 int i;
6299
6300 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6301 __put_user(set->sig[0], &sc->sc_mask);
6302 __put_user(env->pc, &sc->sc_pc);
6303 __put_user(8, &sc->sc_ps);
6304
6305 for (i = 0; i < 31; ++i) {
6306 __put_user(env->ir[i], &sc->sc_regs[i]);
6307 }
6308 __put_user(0, &sc->sc_regs[31]);
6309
6310 for (i = 0; i < 31; ++i) {
6311 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6312 }
6313 __put_user(0, &sc->sc_fpregs[31]);
6314 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6315
6316 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6317 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6318 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
6319 }
6320
6321 static void restore_sigcontext(CPUAlphaState *env,
6322 struct target_sigcontext *sc)
6323 {
6324 uint64_t fpcr;
6325 int i;
6326
6327 __get_user(env->pc, &sc->sc_pc);
6328
6329 for (i = 0; i < 31; ++i) {
6330 __get_user(env->ir[i], &sc->sc_regs[i]);
6331 }
6332 for (i = 0; i < 31; ++i) {
6333 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6334 }
6335
6336 __get_user(fpcr, &sc->sc_fpcr);
6337 cpu_alpha_store_fpcr(env, fpcr);
6338 }
6339
6340 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6341 CPUAlphaState *env,
6342 unsigned long framesize)
6343 {
6344 abi_ulong sp = env->ir[IR_SP];
6345
6346 /* This is the X/Open sanctioned signal stack switching. */
6347 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6348 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6349 }
6350 return (sp - framesize) & -32;
6351 }
6352
6353 static void setup_frame(int sig, struct target_sigaction *ka,
6354 target_sigset_t *set, CPUAlphaState *env)
6355 {
6356 abi_ulong frame_addr, r26;
6357 struct target_sigframe *frame;
6358 int err = 0;
6359
6360 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6361 trace_user_setup_frame(env, frame_addr);
6362 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6363 goto give_sigsegv;
6364 }
6365
6366 setup_sigcontext(&frame->sc, env, frame_addr, set);
6367
6368 if (ka->sa_restorer) {
6369 r26 = ka->sa_restorer;
6370 } else {
6371 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6372 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6373 &frame->retcode[1]);
6374 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6375 /* imb() */
6376 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
6377 }
6378
6379 unlock_user_struct(frame, frame_addr, 1);
6380
6381 if (err) {
6382 give_sigsegv:
6383 force_sigsegv(sig);
6384 return;
6385 }
6386
6387 env->ir[IR_RA] = r26;
6388 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6389 env->ir[IR_A0] = sig;
6390 env->ir[IR_A1] = 0;
6391 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6392 env->ir[IR_SP] = frame_addr;
6393 }
6394
6395 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6396 target_siginfo_t *info,
6397 target_sigset_t *set, CPUAlphaState *env)
6398 {
6399 abi_ulong frame_addr, r26;
6400 struct target_rt_sigframe *frame;
6401 int i, err = 0;
6402
6403 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6404 trace_user_setup_rt_frame(env, frame_addr);
6405 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6406 goto give_sigsegv;
6407 }
6408
6409 tswap_siginfo(&frame->info, info);
6410
6411 __put_user(0, &frame->uc.tuc_flags);
6412 __put_user(0, &frame->uc.tuc_link);
6413 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6414 __put_user(target_sigaltstack_used.ss_sp,
6415 &frame->uc.tuc_stack.ss_sp);
6416 __put_user(sas_ss_flags(env->ir[IR_SP]),
6417 &frame->uc.tuc_stack.ss_flags);
6418 __put_user(target_sigaltstack_used.ss_size,
6419 &frame->uc.tuc_stack.ss_size);
6420 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6421 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6422 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6423 }
6424
6425 if (ka->sa_restorer) {
6426 r26 = ka->sa_restorer;
6427 } else {
6428 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6429 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6430 &frame->retcode[1]);
6431 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6432 /* imb(); */
6433 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
6434 }
6435
6436 if (err) {
6437 give_sigsegv:
6438 force_sigsegv(sig);
6439 return;
6440 }
6441
6442 env->ir[IR_RA] = r26;
6443 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6444 env->ir[IR_A0] = sig;
6445 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6446 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6447 env->ir[IR_SP] = frame_addr;
6448 }
6449
6450 long do_sigreturn(CPUAlphaState *env)
6451 {
6452 struct target_sigcontext *sc;
6453 abi_ulong sc_addr = env->ir[IR_A0];
6454 target_sigset_t target_set;
6455 sigset_t set;
6456
6457 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6458 goto badframe;
6459 }
6460
6461 target_sigemptyset(&target_set);
6462 __get_user(target_set.sig[0], &sc->sc_mask);
6463
6464 target_to_host_sigset_internal(&set, &target_set);
6465 set_sigmask(&set);
6466
6467 restore_sigcontext(env, sc);
6468 unlock_user_struct(sc, sc_addr, 0);
6469 return -TARGET_QEMU_ESIGRETURN;
6470
6471 badframe:
6472 force_sig(TARGET_SIGSEGV);
6473 return -TARGET_QEMU_ESIGRETURN;
6474 }
6475
6476 long do_rt_sigreturn(CPUAlphaState *env)
6477 {
6478 abi_ulong frame_addr = env->ir[IR_A0];
6479 struct target_rt_sigframe *frame;
6480 sigset_t set;
6481
6482 trace_user_do_rt_sigreturn(env, frame_addr);
6483 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6484 goto badframe;
6485 }
6486 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6487 set_sigmask(&set);
6488
6489 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6490 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6491 uc.tuc_stack),
6492 0, env->ir[IR_SP]) == -EFAULT) {
6493 goto badframe;
6494 }
6495
6496 unlock_user_struct(frame, frame_addr, 0);
6497 return -TARGET_QEMU_ESIGRETURN;
6498
6499
6500 badframe:
6501 unlock_user_struct(frame, frame_addr, 0);
6502 force_sig(TARGET_SIGSEGV);
6503 return -TARGET_QEMU_ESIGRETURN;
6504 }
6505
6506 #elif defined(TARGET_TILEGX)
6507
/*
 * Guest view of the TILE-Gx sigcontext: 56 general registers (with named
 * aliases for tp/sp/lr) plus pc and interrupt state.  Layout must match
 * the guest kernel ABI.
 */
struct target_sigcontext {
    union {
        /* General-purpose registers. */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp; /* Aliases gregs[TREG_TP]. */
            abi_ulong sp; /* Aliases gregs[TREG_SP]. */
            abi_ulong lr; /* Aliases gregs[TREG_LR]. */
        };
    };
    abi_ulong pc; /* Program counter. */
    abi_ulong ics; /* In Interrupt Critical Section? */
    abi_ulong faultnum; /* Fault number. */
    abi_ulong pad[5];
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask; /* mask last for extensibility */
};

/* RT frame: caller-save scratch area, siginfo, ucontext, trampoline. */
struct target_rt_sigframe {
    unsigned char save_area[16]; /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];
};

/* Trampoline bundles for rt_sigreturn (syscall 139). */
#define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
#define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
6542
6543
6544 static void setup_sigcontext(struct target_sigcontext *sc,
6545 CPUArchState *env, int signo)
6546 {
6547 int i;
6548
6549 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6550 __put_user(env->regs[i], &sc->gregs[i]);
6551 }
6552
6553 __put_user(env->pc, &sc->pc);
6554 __put_user(0, &sc->ics);
6555 __put_user(signo, &sc->faultnum);
6556 }
6557
6558 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6559 {
6560 int i;
6561
6562 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6563 __get_user(env->regs[i], &sc->gregs[i]);
6564 }
6565
6566 __get_user(env->pc, &sc->pc);
6567 }
6568
6569 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6570 size_t frame_size)
6571 {
6572 unsigned long sp = env->regs[TILEGX_R_SP];
6573
6574 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6575 return -1UL;
6576 }
6577
6578 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
6579 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6580 }
6581
6582 sp -= frame_size;
6583 sp &= -16UL;
6584 return sp;
6585 }
6586
6587 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6588 target_siginfo_t *info,
6589 target_sigset_t *set, CPUArchState *env)
6590 {
6591 abi_ulong frame_addr;
6592 struct target_rt_sigframe *frame;
6593 unsigned long restorer;
6594
6595 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6596 trace_user_setup_rt_frame(env, frame_addr);
6597 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6598 goto give_sigsegv;
6599 }
6600
6601 /* Always write at least the signal number for the stack backtracer. */
6602 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6603 /* At sigreturn time, restore the callee-save registers too. */
6604 tswap_siginfo(&frame->info, info);
6605 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
6606 } else {
6607 __put_user(info->si_signo, &frame->info.si_signo);
6608 }
6609
6610 /* Create the ucontext. */
6611 __put_user(0, &frame->uc.tuc_flags);
6612 __put_user(0, &frame->uc.tuc_link);
6613 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6614 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6615 &frame->uc.tuc_stack.ss_flags);
6616 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6617 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6618
6619 if (ka->sa_flags & TARGET_SA_RESTORER) {
6620 restorer = (unsigned long) ka->sa_restorer;
6621 } else {
6622 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6623 __put_user(INSN_SWINT1, &frame->retcode[1]);
6624 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6625 }
6626 env->pc = (unsigned long) ka->_sa_handler;
6627 env->regs[TILEGX_R_SP] = (unsigned long) frame;
6628 env->regs[TILEGX_R_LR] = restorer;
6629 env->regs[0] = (unsigned long) sig;
6630 env->regs[1] = (unsigned long) &frame->info;
6631 env->regs[2] = (unsigned long) &frame->uc;
6632 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
6633
6634 unlock_user_struct(frame, frame_addr, 1);
6635 return;
6636
6637 give_sigsegv:
6638 force_sigsegv(sig);
6639 }
6640
6641 long do_rt_sigreturn(CPUTLGState *env)
6642 {
6643 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6644 struct target_rt_sigframe *frame;
6645 sigset_t set;
6646
6647 trace_user_do_rt_sigreturn(env, frame_addr);
6648 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6649 goto badframe;
6650 }
6651 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6652 set_sigmask(&set);
6653
6654 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6655 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6656 uc.tuc_stack),
6657 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6658 goto badframe;
6659 }
6660
6661 unlock_user_struct(frame, frame_addr, 0);
6662 return -TARGET_QEMU_ESIGRETURN;
6663
6664
6665 badframe:
6666 unlock_user_struct(frame, frame_addr, 0);
6667 force_sig(TARGET_SIGSEGV);
6668 return -TARGET_QEMU_ESIGRETURN;
6669 }
6670
6671 #elif defined(TARGET_RISCV)
6672
6673 /* Signal handler invocation must be transparent for the code being
6674 interrupted. Complete CPU (hart) state is saved on entry and restored
6675 before returning from the handler. Process sigmask is also saved to block
6676 signals while the handler is running. The handler gets its own stack,
6677 which also doubles as storage for the CPU state and sigmask.
6678
6679 The code below is qemu re-implementation of arch/riscv/kernel/signal.c */
6680
/*
 * Guest view of the RISC-V signal context.  x0 is hardwired to zero and
 * is not stored, so gpr[i] is saved at index i-1.
 */
struct target_sigcontext {
    abi_long pc;
    abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
    uint64_t fpr[32];
    uint32_t fcsr;    /* FP control/status register */
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */

struct target_ucontext {
    unsigned long uc_flags;
    struct target_ucontext *uc_link;
    target_stack_t uc_stack;
    struct target_sigcontext uc_mcontext;
    target_sigset_t uc_sigmask;
};

/* RT frame: 2-insn trampoline (the kernel uses the VDSO), then
 * siginfo and ucontext. */
struct target_rt_sigframe {
    uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
    struct target_siginfo info;
    struct target_ucontext uc;
};
6701
6702 static abi_ulong get_sigframe(struct target_sigaction *ka,
6703 CPURISCVState *regs, size_t framesize)
6704 {
6705 abi_ulong sp = regs->gpr[xSP];
6706 int onsigstack = on_sig_stack(sp);
6707
6708 /* redzone */
6709 /* This is the X/Open sanctioned signal stack switching. */
6710 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
6711 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6712 }
6713
6714 sp -= framesize;
6715 sp &= ~3UL; /* align sp on 4-byte boundary */
6716
6717 /* If we are on the alternate signal stack and would overflow it, don't.
6718 Return an always-bogus address instead so we will die with SIGSEGV. */
6719 if (onsigstack && !likely(on_sig_stack(sp))) {
6720 return -1L;
6721 }
6722
6723 return sp;
6724 }
6725
6726 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
6727 {
6728 int i;
6729
6730 __put_user(env->pc, &sc->pc);
6731
6732 for (i = 1; i < 32; i++) {
6733 __put_user(env->gpr[i], &sc->gpr[i - 1]);
6734 }
6735 for (i = 0; i < 32; i++) {
6736 __put_user(env->fpr[i], &sc->fpr[i]);
6737 }
6738
6739 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
6740 __put_user(fcsr, &sc->fcsr);
6741 }
6742
6743 static void setup_ucontext(struct target_ucontext *uc,
6744 CPURISCVState *env, target_sigset_t *set)
6745 {
6746 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
6747 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
6748 abi_ulong ss_size = target_sigaltstack_used.ss_size;
6749
6750 __put_user(0, &(uc->uc_flags));
6751 __put_user(0, &(uc->uc_link));
6752
6753 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
6754 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
6755 __put_user(ss_size, &(uc->uc_stack.ss_size));
6756
6757 int i;
6758 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6759 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
6760 }
6761
6762 setup_sigcontext(&uc->uc_mcontext, env);
6763 }
6764
6765 static inline void install_sigtramp(uint32_t *tramp)
6766 {
6767 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
6768 __put_user(0x00000073, tramp + 1); /* ecall */
6769 }
6770
6771 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6772 target_siginfo_t *info,
6773 target_sigset_t *set, CPURISCVState *env)
6774 {
6775 abi_ulong frame_addr;
6776 struct target_rt_sigframe *frame;
6777
6778 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6779 trace_user_setup_rt_frame(env, frame_addr);
6780
6781 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6782 goto badframe;
6783 }
6784
6785 setup_ucontext(&frame->uc, env, set);
6786 tswap_siginfo(&frame->info, info);
6787 install_sigtramp(frame->tramp);
6788
6789 env->pc = ka->_sa_handler;
6790 env->gpr[xSP] = frame_addr;
6791 env->gpr[xA0] = sig;
6792 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6793 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6794 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6795
6796 return;
6797
6798 badframe:
6799 unlock_user_struct(frame, frame_addr, 1);
6800 if (sig == TARGET_SIGSEGV) {
6801 ka->_sa_handler = TARGET_SIG_DFL;
6802 }
6803 force_sig(TARGET_SIGSEGV);
6804 }
6805
6806 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
6807 {
6808 int i;
6809
6810 __get_user(env->pc, &sc->pc);
6811
6812 for (i = 1; i < 32; ++i) {
6813 __get_user(env->gpr[i], &sc->gpr[i - 1]);
6814 }
6815 for (i = 0; i < 32; ++i) {
6816 __get_user(env->fpr[i], &sc->fpr[i]);
6817 }
6818
6819 uint32_t fcsr;
6820 __get_user(fcsr, &sc->fcsr);
6821 csr_write_helper(env, fcsr, CSR_FCSR);
6822 }
6823
6824 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
6825 {
6826 sigset_t blocked;
6827 target_sigset_t target_set;
6828 int i;
6829
6830 target_sigemptyset(&target_set);
6831 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6832 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
6833 }
6834
6835 target_to_host_sigset_internal(&blocked, &target_set);
6836 set_sigmask(&blocked);
6837
6838 restore_sigcontext(env, &uc->uc_mcontext);
6839 }
6840
6841 long do_rt_sigreturn(CPURISCVState *env)
6842 {
6843 struct target_rt_sigframe *frame;
6844 abi_ulong frame_addr;
6845
6846 frame_addr = env->gpr[xSP];
6847 trace_user_do_sigreturn(env, frame_addr);
6848 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6849 goto badframe;
6850 }
6851
6852 restore_ucontext(env, &frame->uc);
6853
6854 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6855 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
6856 goto badframe;
6857 }
6858
6859 unlock_user_struct(frame, frame_addr, 0);
6860 return -TARGET_QEMU_ESIGRETURN;
6861
6862 badframe:
6863 unlock_user_struct(frame, frame_addr, 0);
6864 force_sig(TARGET_SIGSEGV);
6865 return 0;
6866 }
6867
6868 #elif defined(TARGET_HPPA)
6869
/* Guest view of the HPPA sigcontext; layout must match the guest
 * kernel ABI. */
struct target_sigcontext {
    abi_ulong sc_flags;     /* PARISC_SC_FLAG_* bits, see setup_sigcontext() */
    abi_ulong sc_gr[32];    /* gr[0] slot carries the PSW */
    uint64_t sc_fr[32];
    abi_ulong sc_iasq[2];   /* instruction address space queue */
    abi_ulong sc_iaoq[2];   /* instruction address offset queue (front/back) */
    abi_ulong sc_sar;
};

struct target_ucontext {
    abi_uint tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    abi_uint pad[1];
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* RT frame: trampoline first (HPPA stacks grow upward), then siginfo
 * and ucontext. */
struct target_rt_sigframe {
    abi_uint tramp[9];
    target_siginfo_t info;
    struct target_ucontext uc;
    /* hidden location of upper halves of pa2.0 64-bit gregs */
};
6894
/*
 * Fill a guest HPPA sigcontext from CPU state.  The interesting part is
 * the instruction address queue: if the front IAOQ is inside the
 * gateway page (below TARGET_PAGE_SIZE) the guest was executing a
 * syscall, so the continuation address saved in gr31 is recorded
 * instead, and the IN_SYSCALL flag is set.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
{
    int flags = 0;
    int i;

    /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */

    if (env->iaoq_f < TARGET_PAGE_SIZE) {
        /* In the gateway page, executing a syscall.  gr31 holds the
           return address; resume there (and at +4) after the signal. */
        flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
        __put_user(env->gr[31], &sc->sc_iaoq[0]);
        __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
    } else {
        __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
        __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
    }
    __put_user(0, &sc->sc_iasq[0]);
    __put_user(0, &sc->sc_iasq[1]);
    __put_user(flags, &sc->sc_flags);

    /* gr[0] slot carries the processor status word, not a register. */
    __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gr[i], &sc->sc_gr[i]);
    }

    /* fr[0] slot carries the FP status shadow in its high half. */
    __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->fr[i], &sc->sc_fr[i]);
    }

    __put_user(env->cr[CR_SAR], &sc->sc_sar);
}
6927
6928 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6929 {
6930 target_ulong psw;
6931 int i;
6932
6933 __get_user(psw, &sc->sc_gr[0]);
6934 cpu_hppa_put_psw(env, psw);
6935
6936 for (i = 1; i < 32; ++i) {
6937 __get_user(env->gr[i], &sc->sc_gr[i]);
6938 }
6939 for (i = 0; i < 32; ++i) {
6940 __get_user(env->fr[i], &sc->sc_fr[i]);
6941 }
6942 cpu_hppa_loaded_fr0(env);
6943
6944 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6945 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6946 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6947 }
6948
/* No, this doesn't look right, but it's copied straight from the kernel. */
/* NOTE(review): the +48+64 padding presumably mirrors the kernel's
   sigframe sizing on parisc — confirm against the kernel source. */
#define PARISC_RT_SIGFRAME_SIZE32 \
    ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)

/*
 * Build an HPPA RT signal frame and enter the handler.  HPPA stacks
 * grow upward: the frame is placed above sp and gr30 is advanced past
 * it.  A handler address with bit 1 set is an ELF function descriptor
 * (entry point + GP), which must be dereferenced first.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    abi_ulong frame_addr, sp, haddr;
    struct target_rt_sigframe *frame;
    int i;

    sp = env->gr[30];
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(sp) == 0) {
            /* Upward-growing stack: start at the *base* of the alt
               stack, rounded up to 64 bytes. */
            sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
        }
    }
    frame_addr = QEMU_ALIGN_UP(sp, 64);
    sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;

    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    tswap_siginfo(&frame->info, info);
    frame->uc.tuc_flags = 0;
    frame->uc.tuc_link = 0;

    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_sigcontext(&frame->uc.tuc_mcontext, env);

    /* Sigreturn trampoline (only 4 of the 9 tramp words are used). */
    __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
    __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
    __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
    __put_user(0x08000240, frame->tramp + 3); /* nop */

    unlock_user_struct(frame, frame_addr, 1);

    /* h2g() below is address arithmetic only — no dereference of the
       now-unlocked frame pointer takes place. */
    env->gr[2] = h2g(frame->tramp);
    env->gr[30] = sp;
    env->gr[26] = sig;
    env->gr[25] = h2g(&frame->info);
    env->gr[24] = h2g(&frame->uc);

    haddr = ka->_sa_handler;
    if (haddr & 2) {
        /* Function descriptor: load the real entry point and GP (gr19). */
        target_ulong *fdesc, dest;

        haddr &= -4;
        if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
            goto give_sigsegv;
        }
        __get_user(dest, fdesc);
        __get_user(env->gr[19], fdesc + 1);
        unlock_user_struct(fdesc, haddr, 1);
        haddr = dest;
    }
    env->iaoq_f = haddr;
    env->iaoq_b = haddr + 4;
    return;

give_sigsegv:
    force_sigsegv(sig);
}
7026
7027 long do_rt_sigreturn(CPUArchState *env)
7028 {
7029 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
7030 struct target_rt_sigframe *frame;
7031 sigset_t set;
7032
7033 trace_user_do_rt_sigreturn(env, frame_addr);
7034 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
7035 goto badframe;
7036 }
7037 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
7038 set_sigmask(&set);
7039
7040 restore_sigcontext(env, &frame->uc.tuc_mcontext);
7041 unlock_user_struct(frame, frame_addr, 0);
7042
7043 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
7044 uc.tuc_stack),
7045 0, env->gr[30]) == -EFAULT) {
7046 goto badframe;
7047 }
7048
7049 unlock_user_struct(frame, frame_addr, 0);
7050 return -TARGET_QEMU_ESIGRETURN;
7051
7052 badframe:
7053 force_sig(TARGET_SIGSEGV);
7054 return -TARGET_QEMU_ESIGRETURN;
7055 }
7056
7057 #elif defined(TARGET_XTENSA)
7058
/* Guest view of the Xtensa sigcontext; layout must match the guest
 * kernel ABI. */
struct target_sigcontext {
    abi_ulong sc_pc;
    abi_ulong sc_ps;
    abi_ulong sc_lbeg;      /* loop begin/end/count registers */
    abi_ulong sc_lend;
    abi_ulong sc_lcount;
    abi_ulong sc_sar;
    abi_ulong sc_acclo;     /* MAC16 accumulator (not filled here) */
    abi_ulong sc_acchi;
    abi_ulong sc_a[16];     /* current window a0..a15 */
    abi_ulong sc_xtregs;    /* written as 0; extended regs TODO */
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* RT frame: siginfo + ucontext, then the 6-byte sigreturn trampoline
 * and a 4-word register-window spill area. */
struct target_rt_sigframe {
    target_siginfo_t info;
    struct target_ucontext uc;
    /* TODO: xtregs */
    uint8_t retcode[6];
    abi_ulong window[4];
};
7087
7088 static abi_ulong get_sigframe(struct target_sigaction *sa,
7089 CPUXtensaState *env,
7090 unsigned long framesize)
7091 {
7092 abi_ulong sp = env->regs[1];
7093
7094 /* This is the X/Open sanctioned signal stack switching. */
7095 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
7096 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
7097 }
7098 return (sp - framesize) & -16;
7099 }
7100
/*
 * Spill all live register windows to their guest-stack save areas so
 * that sc_a[] in the sigcontext captures a consistent view.  Walks the
 * window start bitmap, rotating the window by each frame's call
 * increment and writing a0-a3 (and a4..a(d*4-1) for call8/call12
 * frames) to the spill locations below the corresponding stack
 * pointers.  Returns non-zero on success, 0 if any guest-memory
 * access failed.
 */
static int flush_window_regs(CPUXtensaState *env)
{
    uint32_t wb = env->sregs[WINDOW_BASE];
    uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1);
    unsigned d = ctz32(ws) + 1;     /* distance to the next live frame */
    unsigned i;
    int ret = 0;

    for (i = d; i < env->config->nareg / 4; i += d) {
        uint32_t ssp, osp;
        unsigned j;

        ws >>= d;
        xtensa_rotate_window(env, d);

        /* The low WINDOW_START bits encode the caller's call increment:
           call4 -> d=1, call8 -> d=2, call12 -> d=3. */
        if (ws & 0x1) {
            ssp = env->regs[5];
            d = 1;
        } else if (ws & 0x2) {
            /* call8: the extra save area sits below the caller's sp,
               whose value is read back from the stack at a1-12. */
            ssp = env->regs[9];
            ret |= get_user_ual(osp, env->regs[1] - 12);
            osp -= 32;
            d = 2;
        } else if (ws & 0x4) {
            /* call12: likewise, with a larger save area. */
            ssp = env->regs[13];
            ret |= get_user_ual(osp, env->regs[1] - 12);
            osp -= 48;
            d = 3;
        } else {
            g_assert_not_reached();
        }

        /* a0..a3 always spill just below ssp ... */
        for (j = 0; j < 4; ++j) {
            ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4);
        }
        /* ... a4 and up (call8/call12) spill into the osp area. */
        for (j = 4; j < d * 4; ++j) {
            ret |= put_user_ual(env->regs[j], osp - 16 + j * 4);
        }
    }
    /* One final rotation brings WINDOW_BASE back to its start value. */
    xtensa_rotate_window(env, d);
    g_assert(env->sregs[WINDOW_BASE] == wb);
    return ret == 0;
}
7144
7145 static int setup_sigcontext(struct target_rt_sigframe *frame,
7146 CPUXtensaState *env)
7147 {
7148 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
7149 int i;
7150
7151 __put_user(env->pc, &sc->sc_pc);
7152 __put_user(env->sregs[PS], &sc->sc_ps);
7153 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
7154 __put_user(env->sregs[LEND], &sc->sc_lend);
7155 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
7156 if (!flush_window_regs(env)) {
7157 return 0;
7158 }
7159 for (i = 0; i < 16; ++i) {
7160 __put_user(env->regs[i], sc->sc_a + i);
7161 }
7162 __put_user(0, &sc->sc_xtregs);
7163 /* TODO: xtregs */
7164 return 1;
7165 }
7166
7167 static void setup_rt_frame(int sig, struct target_sigaction *ka,
7168 target_siginfo_t *info,
7169 target_sigset_t *set, CPUXtensaState *env)
7170 {
7171 abi_ulong frame_addr;
7172 struct target_rt_sigframe *frame;
7173 uint32_t ra;
7174 int i;
7175
7176 frame_addr = get_sigframe(ka, env, sizeof(*frame));
7177 trace_user_setup_rt_frame(env, frame_addr);
7178
7179 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
7180 goto give_sigsegv;
7181 }
7182
7183 if (ka->sa_flags & SA_SIGINFO) {
7184 tswap_siginfo(&frame->info, info);
7185 }
7186
7187 __put_user(0, &frame->uc.tuc_flags);
7188 __put_user(0, &frame->uc.tuc_link);
7189 __put_user(target_sigaltstack_used.ss_sp,
7190 &frame->uc.tuc_stack.ss_sp);
7191 __put_user(sas_ss_flags(env->regs[1]),
7192 &frame->uc.tuc_stack.ss_flags);
7193 __put_user(target_sigaltstack_used.ss_size,
7194 &frame->uc.tuc_stack.ss_size);
7195 if (!setup_sigcontext(frame, env)) {
7196 unlock_user_struct(frame, frame_addr, 0);
7197 goto give_sigsegv;
7198 }
7199 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
7200 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
7201 }
7202
7203 if (ka->sa_flags & TARGET_SA_RESTORER) {
7204 ra = ka->sa_restorer;
7205 } else {
7206 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
7207 #ifdef TARGET_WORDS_BIGENDIAN
7208 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
7209 __put_user(0x22, &frame->retcode[0]);
7210 __put_user(0x0a, &frame->retcode[1]);
7211 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
7212 /* Generate instruction: SYSCALL */
7213 __put_user(0x00, &frame->retcode[3]);
7214 __put_user(0x05, &frame->retcode[4]);
7215 __put_user(0x00, &frame->retcode[5]);
7216 #else
7217 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
7218 __put_user(0x22, &frame->retcode[0]);
7219 __put_user(0xa0, &frame->retcode[1]);
7220 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
7221 /* Generate instruction: SYSCALL */
7222 __put_user(0x00, &frame->retcode[3]);
7223 __put_user(0x50, &frame->retcode[4]);
7224 __put_user(0x00, &frame->retcode[5]);
7225 #endif
7226 }
7227 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
7228 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
7229 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
7230 }
7231 memset(env->regs, 0, sizeof(env->regs));
7232 env->pc = ka->_sa_handler;
7233 env->regs[1] = frame_addr;
7234 env->sregs[WINDOW_BASE] = 0;
7235 env->sregs[WINDOW_START] = 1;
7236
7237 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
7238 env->regs[6] = sig;
7239 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
7240 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
7241 unlock_user_struct(frame, frame_addr, 1);
7242 return;
7243
7244 give_sigsegv:
7245 force_sigsegv(sig);
7246 return;
7247 }
7248
/*
 * Reload CPU state from the sigcontext of an Xtensa RT frame.  The
 * register window is reset to a single frame at base 0; from the saved
 * PS only the CALLINC field is taken over, the rest of PS keeps its
 * current value.
 */
static void restore_sigcontext(CPUXtensaState *env,
                               struct target_rt_sigframe *frame)
{
    struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
    uint32_t ps;
    int i;

    __get_user(env->pc, &sc->sc_pc);
    __get_user(ps, &sc->sc_ps);
    __get_user(env->sregs[LBEG], &sc->sc_lbeg);
    __get_user(env->sregs[LEND], &sc->sc_lend);
    __get_user(env->sregs[LCOUNT], &sc->sc_lcount);

    env->sregs[WINDOW_BASE] = 0;
    env->sregs[WINDOW_START] = 1;
    /* Adopt only CALLINC from the saved PS; other PS bits are kept. */
    env->sregs[PS] = deposit32(env->sregs[PS],
                               PS_CALLINC_SHIFT,
                               PS_CALLINC_LEN,
                               extract32(ps, PS_CALLINC_SHIFT,
                                         PS_CALLINC_LEN));
    for (i = 0; i < 16; ++i) {
        __get_user(env->regs[i], sc->sc_a + i);
    }
    /* TODO: xtregs */
}
7274
7275 long do_rt_sigreturn(CPUXtensaState *env)
7276 {
7277 abi_ulong frame_addr = env->regs[1];
7278 struct target_rt_sigframe *frame;
7279 sigset_t set;
7280
7281 trace_user_do_rt_sigreturn(env, frame_addr);
7282 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
7283 goto badframe;
7284 }
7285 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
7286 set_sigmask(&set);
7287
7288 restore_sigcontext(env, frame);
7289
7290 if (do_sigaltstack(frame_addr +
7291 offsetof(struct target_rt_sigframe, uc.tuc_stack),
7292 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
7293 goto badframe;
7294 }
7295 unlock_user_struct(frame, frame_addr, 0);
7296 return -TARGET_QEMU_ESIGRETURN;
7297
7298 badframe:
7299 unlock_user_struct(frame, frame_addr, 0);
7300 force_sig(TARGET_SIGSEGV);
7301 return -TARGET_QEMU_ESIGRETURN;
7302 }
7303
7304 #else
7305 #error Target needs to add support for signal handling
7306 #endif
7307
7308 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
7309 struct emulated_sigtable *k)
7310 {
7311 CPUState *cpu = ENV_GET_CPU(cpu_env);
7312 abi_ulong handler;
7313 sigset_t set;
7314 target_sigset_t target_old_set;
7315 struct target_sigaction *sa;
7316 TaskState *ts = cpu->opaque;
7317
7318 trace_user_handle_signal(cpu_env, sig);
7319 /* dequeue signal */
7320 k->pending = 0;
7321
7322 sig = gdb_handlesig(cpu, sig);
7323 if (!sig) {
7324 sa = NULL;
7325 handler = TARGET_SIG_IGN;
7326 } else {
7327 sa = &sigact_table[sig - 1];
7328 handler = sa->_sa_handler;
7329 }
7330
7331 if (do_strace) {
7332 print_taken_signal(sig, &k->info);
7333 }
7334
7335 if (handler == TARGET_SIG_DFL) {
7336 /* default handler : ignore some signal. The other are job control or fatal */
7337 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
7338 kill(getpid(),SIGSTOP);
7339 } else if (sig != TARGET_SIGCHLD &&
7340 sig != TARGET_SIGURG &&
7341 sig != TARGET_SIGWINCH &&
7342 sig != TARGET_SIGCONT) {
7343 dump_core_and_abort(sig);
7344 }
7345 } else if (handler == TARGET_SIG_IGN) {
7346 /* ignore sig */
7347 } else if (handler == TARGET_SIG_ERR) {
7348 dump_core_and_abort(sig);
7349 } else {
7350 /* compute the blocked signals during the handler execution */
7351 sigset_t *blocked_set;
7352
7353 target_to_host_sigset(&set, &sa->sa_mask);
7354 /* SA_NODEFER indicates that the current signal should not be
7355 blocked during the handler */
7356 if (!(sa->sa_flags & TARGET_SA_NODEFER))
7357 sigaddset(&set, target_to_host_signal(sig));
7358
7359 /* save the previous blocked signal state to restore it at the
7360 end of the signal execution (see do_sigreturn) */
7361 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
7362
7363 /* block signals in the handler */
7364 blocked_set = ts->in_sigsuspend ?
7365 &ts->sigsuspend_mask : &ts->signal_mask;
7366 sigorset(&ts->signal_mask, blocked_set, &set);
7367 ts->in_sigsuspend = 0;
7368
7369 /* if the CPU is in VM86 mode, we restore the 32 bit values */
7370 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
7371 {
7372 CPUX86State *env = cpu_env;
7373 if (env->eflags & VM_MASK)
7374 save_v86_state(env);
7375 }
7376 #endif
7377 /* prepare the stack frame of the virtual CPU */
7378 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
7379 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
7380 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
7381 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
7382 || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
7383 /* These targets do not have traditional signals. */
7384 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
7385 #else
7386 if (sa->sa_flags & TARGET_SA_SIGINFO)
7387 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
7388 else
7389 setup_frame(sig, sa, &target_old_set, cpu_env);
7390 #endif
7391 if (sa->sa_flags & TARGET_SA_RESETHAND) {
7392 sa->_sa_handler = TARGET_SIG_DFL;
7393 }
7394 }
7395 }
7396
/* Deliver all deliverable pending guest signals for this vCPU thread.
 * Runs with host signals blocked while scanning, so host_signal_handler
 * cannot race with the pending-table walk. Synchronous signals (the
 * sync_signal slot) are delivered first and cannot be blocked; ordinary
 * signals are delivered lowest-numbered-first, rescanning after each
 * delivery in case the handler setup itself raised a new sync signal.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        /* Block every host signal so the scan below sees a stable
         * pending state; unblocked again at the bottom of the loop.
         */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            /* If the sync signal is blocked or ignored, force it back to
             * default disposition and unblock it so it can be delivered.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        /* Walk the ordinary pending signals; sigsuspend uses its own
         * temporary mask while it is in effect.
         */
        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS must stay deliverable to the host handler so
         * guest faults are caught even if the guest has them masked.
         */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}