1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28 #include "signal-common.h"
29
30 struct target_sigaltstack target_sigaltstack_used = {
31 .ss_sp = 0,
32 .ss_size = 0,
33 .ss_flags = TARGET_SS_DISABLE,
34 };
35
36 static struct target_sigaction sigact_table[TARGET_NSIG];
37
38 static void host_signal_handler(int host_signum, siginfo_t *info,
39 void *puc);
40
41 static uint8_t host_to_target_signal_table[_NSIG] = {
42 [SIGHUP] = TARGET_SIGHUP,
43 [SIGINT] = TARGET_SIGINT,
44 [SIGQUIT] = TARGET_SIGQUIT,
45 [SIGILL] = TARGET_SIGILL,
46 [SIGTRAP] = TARGET_SIGTRAP,
47 [SIGABRT] = TARGET_SIGABRT,
48 /* [SIGIOT] = TARGET_SIGIOT,*/
49 [SIGBUS] = TARGET_SIGBUS,
50 [SIGFPE] = TARGET_SIGFPE,
51 [SIGKILL] = TARGET_SIGKILL,
52 [SIGUSR1] = TARGET_SIGUSR1,
53 [SIGSEGV] = TARGET_SIGSEGV,
54 [SIGUSR2] = TARGET_SIGUSR2,
55 [SIGPIPE] = TARGET_SIGPIPE,
56 [SIGALRM] = TARGET_SIGALRM,
57 [SIGTERM] = TARGET_SIGTERM,
58 #ifdef SIGSTKFLT
59 [SIGSTKFLT] = TARGET_SIGSTKFLT,
60 #endif
61 [SIGCHLD] = TARGET_SIGCHLD,
62 [SIGCONT] = TARGET_SIGCONT,
63 [SIGSTOP] = TARGET_SIGSTOP,
64 [SIGTSTP] = TARGET_SIGTSTP,
65 [SIGTTIN] = TARGET_SIGTTIN,
66 [SIGTTOU] = TARGET_SIGTTOU,
67 [SIGURG] = TARGET_SIGURG,
68 [SIGXCPU] = TARGET_SIGXCPU,
69 [SIGXFSZ] = TARGET_SIGXFSZ,
70 [SIGVTALRM] = TARGET_SIGVTALRM,
71 [SIGPROF] = TARGET_SIGPROF,
72 [SIGWINCH] = TARGET_SIGWINCH,
73 [SIGIO] = TARGET_SIGIO,
74 [SIGPWR] = TARGET_SIGPWR,
75 [SIGSYS] = TARGET_SIGSYS,
76 /* next signals stay the same */
77 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
78 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
79 To fix this properly we need to do manual signal delivery multiplexed
80 over a single host signal. */
81 [__SIGRTMIN] = __SIGRTMAX,
82 [__SIGRTMAX] = __SIGRTMIN,
83 };
84 static uint8_t target_to_host_signal_table[_NSIG];
85
86 int host_to_target_signal(int sig)
87 {
88 if (sig < 0 || sig >= _NSIG)
89 return sig;
90 return host_to_target_signal_table[sig];
91 }
92
93 int target_to_host_signal(int sig)
94 {
95 if (sig < 0 || sig >= _NSIG)
96 return sig;
97 return target_to_host_signal_table[sig];
98 }
99
100 static inline void target_sigaddset(target_sigset_t *set, int signum)
101 {
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105 }
106
107 static inline int target_sigismember(const target_sigset_t *set, int signum)
108 {
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112 }
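/* Editor's illustration (not part of the original file): the two helpers
 * above use the usual "signal N lives in word (N-1)/BPW, bit (N-1)%BPW"
 * layout. A minimal sketch, assuming a hypothetical 32-bit word size
 * (types and assert() come from the includes at the top of the file): */
#define EXAMPLE_NSIG_BPW 32                        /* stand-in for TARGET_NSIG_BPW */

static void example_sigaddset(uint32_t *words, int signum)
{
    signum--;                                      /* signals are 1-based, bits 0-based */
    words[signum / EXAMPLE_NSIG_BPW] |= (uint32_t)1 << (signum % EXAMPLE_NSIG_BPW);
}

static void example_sigset_layout(void)
{
    uint32_t words[2] = { 0, 0 };
    example_sigaddset(words, 33);                  /* signal 33 ...            */
    assert(words[0] == 0 && words[1] == 1);        /* ... lands in word 1, bit 0 */
}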
113
114 void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
116 {
117 int i;
118 target_sigemptyset(d);
119 for (i = 1; i <= TARGET_NSIG; i++) {
120 if (sigismember(s, i)) {
121 target_sigaddset(d, host_to_target_signal(i));
122 }
123 }
124 }
125
126 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
127 {
128 target_sigset_t d1;
129 int i;
130
131 host_to_target_sigset_internal(&d1, s);
132 for(i = 0;i < TARGET_NSIG_WORDS; i++)
133 d->sig[i] = tswapal(d1.sig[i]);
134 }
135
136 void target_to_host_sigset_internal(sigset_t *d,
137 const target_sigset_t *s)
138 {
139 int i;
140 sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (target_sigismember(s, i)) {
143 sigaddset(d, target_to_host_signal(i));
144 }
145 }
146 }
147
148 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
149 {
150 target_sigset_t s1;
151 int i;
152
153 for(i = 0;i < TARGET_NSIG_WORDS; i++)
154 s1.sig[i] = tswapal(s->sig[i]);
155 target_to_host_sigset_internal(d, &s1);
156 }
157
158 void host_to_target_old_sigset(abi_ulong *old_sigset,
159 const sigset_t *sigset)
160 {
161 target_sigset_t d;
162 host_to_target_sigset(&d, sigset);
163 *old_sigset = d.sig[0];
164 }
165
166 void target_to_host_old_sigset(sigset_t *sigset,
167 const abi_ulong *old_sigset)
168 {
169 target_sigset_t d;
170 int i;
171
172 d.sig[0] = *old_sigset;
173 for(i = 1;i < TARGET_NSIG_WORDS; i++)
174 d.sig[i] = 0;
175 target_to_host_sigset(sigset, &d);
176 }
177
178 int block_signals(void)
179 {
180 TaskState *ts = (TaskState *)thread_cpu->opaque;
181 sigset_t set;
182
183 /* It's OK to block everything including SIGSEGV, because we won't
184 * run any further guest code before unblocking signals in
185 * process_pending_signals().
186 */
187 sigfillset(&set);
188 sigprocmask(SIG_SETMASK, &set, 0);
189
190 return atomic_xchg(&ts->signal_pending, 1);
191 }
192
193 /* Wrapper for sigprocmask function
194 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
195 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
196 * a signal was already pending and the syscall must be restarted, or
197 * 0 on success.
198 * If set is NULL, this is guaranteed not to fail.
199 */
200 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
201 {
202 TaskState *ts = (TaskState *)thread_cpu->opaque;
203
204 if (oldset) {
205 *oldset = ts->signal_mask;
206 }
207
208 if (set) {
209 int i;
210
211 if (block_signals()) {
212 return -TARGET_ERESTARTSYS;
213 }
214
215 switch (how) {
216 case SIG_BLOCK:
217 sigorset(&ts->signal_mask, &ts->signal_mask, set);
218 break;
219 case SIG_UNBLOCK:
220 for (i = 1; i <= NSIG; ++i) {
221 if (sigismember(set, i)) {
222 sigdelset(&ts->signal_mask, i);
223 }
224 }
225 break;
226 case SIG_SETMASK:
227 ts->signal_mask = *set;
228 break;
229 default:
230 g_assert_not_reached();
231 }
232
233 /* Silently ignore attempts to change blocking status of KILL or STOP */
234 sigdelset(&ts->signal_mask, SIGKILL);
235 sigdelset(&ts->signal_mask, SIGSTOP);
236 }
237 return 0;
238 }
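/* Editor's sketch (hypothetical caller, not from this file; the real
 * dispatch lives in the syscall layer): a guest sigprocmask request is
 * converted to host layout, applied via do_sigprocmask(), and the old mask
 * converted back. `how` is assumed to already be the host SIG_* value, and
 * -TARGET_ERESTARTSYS is propagated so the guest syscall is restarted after
 * the pending signal has been delivered. */
static abi_long example_guest_sigprocmask(int how,
                                          const target_sigset_t *target_set,
                                          target_sigset_t *target_oldset)
{
    sigset_t set, oldset;
    abi_long ret;

    target_to_host_sigset(&set, target_set);           /* guest -> host layout */
    ret = do_sigprocmask(how, &set, &oldset);
    if (ret == 0 && target_oldset) {
        host_to_target_sigset(target_oldset, &oldset);  /* host -> guest layout */
    }
    return ret;                                         /* 0 or -TARGET_ERESTARTSYS */
}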
239
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242 * caller is assumed to have called block_signals() already.
243 */
244 void set_sigmask(const sigset_t *set)
245 {
246 TaskState *ts = (TaskState *)thread_cpu->opaque;
247
248 ts->signal_mask = *set;
249 }
250 #endif
251
252 /* siginfo conversion */
253
254 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
255 const siginfo_t *info)
256 {
257 int sig = host_to_target_signal(info->si_signo);
258 int si_code = info->si_code;
259 int si_type;
260 tinfo->si_signo = sig;
261 tinfo->si_errno = 0;
262 tinfo->si_code = info->si_code;
263
264 /* This memset serves two purposes:
265 * (1) ensure we don't leak random junk to the guest later
266 * (2) placate false positives from gcc about fields
267 * being used uninitialized if it chooses to inline both this
268 * function and tswap_siginfo() into host_to_target_siginfo().
269 */
270 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
271
272 /* This is awkward, because we have to use a combination of
273 * the si_code and si_signo to figure out which of the union's
274 * members are valid. (Within the host kernel it is always possible
275 * to tell, but the kernel carefully avoids giving userspace the
276 * high 16 bits of si_code, so we don't have the information to
277 * do this the easy way...) We therefore make our best guess,
278 * bearing in mind that a guest can spoof most of the si_codes
279 * via rt_sigqueueinfo() if it likes.
280 *
281 * Once we have made our guess, we record it in the top 16 bits of
282 * the si_code, so that tswap_siginfo() later can use it.
283 * tswap_siginfo() will strip these top bits out before writing
284 * si_code to the guest (sign-extending the lower bits).
285 */
286
287 switch (si_code) {
288 case SI_USER:
289 case SI_TKILL:
290 case SI_KERNEL:
291 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
292 * These are the only unspoofable si_code values.
293 */
294 tinfo->_sifields._kill._pid = info->si_pid;
295 tinfo->_sifields._kill._uid = info->si_uid;
296 si_type = QEMU_SI_KILL;
297 break;
298 default:
299 /* Everything else is spoofable. Make best guess based on signal */
300 switch (sig) {
301 case TARGET_SIGCHLD:
302 tinfo->_sifields._sigchld._pid = info->si_pid;
303 tinfo->_sifields._sigchld._uid = info->si_uid;
304 tinfo->_sifields._sigchld._status
305 = host_to_target_waitstatus(info->si_status);
306 tinfo->_sifields._sigchld._utime = info->si_utime;
307 tinfo->_sifields._sigchld._stime = info->si_stime;
308 si_type = QEMU_SI_CHLD;
309 break;
310 case TARGET_SIGIO:
311 tinfo->_sifields._sigpoll._band = info->si_band;
312 tinfo->_sifields._sigpoll._fd = info->si_fd;
313 si_type = QEMU_SI_POLL;
314 break;
315 default:
316 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
317 tinfo->_sifields._rt._pid = info->si_pid;
318 tinfo->_sifields._rt._uid = info->si_uid;
319 /* XXX: potential problem if 64 bit */
320 tinfo->_sifields._rt._sigval.sival_ptr
321 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
322 si_type = QEMU_SI_RT;
323 break;
324 }
325 break;
326 }
327
328 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
329 }
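/* Editor's illustration (uses only the qemu/bitops.h helpers already
 * included above): the si_type guess made here round-trips through the top
 * 16 bits of si_code and is peeled off again by tswap_siginfo() below. */
static void example_si_code_roundtrip(void)
{
    int packed = deposit32(TARGET_SI_KERNEL, 16, 16, QEMU_SI_KILL);

    assert(extract32(packed, 16, 16) == QEMU_SI_KILL);      /* internal marker   */
    assert(sextract32(packed, 0, 16) == TARGET_SI_KERNEL);  /* guest-visible code */
}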
330
331 void tswap_siginfo(target_siginfo_t *tinfo,
332 const target_siginfo_t *info)
333 {
334 int si_type = extract32(info->si_code, 16, 16);
335 int si_code = sextract32(info->si_code, 0, 16);
336
337 __put_user(info->si_signo, &tinfo->si_signo);
338 __put_user(info->si_errno, &tinfo->si_errno);
339 __put_user(si_code, &tinfo->si_code);
340
341 /* We can use our internal marker of which fields in the structure
342 * are valid, rather than duplicating the guesswork of
343 * host_to_target_siginfo_noswap() here.
344 */
345 switch (si_type) {
346 case QEMU_SI_KILL:
347 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
348 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
349 break;
350 case QEMU_SI_TIMER:
351 __put_user(info->_sifields._timer._timer1,
352 &tinfo->_sifields._timer._timer1);
353 __put_user(info->_sifields._timer._timer2,
354 &tinfo->_sifields._timer._timer2);
355 break;
356 case QEMU_SI_POLL:
357 __put_user(info->_sifields._sigpoll._band,
358 &tinfo->_sifields._sigpoll._band);
359 __put_user(info->_sifields._sigpoll._fd,
360 &tinfo->_sifields._sigpoll._fd);
361 break;
362 case QEMU_SI_FAULT:
363 __put_user(info->_sifields._sigfault._addr,
364 &tinfo->_sifields._sigfault._addr);
365 break;
366 case QEMU_SI_CHLD:
367 __put_user(info->_sifields._sigchld._pid,
368 &tinfo->_sifields._sigchld._pid);
369 __put_user(info->_sifields._sigchld._uid,
370 &tinfo->_sifields._sigchld._uid);
371 __put_user(info->_sifields._sigchld._status,
372 &tinfo->_sifields._sigchld._status);
373 __put_user(info->_sifields._sigchld._utime,
374 &tinfo->_sifields._sigchld._utime);
375 __put_user(info->_sifields._sigchld._stime,
376 &tinfo->_sifields._sigchld._stime);
377 break;
378 case QEMU_SI_RT:
379 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
380 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
381 __put_user(info->_sifields._rt._sigval.sival_ptr,
382 &tinfo->_sifields._rt._sigval.sival_ptr);
383 break;
384 default:
385 g_assert_not_reached();
386 }
387 }
388
389 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
390 {
391 target_siginfo_t tgt_tmp;
392 host_to_target_siginfo_noswap(&tgt_tmp, info);
393 tswap_siginfo(tinfo, &tgt_tmp);
394 }
395
396 /* XXX: we assume that only POSIX RT signals are used. */
397 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
398 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
399 {
400 /* This conversion is used only for the rt_sigqueueinfo syscall,
401 * and so we know that the _rt fields are the valid ones.
402 */
403 abi_ulong sival_ptr;
404
405 __get_user(info->si_signo, &tinfo->si_signo);
406 __get_user(info->si_errno, &tinfo->si_errno);
407 __get_user(info->si_code, &tinfo->si_code);
408 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
409 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
410 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
411 info->si_value.sival_ptr = (void *)(long)sival_ptr;
412 }
413
414 static int fatal_signal (int sig)
415 {
416 switch (sig) {
417 case TARGET_SIGCHLD:
418 case TARGET_SIGURG:
419 case TARGET_SIGWINCH:
420 /* Ignored by default. */
421 return 0;
422 case TARGET_SIGCONT:
423 case TARGET_SIGSTOP:
424 case TARGET_SIGTSTP:
425 case TARGET_SIGTTIN:
426 case TARGET_SIGTTOU:
427 /* Job control signals. */
428 return 0;
429 default:
430 return 1;
431 }
432 }
433
434 /* returns 1 if given signal should dump core if not handled */
435 static int core_dump_signal(int sig)
436 {
437 switch (sig) {
438 case TARGET_SIGABRT:
439 case TARGET_SIGFPE:
440 case TARGET_SIGILL:
441 case TARGET_SIGQUIT:
442 case TARGET_SIGSEGV:
443 case TARGET_SIGTRAP:
444 case TARGET_SIGBUS:
445 return (1);
446 default:
447 return (0);
448 }
449 }
450
451 void signal_init(void)
452 {
453 TaskState *ts = (TaskState *)thread_cpu->opaque;
454 struct sigaction act;
455 struct sigaction oact;
456 int i, j;
457 int host_sig;
458
459 /* generate signal conversion tables */
460 for(i = 1; i < _NSIG; i++) {
461 if (host_to_target_signal_table[i] == 0)
462 host_to_target_signal_table[i] = i;
463 }
464 for(i = 1; i < _NSIG; i++) {
465 j = host_to_target_signal_table[i];
466 target_to_host_signal_table[j] = i;
467 }
468
469 /* Set the signal mask from the host mask. */
470 sigprocmask(0, 0, &ts->signal_mask);
471
472 /* set all host signal handlers. ALL signals are blocked during
473 the handlers to serialize them. */
474 memset(sigact_table, 0, sizeof(sigact_table));
475
476 sigfillset(&act.sa_mask);
477 act.sa_flags = SA_SIGINFO;
478 act.sa_sigaction = host_signal_handler;
479 for(i = 1; i <= TARGET_NSIG; i++) {
480 host_sig = target_to_host_signal(i);
481 sigaction(host_sig, NULL, &oact);
482 if (oact.sa_sigaction == (void *)SIG_IGN) {
483 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
484 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
485 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
486 }
487 /* If there's already a handler installed then something has
488 gone horribly wrong, so don't even try to handle that case. */
489 /* Install some handlers for our own use. We need at least
490 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
491 trap all signals because that affects syscall interrupt
492 behavior. But do trap all default-fatal signals. */
493 if (fatal_signal (i))
494 sigaction(host_sig, &act, NULL);
495 }
496 }
497
498 /* Force a synchronously taken signal. The kernel force_sig() function
499 * also forces the signal to "not blocked, not ignored", but for QEMU
500 * that work is done in process_pending_signals().
501 */
502 void force_sig(int sig)
503 {
504 CPUState *cpu = thread_cpu;
505 CPUArchState *env = cpu->env_ptr;
506 target_siginfo_t info;
507
508 info.si_signo = sig;
509 info.si_errno = 0;
510 info.si_code = TARGET_SI_KERNEL;
511 info._sifields._kill._pid = 0;
512 info._sifields._kill._uid = 0;
513 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
514 }
515
516 /* Force a SIGSEGV if we couldn't write to memory trying to set
517 * up the signal frame. oldsig is the signal we were trying to handle
518 * at the point of failure.
519 */
520 #if !defined(TARGET_RISCV)
521 void force_sigsegv(int oldsig)
522 {
523 if (oldsig == SIGSEGV) {
524 /* Make sure we don't try to deliver the signal again; this will
525 * end up with handle_pending_signal() calling dump_core_and_abort().
526 */
527 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
528 }
529 force_sig(TARGET_SIGSEGV);
530 }
531
532 #endif
533
534 /* abort execution with signal */
535 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
536 {
537 CPUState *cpu = thread_cpu;
538 CPUArchState *env = cpu->env_ptr;
539 TaskState *ts = (TaskState *)cpu->opaque;
540 int host_sig, core_dumped = 0;
541 struct sigaction act;
542
543 host_sig = target_to_host_signal(target_sig);
544 trace_user_force_sig(env, target_sig, host_sig);
545 gdb_signalled(env, target_sig);
546
547 /* dump core if supported by target binary format */
548 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
549 stop_all_tasks();
550 core_dumped =
551 ((*ts->bprm->core_dump)(target_sig, env) == 0);
552 }
553 if (core_dumped) {
554 /* We already dumped the core of the target process; we don't want
555 * a coredump of QEMU itself. */
556 struct rlimit nodump;
557 getrlimit(RLIMIT_CORE, &nodump);
558 nodump.rlim_cur=0;
559 setrlimit(RLIMIT_CORE, &nodump);
560 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
561 target_sig, strsignal(host_sig), "core dumped" );
562 }
563
564 /* The proper exit code for dying from an uncaught signal is
565 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
566 * a negative value. To get the proper exit code we need to
567 * actually die from an uncaught signal. Here we install the default
568 * signal handler, send ourselves the signal, and wait for it to
569 * arrive. */
570 sigfillset(&act.sa_mask);
571 act.sa_handler = SIG_DFL;
572 act.sa_flags = 0;
573 sigaction(host_sig, &act, NULL);
574
575 /* For some reason raise(host_sig) doesn't send the signal when
576 * statically linked on x86-64. */
577 kill(getpid(), host_sig);
578
579 /* Make sure the signal isn't masked (just reuse the mask inside
580 of act) */
581 sigdelset(&act.sa_mask, host_sig);
582 sigsuspend(&act.sa_mask);
583
584 /* unreachable */
585 abort();
586 }
587
588 /* queue a signal so that it will be sent to the virtual CPU as soon
589 as possible */
590 int queue_signal(CPUArchState *env, int sig, int si_type,
591 target_siginfo_t *info)
592 {
593 CPUState *cpu = ENV_GET_CPU(env);
594 TaskState *ts = cpu->opaque;
595
596 trace_user_queue_signal(env, sig);
597
598 info->si_code = deposit32(info->si_code, 16, 16, si_type);
599
600 ts->sync_signal.info = *info;
601 ts->sync_signal.pending = sig;
602 /* signal that a new signal is pending */
603 atomic_set(&ts->signal_pending, 1);
604 return 1; /* indicates that the signal was queued */
605 }
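/* Editor's sketch (hedged; mirrors how per-target code typically raises a
 * synchronous fault through queue_signal(); the siginfo field names are the
 * ones used elsewhere in this file): */
static void example_queue_segv(CPUArchState *env, abi_ulong fault_addr)
{
    target_siginfo_t info = { 0 };

    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = fault_addr;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}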
606
607 #ifndef HAVE_SAFE_SYSCALL
608 static inline void rewind_if_in_safe_syscall(void *puc)
609 {
610 /* Default version: never rewind */
611 }
612 #endif
613
614 static void host_signal_handler(int host_signum, siginfo_t *info,
615 void *puc)
616 {
617 CPUArchState *env = thread_cpu->env_ptr;
618 CPUState *cpu = ENV_GET_CPU(env);
619 TaskState *ts = cpu->opaque;
620
621 int sig;
622 target_siginfo_t tinfo;
623 ucontext_t *uc = puc;
624 struct emulated_sigtable *k;
625
626 /* The CPU emulator uses some host signals to detect exceptions,
627 so we forward those signals to it. */
628 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
629 && info->si_code > 0) {
630 if (cpu_signal_handler(host_signum, info, puc))
631 return;
632 }
633
634 /* get target signal number */
635 sig = host_to_target_signal(host_signum);
636 if (sig < 1 || sig > TARGET_NSIG)
637 return;
638 trace_user_host_signal(env, host_signum, sig);
639
640 rewind_if_in_safe_syscall(puc);
641
642 host_to_target_siginfo_noswap(&tinfo, info);
643 k = &ts->sigtab[sig - 1];
644 k->info = tinfo;
645 k->pending = sig;
646 ts->signal_pending = 1;
647
648 /* Block host signals until the target signal handler is entered. We
649 * can't block SIGSEGV or SIGBUS while we're executing guest
650 * code in case the guest code provokes one in the window between
651 * now and it getting out to the main loop. Signals will be
652 * unblocked again in process_pending_signals().
653 *
654 * WARNING: we cannot use sigfillset() here because the uc_sigmask
655 * field is a kernel sigset_t, which is much smaller than the
656 * libc sigset_t which sigfillset() operates on. Using sigfillset()
657 * would write 0xff bytes off the end of the structure and trash
658 * data on the struct.
659 * We can't use sizeof(uc->uc_sigmask) either, because the libc
660 * headers define the struct field with the wrong (too large) type.
661 */
662 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
663 sigdelset(&uc->uc_sigmask, SIGSEGV);
664 sigdelset(&uc->uc_sigmask, SIGBUS);
665
666 /* interrupt the virtual CPU as soon as possible */
667 cpu_exit(thread_cpu);
668 }
669
670 /* do_sigaltstack() returns target values and errnos. */
671 /* compare linux/kernel/signal.c:do_sigaltstack() */
672 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
673 {
674 int ret;
675 struct target_sigaltstack oss;
676
677 /* XXX: test errors */
678 if(uoss_addr)
679 {
680 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
681 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
682 __put_user(sas_ss_flags(sp), &oss.ss_flags);
683 }
684
685 if(uss_addr)
686 {
687 struct target_sigaltstack *uss;
688 struct target_sigaltstack ss;
689 size_t minstacksize = TARGET_MINSIGSTKSZ;
690
691 #if defined(TARGET_PPC64)
692 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
693 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
694 if (get_ppc64_abi(image) > 1) {
695 minstacksize = 4096;
696 }
697 #endif
698
699 ret = -TARGET_EFAULT;
700 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
701 goto out;
702 }
703 __get_user(ss.ss_sp, &uss->ss_sp);
704 __get_user(ss.ss_size, &uss->ss_size);
705 __get_user(ss.ss_flags, &uss->ss_flags);
706 unlock_user_struct(uss, uss_addr, 0);
707
708 ret = -TARGET_EPERM;
709 if (on_sig_stack(sp))
710 goto out;
711
712 ret = -TARGET_EINVAL;
713 if (ss.ss_flags != TARGET_SS_DISABLE
714 && ss.ss_flags != TARGET_SS_ONSTACK
715 && ss.ss_flags != 0)
716 goto out;
717
718 if (ss.ss_flags == TARGET_SS_DISABLE) {
719 ss.ss_size = 0;
720 ss.ss_sp = 0;
721 } else {
722 ret = -TARGET_ENOMEM;
723 if (ss.ss_size < minstacksize) {
724 goto out;
725 }
726 }
727
728 target_sigaltstack_used.ss_sp = ss.ss_sp;
729 target_sigaltstack_used.ss_size = ss.ss_size;
730 }
731
732 if (uoss_addr) {
733 ret = -TARGET_EFAULT;
734 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
735 goto out;
736 }
737
738 ret = 0;
739 out:
740 return ret;
741 }
742
743 /* do_sigaction() returns target values and host errnos */
744 int do_sigaction(int sig, const struct target_sigaction *act,
745 struct target_sigaction *oact)
746 {
747 struct target_sigaction *k;
748 struct sigaction act1;
749 int host_sig;
750 int ret = 0;
751
752 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
753 return -TARGET_EINVAL;
754 }
755
756 if (block_signals()) {
757 return -TARGET_ERESTARTSYS;
758 }
759
760 k = &sigact_table[sig - 1];
761 if (oact) {
762 __put_user(k->_sa_handler, &oact->_sa_handler);
763 __put_user(k->sa_flags, &oact->sa_flags);
764 #ifdef TARGET_ARCH_HAS_SA_RESTORER
765 __put_user(k->sa_restorer, &oact->sa_restorer);
766 #endif
767 /* Not swapped. */
768 oact->sa_mask = k->sa_mask;
769 }
770 if (act) {
771 /* FIXME: This is not threadsafe. */
772 __get_user(k->_sa_handler, &act->_sa_handler);
773 __get_user(k->sa_flags, &act->sa_flags);
774 #ifdef TARGET_ARCH_HAS_SA_RESTORER
775 __get_user(k->sa_restorer, &act->sa_restorer);
776 #endif
777 /* To be swapped in target_to_host_sigset. */
778 k->sa_mask = act->sa_mask;
779
780 /* we update the host linux signal state */
781 host_sig = target_to_host_signal(sig);
782 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
783 sigfillset(&act1.sa_mask);
784 act1.sa_flags = SA_SIGINFO;
785 if (k->sa_flags & TARGET_SA_RESTART)
786 act1.sa_flags |= SA_RESTART;
787 /* NOTE: it is important to update the host kernel signal
788 ignore state to avoid getting unexpected interrupted
789 syscalls */
790 if (k->_sa_handler == TARGET_SIG_IGN) {
791 act1.sa_sigaction = (void *)SIG_IGN;
792 } else if (k->_sa_handler == TARGET_SIG_DFL) {
793 if (fatal_signal (sig))
794 act1.sa_sigaction = host_signal_handler;
795 else
796 act1.sa_sigaction = (void *)SIG_DFL;
797 } else {
798 act1.sa_sigaction = host_signal_handler;
799 }
800 ret = sigaction(host_sig, &act1, NULL);
801 }
802 }
803 return ret;
804 }
805
806 #if defined(TARGET_I386)
807 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
808
809 struct target_fpreg {
810 uint16_t significand[4];
811 uint16_t exponent;
812 };
813
814 struct target_fpxreg {
815 uint16_t significand[4];
816 uint16_t exponent;
817 uint16_t padding[3];
818 };
819
820 struct target_xmmreg {
821 uint32_t element[4];
822 };
823
824 struct target_fpstate_32 {
825 /* Regular FPU environment */
826 uint32_t cw;
827 uint32_t sw;
828 uint32_t tag;
829 uint32_t ipoff;
830 uint32_t cssel;
831 uint32_t dataoff;
832 uint32_t datasel;
833 struct target_fpreg st[8];
834 uint16_t status;
835 uint16_t magic; /* 0xffff = regular FPU data only */
836
837 /* FXSR FPU environment */
838 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
839 uint32_t mxcsr;
840 uint32_t reserved;
841 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
842 struct target_xmmreg xmm[8];
843 uint32_t padding[56];
844 };
845
846 struct target_fpstate_64 {
847 /* FXSAVE format */
848 uint16_t cw;
849 uint16_t sw;
850 uint16_t twd;
851 uint16_t fop;
852 uint64_t rip;
853 uint64_t rdp;
854 uint32_t mxcsr;
855 uint32_t mxcsr_mask;
856 uint32_t st_space[32];
857 uint32_t xmm_space[64];
858 uint32_t reserved[24];
859 };
860
861 #ifndef TARGET_X86_64
862 # define target_fpstate target_fpstate_32
863 #else
864 # define target_fpstate target_fpstate_64
865 #endif
866
867 struct target_sigcontext_32 {
868 uint16_t gs, __gsh;
869 uint16_t fs, __fsh;
870 uint16_t es, __esh;
871 uint16_t ds, __dsh;
872 uint32_t edi;
873 uint32_t esi;
874 uint32_t ebp;
875 uint32_t esp;
876 uint32_t ebx;
877 uint32_t edx;
878 uint32_t ecx;
879 uint32_t eax;
880 uint32_t trapno;
881 uint32_t err;
882 uint32_t eip;
883 uint16_t cs, __csh;
884 uint32_t eflags;
885 uint32_t esp_at_signal;
886 uint16_t ss, __ssh;
887 uint32_t fpstate; /* pointer */
888 uint32_t oldmask;
889 uint32_t cr2;
890 };
891
892 struct target_sigcontext_64 {
893 uint64_t r8;
894 uint64_t r9;
895 uint64_t r10;
896 uint64_t r11;
897 uint64_t r12;
898 uint64_t r13;
899 uint64_t r14;
900 uint64_t r15;
901
902 uint64_t rdi;
903 uint64_t rsi;
904 uint64_t rbp;
905 uint64_t rbx;
906 uint64_t rdx;
907 uint64_t rax;
908 uint64_t rcx;
909 uint64_t rsp;
910 uint64_t rip;
911
912 uint64_t eflags;
913
914 uint16_t cs;
915 uint16_t gs;
916 uint16_t fs;
917 uint16_t ss;
918
919 uint64_t err;
920 uint64_t trapno;
921 uint64_t oldmask;
922 uint64_t cr2;
923
924 uint64_t fpstate; /* pointer */
925 uint64_t padding[8];
926 };
927
928 #ifndef TARGET_X86_64
929 # define target_sigcontext target_sigcontext_32
930 #else
931 # define target_sigcontext target_sigcontext_64
932 #endif
933
934 /* see Linux/include/uapi/asm-generic/ucontext.h */
935 struct target_ucontext {
936 abi_ulong tuc_flags;
937 abi_ulong tuc_link;
938 target_stack_t tuc_stack;
939 struct target_sigcontext tuc_mcontext;
940 target_sigset_t tuc_sigmask; /* mask last for extensibility */
941 };
942
943 #ifndef TARGET_X86_64
944 struct sigframe {
945 abi_ulong pretcode;
946 int sig;
947 struct target_sigcontext sc;
948 struct target_fpstate fpstate;
949 abi_ulong extramask[TARGET_NSIG_WORDS-1];
950 char retcode[8];
951 };
952
953 struct rt_sigframe {
954 abi_ulong pretcode;
955 int sig;
956 abi_ulong pinfo;
957 abi_ulong puc;
958 struct target_siginfo info;
959 struct target_ucontext uc;
960 struct target_fpstate fpstate;
961 char retcode[8];
962 };
963
964 #else
965
966 struct rt_sigframe {
967 abi_ulong pretcode;
968 struct target_ucontext uc;
969 struct target_siginfo info;
970 struct target_fpstate fpstate;
971 };
972
973 #endif
974
975 /*
976 * Set up a signal frame.
977 */
978
979 /* XXX: save x87 state */
980 static void setup_sigcontext(struct target_sigcontext *sc,
981 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
982 abi_ulong fpstate_addr)
983 {
984 CPUState *cs = CPU(x86_env_get_cpu(env));
985 #ifndef TARGET_X86_64
986 uint16_t magic;
987
988 /* already locked in setup_frame() */
989 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
990 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
991 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
992 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
993 __put_user(env->regs[R_EDI], &sc->edi);
994 __put_user(env->regs[R_ESI], &sc->esi);
995 __put_user(env->regs[R_EBP], &sc->ebp);
996 __put_user(env->regs[R_ESP], &sc->esp);
997 __put_user(env->regs[R_EBX], &sc->ebx);
998 __put_user(env->regs[R_EDX], &sc->edx);
999 __put_user(env->regs[R_ECX], &sc->ecx);
1000 __put_user(env->regs[R_EAX], &sc->eax);
1001 __put_user(cs->exception_index, &sc->trapno);
1002 __put_user(env->error_code, &sc->err);
1003 __put_user(env->eip, &sc->eip);
1004 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1005 __put_user(env->eflags, &sc->eflags);
1006 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1007 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1008
1009 cpu_x86_fsave(env, fpstate_addr, 1);
1010 fpstate->status = fpstate->sw;
1011 magic = 0xffff;
1012 __put_user(magic, &fpstate->magic);
1013 __put_user(fpstate_addr, &sc->fpstate);
1014
1015 /* non-iBCS2 extensions.. */
1016 __put_user(mask, &sc->oldmask);
1017 __put_user(env->cr[2], &sc->cr2);
1018 #else
1019 __put_user(env->regs[R_EDI], &sc->rdi);
1020 __put_user(env->regs[R_ESI], &sc->rsi);
1021 __put_user(env->regs[R_EBP], &sc->rbp);
1022 __put_user(env->regs[R_ESP], &sc->rsp);
1023 __put_user(env->regs[R_EBX], &sc->rbx);
1024 __put_user(env->regs[R_EDX], &sc->rdx);
1025 __put_user(env->regs[R_ECX], &sc->rcx);
1026 __put_user(env->regs[R_EAX], &sc->rax);
1027
1028 __put_user(env->regs[8], &sc->r8);
1029 __put_user(env->regs[9], &sc->r9);
1030 __put_user(env->regs[10], &sc->r10);
1031 __put_user(env->regs[11], &sc->r11);
1032 __put_user(env->regs[12], &sc->r12);
1033 __put_user(env->regs[13], &sc->r13);
1034 __put_user(env->regs[14], &sc->r14);
1035 __put_user(env->regs[15], &sc->r15);
1036
1037 __put_user(cs->exception_index, &sc->trapno);
1038 __put_user(env->error_code, &sc->err);
1039 __put_user(env->eip, &sc->rip);
1040
1041 __put_user(env->eflags, &sc->eflags);
1042 __put_user(env->segs[R_CS].selector, &sc->cs);
1043 __put_user((uint16_t)0, &sc->gs);
1044 __put_user((uint16_t)0, &sc->fs);
1045 __put_user(env->segs[R_SS].selector, &sc->ss);
1046
1047 __put_user(mask, &sc->oldmask);
1048 __put_user(env->cr[2], &sc->cr2);
1049
1050 /* fpstate_addr must be 16 byte aligned for fxsave */
1051 assert(!(fpstate_addr & 0xf));
1052
1053 cpu_x86_fxsave(env, fpstate_addr);
1054 __put_user(fpstate_addr, &sc->fpstate);
1055 #endif
1056 }
1057
1058 /*
1059 * Determine which stack to use..
1060 */
1061
1062 static inline abi_ulong
1063 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1064 {
1065 unsigned long esp;
1066
1067 /* Default to using normal stack */
1068 esp = env->regs[R_ESP];
1069 #ifdef TARGET_X86_64
1070 esp -= 128; /* this is the redzone */
1071 #endif
1072
1073 /* This is the X/Open sanctioned signal stack switching. */
1074 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1075 if (sas_ss_flags(esp) == 0) {
1076 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1077 }
1078 } else {
1079 #ifndef TARGET_X86_64
1080 /* This is the legacy signal stack switching. */
1081 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1082 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1083 ka->sa_restorer) {
1084 esp = (unsigned long) ka->sa_restorer;
1085 }
1086 #endif
1087 }
1088
1089 #ifndef TARGET_X86_64
1090 return (esp - frame_size) & -8ul;
1091 #else
1092 return ((esp - frame_size) & (~15ul)) - 8;
1093 #endif
1094 }
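/* Editor's note (illustration; relies only on the standard SysV x86-64
 * calling convention): the 64-bit branch above leaves the new stack pointer
 * congruent to 8 mod 16, i.e. exactly the state just after a CALL, so the
 * handler's own prologue ends up with a 16-byte-aligned frame. */
static inline int example_x86_64_frame_alignment_ok(unsigned long esp,
                                                    unsigned long frame_size)
{
    unsigned long sp = ((esp - frame_size) & ~15ul) - 8;
    return (sp & 15) == 8;      /* always true for this construction */
}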
1095
1096 #ifndef TARGET_X86_64
1097 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1098 static void setup_frame(int sig, struct target_sigaction *ka,
1099 target_sigset_t *set, CPUX86State *env)
1100 {
1101 abi_ulong frame_addr;
1102 struct sigframe *frame;
1103 int i;
1104
1105 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1106 trace_user_setup_frame(env, frame_addr);
1107
1108 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1109 goto give_sigsegv;
1110
1111 __put_user(sig, &frame->sig);
1112
1113 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1114 frame_addr + offsetof(struct sigframe, fpstate));
1115
1116 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1117 __put_user(set->sig[i], &frame->extramask[i - 1]);
1118 }
1119
1120 /* Set up to return from userspace. If provided, use a stub
1121 already in userspace. */
1122 if (ka->sa_flags & TARGET_SA_RESTORER) {
1123 __put_user(ka->sa_restorer, &frame->pretcode);
1124 } else {
1125 uint16_t val16;
1126 abi_ulong retcode_addr;
1127 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1128 __put_user(retcode_addr, &frame->pretcode);
1129 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1130 val16 = 0xb858;
1131 __put_user(val16, (uint16_t *)(frame->retcode+0));
1132 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1133 val16 = 0x80cd;
1134 __put_user(val16, (uint16_t *)(frame->retcode+6));
1135 }
1136
1137 /* Set up registers for signal handler */
1138 env->regs[R_ESP] = frame_addr;
1139 env->eip = ka->_sa_handler;
1140
1141 cpu_x86_load_seg(env, R_DS, __USER_DS);
1142 cpu_x86_load_seg(env, R_ES, __USER_DS);
1143 cpu_x86_load_seg(env, R_SS, __USER_DS);
1144 cpu_x86_load_seg(env, R_CS, __USER_CS);
1145 env->eflags &= ~TF_MASK;
1146
1147 unlock_user_struct(frame, frame_addr, 1);
1148
1149 return;
1150
1151 give_sigsegv:
1152 force_sigsegv(sig);
1153 }
1154 #endif
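/* Editor's illustration (hypothetical byte view, not used by the code): the
 * 8-byte retcode stub that setup_frame() emits above decodes, on the
 * little-endian i386 guest, to "popl %eax; movl $TARGET_NR_sigreturn,%eax;
 * int $0x80", which is why the 16-bit stores use 0xb858 at offset 0 and
 * 0x80cd at offset 6, with the syscall number written as a 32-bit value at
 * offset 2. */
static const uint8_t example_sigreturn_stub[8] = {
    0x58,                           /* popl %eax                                  */
    0xb8, 0x00, 0x00, 0x00, 0x00,   /* movl $imm32, %eax (imm patched at runtime) */
    0xcd, 0x80,                     /* int  $0x80                                 */
};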
1155
1156 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1157 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1158 target_siginfo_t *info,
1159 target_sigset_t *set, CPUX86State *env)
1160 {
1161 abi_ulong frame_addr;
1162 #ifndef TARGET_X86_64
1163 abi_ulong addr;
1164 #endif
1165 struct rt_sigframe *frame;
1166 int i;
1167
1168 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1169 trace_user_setup_rt_frame(env, frame_addr);
1170
1171 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1172 goto give_sigsegv;
1173
1174 /* These fields are only in rt_sigframe on 32 bit */
1175 #ifndef TARGET_X86_64
1176 __put_user(sig, &frame->sig);
1177 addr = frame_addr + offsetof(struct rt_sigframe, info);
1178 __put_user(addr, &frame->pinfo);
1179 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1180 __put_user(addr, &frame->puc);
1181 #endif
1182 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1183 tswap_siginfo(&frame->info, info);
1184 }
1185
1186 /* Create the ucontext. */
1187 __put_user(0, &frame->uc.tuc_flags);
1188 __put_user(0, &frame->uc.tuc_link);
1189 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1190 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1191 &frame->uc.tuc_stack.ss_flags);
1192 __put_user(target_sigaltstack_used.ss_size,
1193 &frame->uc.tuc_stack.ss_size);
1194 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1195 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1196
1197 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1198 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1199 }
1200
1201 /* Set up to return from userspace. If provided, use a stub
1202 already in userspace. */
1203 #ifndef TARGET_X86_64
1204 if (ka->sa_flags & TARGET_SA_RESTORER) {
1205 __put_user(ka->sa_restorer, &frame->pretcode);
1206 } else {
1207 uint16_t val16;
1208 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1209 __put_user(addr, &frame->pretcode);
1210 /* This is movl $,%eax ; int $0x80 */
1211 __put_user(0xb8, (char *)(frame->retcode+0));
1212 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1213 val16 = 0x80cd;
1214 __put_user(val16, (uint16_t *)(frame->retcode+5));
1215 }
1216 #else
1217 /* XXX: Would be slightly better to return -EFAULT here if test fails
1218 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1219 __put_user(ka->sa_restorer, &frame->pretcode);
1220 #endif
1221
1222 /* Set up registers for signal handler */
1223 env->regs[R_ESP] = frame_addr;
1224 env->eip = ka->_sa_handler;
1225
1226 #ifndef TARGET_X86_64
1227 env->regs[R_EAX] = sig;
1228 env->regs[R_EDX] = (unsigned long)&frame->info;
1229 env->regs[R_ECX] = (unsigned long)&frame->uc;
1230 #else
1231 env->regs[R_EAX] = 0;
1232 env->regs[R_EDI] = sig;
1233 env->regs[R_ESI] = (unsigned long)&frame->info;
1234 env->regs[R_EDX] = (unsigned long)&frame->uc;
1235 #endif
1236
1237 cpu_x86_load_seg(env, R_DS, __USER_DS);
1238 cpu_x86_load_seg(env, R_ES, __USER_DS);
1239 cpu_x86_load_seg(env, R_CS, __USER_CS);
1240 cpu_x86_load_seg(env, R_SS, __USER_DS);
1241 env->eflags &= ~TF_MASK;
1242
1243 unlock_user_struct(frame, frame_addr, 1);
1244
1245 return;
1246
1247 give_sigsegv:
1248 force_sigsegv(sig);
1249 }
1250
1251 static int
1252 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1253 {
1254 unsigned int err = 0;
1255 abi_ulong fpstate_addr;
1256 unsigned int tmpflags;
1257
1258 #ifndef TARGET_X86_64
1259 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1260 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1261 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1262 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1263
1264 env->regs[R_EDI] = tswapl(sc->edi);
1265 env->regs[R_ESI] = tswapl(sc->esi);
1266 env->regs[R_EBP] = tswapl(sc->ebp);
1267 env->regs[R_ESP] = tswapl(sc->esp);
1268 env->regs[R_EBX] = tswapl(sc->ebx);
1269 env->regs[R_EDX] = tswapl(sc->edx);
1270 env->regs[R_ECX] = tswapl(sc->ecx);
1271 env->regs[R_EAX] = tswapl(sc->eax);
1272
1273 env->eip = tswapl(sc->eip);
1274 #else
1275 env->regs[8] = tswapl(sc->r8);
1276 env->regs[9] = tswapl(sc->r9);
1277 env->regs[10] = tswapl(sc->r10);
1278 env->regs[11] = tswapl(sc->r11);
1279 env->regs[12] = tswapl(sc->r12);
1280 env->regs[13] = tswapl(sc->r13);
1281 env->regs[14] = tswapl(sc->r14);
1282 env->regs[15] = tswapl(sc->r15);
1283
1284 env->regs[R_EDI] = tswapl(sc->rdi);
1285 env->regs[R_ESI] = tswapl(sc->rsi);
1286 env->regs[R_EBP] = tswapl(sc->rbp);
1287 env->regs[R_EBX] = tswapl(sc->rbx);
1288 env->regs[R_EDX] = tswapl(sc->rdx);
1289 env->regs[R_EAX] = tswapl(sc->rax);
1290 env->regs[R_ECX] = tswapl(sc->rcx);
1291 env->regs[R_ESP] = tswapl(sc->rsp);
1292
1293 env->eip = tswapl(sc->rip);
1294 #endif
1295
1296 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1297 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1298
1299 tmpflags = tswapl(sc->eflags);
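     /* Only CF, PF, AF, ZF, SF, TF, DF, OF and AC (mask 0x40DD5) may be
        taken from the guest frame; all other EFLAGS bits are preserved. */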
1300 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1301 // regs->orig_eax = -1; /* disable syscall checks */
1302
1303 fpstate_addr = tswapl(sc->fpstate);
1304 if (fpstate_addr != 0) {
1305 if (!access_ok(VERIFY_READ, fpstate_addr,
1306 sizeof(struct target_fpstate)))
1307 goto badframe;
1308 #ifndef TARGET_X86_64
1309 cpu_x86_frstor(env, fpstate_addr, 1);
1310 #else
1311 cpu_x86_fxrstor(env, fpstate_addr);
1312 #endif
1313 }
1314
1315 return err;
1316 badframe:
1317 return 1;
1318 }
1319
1320 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1321 #ifndef TARGET_X86_64
1322 long do_sigreturn(CPUX86State *env)
1323 {
1324 struct sigframe *frame;
1325 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1326 target_sigset_t target_set;
1327 sigset_t set;
1328 int i;
1329
1330 trace_user_do_sigreturn(env, frame_addr);
1331 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1332 goto badframe;
1333 /* set blocked signals */
1334 __get_user(target_set.sig[0], &frame->sc.oldmask);
1335 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1336 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1337 }
1338
1339 target_to_host_sigset_internal(&set, &target_set);
1340 set_sigmask(&set);
1341
1342 /* restore registers */
1343 if (restore_sigcontext(env, &frame->sc))
1344 goto badframe;
1345 unlock_user_struct(frame, frame_addr, 0);
1346 return -TARGET_QEMU_ESIGRETURN;
1347
1348 badframe:
1349 unlock_user_struct(frame, frame_addr, 0);
1350 force_sig(TARGET_SIGSEGV);
1351 return -TARGET_QEMU_ESIGRETURN;
1352 }
1353 #endif
1354
1355 long do_rt_sigreturn(CPUX86State *env)
1356 {
1357 abi_ulong frame_addr;
1358 struct rt_sigframe *frame;
1359 sigset_t set;
1360
1361 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1362 trace_user_do_rt_sigreturn(env, frame_addr);
1363 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1364 goto badframe;
1365 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1366 set_sigmask(&set);
1367
1368 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1369 goto badframe;
1370 }
1371
1372 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1373 get_sp_from_cpustate(env)) == -EFAULT) {
1374 goto badframe;
1375 }
1376
1377 unlock_user_struct(frame, frame_addr, 0);
1378 return -TARGET_QEMU_ESIGRETURN;
1379
1380 badframe:
1381 unlock_user_struct(frame, frame_addr, 0);
1382 force_sig(TARGET_SIGSEGV);
1383 return -TARGET_QEMU_ESIGRETURN;
1384 }
1385
1386 #elif defined(TARGET_ARM) && !defined(TARGET_AARCH64)
1387
1388 struct target_sigcontext {
1389 abi_ulong trap_no;
1390 abi_ulong error_code;
1391 abi_ulong oldmask;
1392 abi_ulong arm_r0;
1393 abi_ulong arm_r1;
1394 abi_ulong arm_r2;
1395 abi_ulong arm_r3;
1396 abi_ulong arm_r4;
1397 abi_ulong arm_r5;
1398 abi_ulong arm_r6;
1399 abi_ulong arm_r7;
1400 abi_ulong arm_r8;
1401 abi_ulong arm_r9;
1402 abi_ulong arm_r10;
1403 abi_ulong arm_fp;
1404 abi_ulong arm_ip;
1405 abi_ulong arm_sp;
1406 abi_ulong arm_lr;
1407 abi_ulong arm_pc;
1408 abi_ulong arm_cpsr;
1409 abi_ulong fault_address;
1410 };
1411
1412 struct target_ucontext_v1 {
1413 abi_ulong tuc_flags;
1414 abi_ulong tuc_link;
1415 target_stack_t tuc_stack;
1416 struct target_sigcontext tuc_mcontext;
1417 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1418 };
1419
1420 struct target_ucontext_v2 {
1421 abi_ulong tuc_flags;
1422 abi_ulong tuc_link;
1423 target_stack_t tuc_stack;
1424 struct target_sigcontext tuc_mcontext;
1425 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1426 char __unused[128 - sizeof(target_sigset_t)];
1427 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1428 };
1429
1430 struct target_user_vfp {
1431 uint64_t fpregs[32];
1432 abi_ulong fpscr;
1433 };
1434
1435 struct target_user_vfp_exc {
1436 abi_ulong fpexc;
1437 abi_ulong fpinst;
1438 abi_ulong fpinst2;
1439 };
1440
1441 struct target_vfp_sigframe {
1442 abi_ulong magic;
1443 abi_ulong size;
1444 struct target_user_vfp ufp;
1445 struct target_user_vfp_exc ufp_exc;
1446 } __attribute__((__aligned__(8)));
1447
1448 struct target_iwmmxt_sigframe {
1449 abi_ulong magic;
1450 abi_ulong size;
1451 uint64_t regs[16];
1452 /* Note that not all the coprocessor control registers are stored here */
1453 uint32_t wcssf;
1454 uint32_t wcasf;
1455 uint32_t wcgr0;
1456 uint32_t wcgr1;
1457 uint32_t wcgr2;
1458 uint32_t wcgr3;
1459 } __attribute__((__aligned__(8)));
1460
1461 #define TARGET_VFP_MAGIC 0x56465001
1462 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1463
1464 struct sigframe_v1
1465 {
1466 struct target_sigcontext sc;
1467 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1468 abi_ulong retcode;
1469 };
1470
1471 struct sigframe_v2
1472 {
1473 struct target_ucontext_v2 uc;
1474 abi_ulong retcode;
1475 };
1476
1477 struct rt_sigframe_v1
1478 {
1479 abi_ulong pinfo;
1480 abi_ulong puc;
1481 struct target_siginfo info;
1482 struct target_ucontext_v1 uc;
1483 abi_ulong retcode;
1484 };
1485
1486 struct rt_sigframe_v2
1487 {
1488 struct target_siginfo info;
1489 struct target_ucontext_v2 uc;
1490 abi_ulong retcode;
1491 };
1492
1493 #define TARGET_CONFIG_CPU_32 1
1494
1495 /*
1496 * For ARM syscalls, we encode the syscall number into the instruction.
1497 */
1498 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1499 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1500
1501 /*
1502 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1503 * need two 16-bit instructions.
1504 */
1505 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1506 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1507
1508 static const abi_ulong retcodes[4] = {
1509 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1510 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1511 };
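/* Editor's sketch (mirrors the index computation in setup_return() below):
 * retcodes[] is indexed by "thumb + 2 * wants_siginfo", i.e.
 *   [0] ARM sigreturn,    [1] Thumb sigreturn,
 *   [2] ARM rt_sigreturn, [3] Thumb rt_sigreturn. */
static inline unsigned int example_retcode_index(int thumb, int wants_siginfo)
{
    return (thumb ? 1 : 0) + (wants_siginfo ? 2 : 0);
}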
1512
1513
1514 static inline int valid_user_regs(CPUARMState *regs)
1515 {
1516 return 1;
1517 }
1518
1519 static void
1520 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1521 CPUARMState *env, abi_ulong mask)
1522 {
1523 __put_user(env->regs[0], &sc->arm_r0);
1524 __put_user(env->regs[1], &sc->arm_r1);
1525 __put_user(env->regs[2], &sc->arm_r2);
1526 __put_user(env->regs[3], &sc->arm_r3);
1527 __put_user(env->regs[4], &sc->arm_r4);
1528 __put_user(env->regs[5], &sc->arm_r5);
1529 __put_user(env->regs[6], &sc->arm_r6);
1530 __put_user(env->regs[7], &sc->arm_r7);
1531 __put_user(env->regs[8], &sc->arm_r8);
1532 __put_user(env->regs[9], &sc->arm_r9);
1533 __put_user(env->regs[10], &sc->arm_r10);
1534 __put_user(env->regs[11], &sc->arm_fp);
1535 __put_user(env->regs[12], &sc->arm_ip);
1536 __put_user(env->regs[13], &sc->arm_sp);
1537 __put_user(env->regs[14], &sc->arm_lr);
1538 __put_user(env->regs[15], &sc->arm_pc);
1539 #ifdef TARGET_CONFIG_CPU_32
1540 __put_user(cpsr_read(env), &sc->arm_cpsr);
1541 #endif
1542
1543 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1544 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1545 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1546 __put_user(mask, &sc->oldmask);
1547 }
1548
1549 static inline abi_ulong
1550 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1551 {
1552 unsigned long sp = regs->regs[13];
1553
1554 /*
1555 * This is the X/Open sanctioned signal stack switching.
1556 */
1557 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1558 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1559 }
1560 /*
1561 * ATPCS B01 mandates 8-byte alignment
1562 */
1563 return (sp - framesize) & ~7;
1564 }
1565
1566 static void
1567 setup_return(CPUARMState *env, struct target_sigaction *ka,
1568 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1569 {
1570 abi_ulong handler = ka->_sa_handler;
1571 abi_ulong retcode;
1572 int thumb = handler & 1;
1573 uint32_t cpsr = cpsr_read(env);
1574
1575 cpsr &= ~CPSR_IT;
1576 if (thumb) {
1577 cpsr |= CPSR_T;
1578 } else {
1579 cpsr &= ~CPSR_T;
1580 }
1581
1582 if (ka->sa_flags & TARGET_SA_RESTORER) {
1583 retcode = ka->sa_restorer;
1584 } else {
1585 unsigned int idx = thumb;
1586
1587 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1588 idx += 2;
1589 }
1590
1591 __put_user(retcodes[idx], rc);
1592
1593 retcode = rc_addr + thumb;
1594 }
1595
1596 env->regs[0] = usig;
1597 env->regs[13] = frame_addr;
1598 env->regs[14] = retcode;
1599 env->regs[15] = handler & (thumb ? ~1 : ~3);
1600 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1601 }
1602
1603 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1604 {
1605 int i;
1606 struct target_vfp_sigframe *vfpframe;
1607 vfpframe = (struct target_vfp_sigframe *)regspace;
1608 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1609 __put_user(sizeof(*vfpframe), &vfpframe->size);
1610 for (i = 0; i < 32; i++) {
1611 __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
1612 }
1613 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1614 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1615 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1616 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1617 return (abi_ulong*)(vfpframe+1);
1618 }
1619
1620 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1621 CPUARMState *env)
1622 {
1623 int i;
1624 struct target_iwmmxt_sigframe *iwmmxtframe;
1625 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1626 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1627 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1628 for (i = 0; i < 16; i++) {
1629 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1630 }
1631 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1632 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1633 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1634 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1635 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1636 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1637 return (abi_ulong*)(iwmmxtframe+1);
1638 }
1639
1640 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1641 target_sigset_t *set, CPUARMState *env)
1642 {
1643 struct target_sigaltstack stack;
1644 int i;
1645 abi_ulong *regspace;
1646
1647 /* Clear all the bits of the ucontext we don't use. */
1648 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1649
1650 memset(&stack, 0, sizeof(stack));
1651 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1652 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1653 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1654 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1655
1656 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1657 /* Save coprocessor signal frame. */
1658 regspace = uc->tuc_regspace;
1659 if (arm_feature(env, ARM_FEATURE_VFP)) {
1660 regspace = setup_sigframe_v2_vfp(regspace, env);
1661 }
1662 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1663 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1664 }
1665
1666 /* Write terminating magic word */
1667 __put_user(0, regspace);
1668
1669 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1670 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1671 }
1672 }
1673
1674 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1675 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1676 target_sigset_t *set, CPUARMState *regs)
1677 {
1678 struct sigframe_v1 *frame;
1679 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1680 int i;
1681
1682 trace_user_setup_frame(regs, frame_addr);
1683 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1684 goto sigsegv;
1685 }
1686
1687 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1688
1689 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1690 __put_user(set->sig[i], &frame->extramask[i - 1]);
1691 }
1692
1693 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1694 frame_addr + offsetof(struct sigframe_v1, retcode));
1695
1696 unlock_user_struct(frame, frame_addr, 1);
1697 return;
1698 sigsegv:
1699 force_sigsegv(usig);
1700 }
1701
1702 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1703 target_sigset_t *set, CPUARMState *regs)
1704 {
1705 struct sigframe_v2 *frame;
1706 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1707
1708 trace_user_setup_frame(regs, frame_addr);
1709 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1710 goto sigsegv;
1711 }
1712
1713 setup_sigframe_v2(&frame->uc, set, regs);
1714
1715 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1716 frame_addr + offsetof(struct sigframe_v2, retcode));
1717
1718 unlock_user_struct(frame, frame_addr, 1);
1719 return;
1720 sigsegv:
1721 force_sigsegv(usig);
1722 }
1723
1724 static void setup_frame(int usig, struct target_sigaction *ka,
1725 target_sigset_t *set, CPUARMState *regs)
1726 {
1727 if (get_osversion() >= 0x020612) { /* 0x020612 == kernel 2.6.18 */
1728 setup_frame_v2(usig, ka, set, regs);
1729 } else {
1730 setup_frame_v1(usig, ka, set, regs);
1731 }
1732 }
1733
1734 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1735 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1736 target_siginfo_t *info,
1737 target_sigset_t *set, CPUARMState *env)
1738 {
1739 struct rt_sigframe_v1 *frame;
1740 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1741 struct target_sigaltstack stack;
1742 int i;
1743 abi_ulong info_addr, uc_addr;
1744
1745 trace_user_setup_rt_frame(env, frame_addr);
1746 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1747 goto sigsegv;
1748 }
1749
1750 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1751 __put_user(info_addr, &frame->pinfo);
1752 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1753 __put_user(uc_addr, &frame->puc);
1754 tswap_siginfo(&frame->info, info);
1755
1756 /* Clear all the bits of the ucontext we don't use. */
1757 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1758
1759 memset(&stack, 0, sizeof(stack));
1760 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1761 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1762 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1763 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1764
1765 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1766 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1767 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1768 }
1769
1770 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1771 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1772
1773 env->regs[1] = info_addr;
1774 env->regs[2] = uc_addr;
1775
1776 unlock_user_struct(frame, frame_addr, 1);
1777 return;
1778 sigsegv:
1779 force_sigsegv(usig);
1780 }
1781
1782 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1783 target_siginfo_t *info,
1784 target_sigset_t *set, CPUARMState *env)
1785 {
1786 struct rt_sigframe_v2 *frame;
1787 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1788 abi_ulong info_addr, uc_addr;
1789
1790 trace_user_setup_rt_frame(env, frame_addr);
1791 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1792 goto sigsegv;
1793 }
1794
1795 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1796 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1797 tswap_siginfo(&frame->info, info);
1798
1799 setup_sigframe_v2(&frame->uc, set, env);
1800
1801 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1802 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1803
1804 env->regs[1] = info_addr;
1805 env->regs[2] = uc_addr;
1806
1807 unlock_user_struct(frame, frame_addr, 1);
1808 return;
1809 sigsegv:
1810 force_sigsegv(usig);
1811 }
1812
1813 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1814 target_siginfo_t *info,
1815 target_sigset_t *set, CPUARMState *env)
1816 {
1817 if (get_osversion() >= 0x020612) {
1818 setup_rt_frame_v2(usig, ka, info, set, env);
1819 } else {
1820 setup_rt_frame_v1(usig, ka, info, set, env);
1821 }
1822 }
1823
1824 static int
1825 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1826 {
1827 int err = 0;
1828 uint32_t cpsr;
1829
1830 __get_user(env->regs[0], &sc->arm_r0);
1831 __get_user(env->regs[1], &sc->arm_r1);
1832 __get_user(env->regs[2], &sc->arm_r2);
1833 __get_user(env->regs[3], &sc->arm_r3);
1834 __get_user(env->regs[4], &sc->arm_r4);
1835 __get_user(env->regs[5], &sc->arm_r5);
1836 __get_user(env->regs[6], &sc->arm_r6);
1837 __get_user(env->regs[7], &sc->arm_r7);
1838 __get_user(env->regs[8], &sc->arm_r8);
1839 __get_user(env->regs[9], &sc->arm_r9);
1840 __get_user(env->regs[10], &sc->arm_r10);
1841 __get_user(env->regs[11], &sc->arm_fp);
1842 __get_user(env->regs[12], &sc->arm_ip);
1843 __get_user(env->regs[13], &sc->arm_sp);
1844 __get_user(env->regs[14], &sc->arm_lr);
1845 __get_user(env->regs[15], &sc->arm_pc);
1846 #ifdef TARGET_CONFIG_CPU_32
1847 __get_user(cpsr, &sc->arm_cpsr);
1848 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1849 #endif
1850
1851 err |= !valid_user_regs(env);
1852
1853 return err;
1854 }
1855
1856 static long do_sigreturn_v1(CPUARMState *env)
1857 {
1858 abi_ulong frame_addr;
1859 struct sigframe_v1 *frame = NULL;
1860 target_sigset_t set;
1861 sigset_t host_set;
1862 int i;
1863
1864 /*
1866 * Since we stacked the signal frame on a 64-bit boundary,
1867 * 'sp' should be 8-byte aligned here. If it's not,
1868 * then the user is trying to mess with us.
1868 */
1869 frame_addr = env->regs[13];
1870 trace_user_do_sigreturn(env, frame_addr);
1871 if (frame_addr & 7) {
1872 goto badframe;
1873 }
1874
1875 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1876 goto badframe;
1877 }
1878
1879 __get_user(set.sig[0], &frame->sc.oldmask);
1880 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1881 __get_user(set.sig[i], &frame->extramask[i - 1]);
1882 }
1883
1884 target_to_host_sigset_internal(&host_set, &set);
1885 set_sigmask(&host_set);
1886
1887 if (restore_sigcontext(env, &frame->sc)) {
1888 goto badframe;
1889 }
1890
1891 #if 0
1892 /* Send SIGTRAP if we're single-stepping */
1893 if (ptrace_cancel_bpt(current))
1894 send_sig(SIGTRAP, current, 1);
1895 #endif
1896 unlock_user_struct(frame, frame_addr, 0);
1897 return -TARGET_QEMU_ESIGRETURN;
1898
1899 badframe:
1900 force_sig(TARGET_SIGSEGV);
1901 return -TARGET_QEMU_ESIGRETURN;
1902 }
1903
1904 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1905 {
1906 int i;
1907 abi_ulong magic, sz;
1908 uint32_t fpscr, fpexc;
1909 struct target_vfp_sigframe *vfpframe;
1910 vfpframe = (struct target_vfp_sigframe *)regspace;
1911
1912 __get_user(magic, &vfpframe->magic);
1913 __get_user(sz, &vfpframe->size);
1914 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1915 return 0;
1916 }
1917 for (i = 0; i < 32; i++) {
1918 __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
1919 }
1920 __get_user(fpscr, &vfpframe->ufp.fpscr);
1921 vfp_set_fpscr(env, fpscr);
1922 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1923 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1924 * and the exception flag is cleared
1925 */
1926 fpexc |= (1 << 30);
1927 fpexc &= ~((1 << 31) | (1 << 28));
1928 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1929 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1930 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1931 return (abi_ulong*)(vfpframe + 1);
1932 }
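/*
 * The coprocessor area of the v2 ucontext is a sequence of records, each
 * headed by a 32-bit magic and a 32-bit size, laid end to end in
 * tuc_regspace.  Each restore helper consumes one record, checks the
 * magic and size, and returns a pointer just past it, or 0 on mismatch,
 * which the caller turns into a SIGSEGV.
 */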
1933
1934 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1935 abi_ulong *regspace)
1936 {
1937 int i;
1938 abi_ulong magic, sz;
1939 struct target_iwmmxt_sigframe *iwmmxtframe;
1940 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1941
1942 __get_user(magic, &iwmmxtframe->magic);
1943 __get_user(sz, &iwmmxtframe->size);
1944 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1945 return 0;
1946 }
1947 for (i = 0; i < 16; i++) {
1948 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1949 }
1950 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1951 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1952 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1953 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1954 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1955 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1956 return (abi_ulong*)(iwmmxtframe + 1);
1957 }
1958
1959 static int do_sigframe_return_v2(CPUARMState *env,
1960 target_ulong context_addr,
1961 struct target_ucontext_v2 *uc)
1962 {
1963 sigset_t host_set;
1964 abi_ulong *regspace;
1965
1966 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1967 set_sigmask(&host_set);
1968
1969 if (restore_sigcontext(env, &uc->tuc_mcontext))
1970 return 1;
1971
1972 /* Restore coprocessor signal frame */
1973 regspace = uc->tuc_regspace;
1974 if (arm_feature(env, ARM_FEATURE_VFP)) {
1975 regspace = restore_sigframe_v2_vfp(env, regspace);
1976 if (!regspace) {
1977 return 1;
1978 }
1979 }
1980 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1981 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1982 if (!regspace) {
1983 return 1;
1984 }
1985 }
1986
1987 if (do_sigaltstack(context_addr
1988 + offsetof(struct target_ucontext_v2, tuc_stack),
1989 0, get_sp_from_cpustate(env)) == -EFAULT) {
1990 return 1;
1991 }
1992
1993 #if 0
1994 /* Send SIGTRAP if we're single-stepping */
1995 if (ptrace_cancel_bpt(current))
1996 send_sig(SIGTRAP, current, 1);
1997 #endif
1998
1999 return 0;
2000 }
2001
2002 static long do_sigreturn_v2(CPUARMState *env)
2003 {
2004 abi_ulong frame_addr;
2005 struct sigframe_v2 *frame = NULL;
2006
2007 /*
2008 * Since we stacked the signal frame on a 64-bit boundary,
2009 * 'sp' should be 8-byte aligned here. If it's not,
2010 * then the user is trying to mess with us.
2011 */
2012 frame_addr = env->regs[13];
2013 trace_user_do_sigreturn(env, frame_addr);
2014 if (frame_addr & 7) {
2015 goto badframe;
2016 }
2017
2018 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2019 goto badframe;
2020 }
2021
2022 if (do_sigframe_return_v2(env,
2023 frame_addr
2024 + offsetof(struct sigframe_v2, uc),
2025 &frame->uc)) {
2026 goto badframe;
2027 }
2028
2029 unlock_user_struct(frame, frame_addr, 0);
2030 return -TARGET_QEMU_ESIGRETURN;
2031
2032 badframe:
2033 unlock_user_struct(frame, frame_addr, 0);
2034 force_sig(TARGET_SIGSEGV);
2035 return -TARGET_QEMU_ESIGRETURN;
2036 }
2037
2038 long do_sigreturn(CPUARMState *env)
2039 {
2040 if (get_osversion() >= 0x020612) {
2041 return do_sigreturn_v2(env);
2042 } else {
2043 return do_sigreturn_v1(env);
2044 }
2045 }
2046
2047 static long do_rt_sigreturn_v1(CPUARMState *env)
2048 {
2049 abi_ulong frame_addr;
2050 struct rt_sigframe_v1 *frame = NULL;
2051 sigset_t host_set;
2052
2053 /*
2054 * Since we stacked the signal frame on a 64-bit boundary,
2055 * 'sp' should be 8-byte aligned here. If it's not,
2056 * then the user is trying to mess with us.
2057 */
2058 frame_addr = env->regs[13];
2059 trace_user_do_rt_sigreturn(env, frame_addr);
2060 if (frame_addr & 7) {
2061 goto badframe;
2062 }
2063
2064 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2065 goto badframe;
2066 }
2067
2068 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2069 set_sigmask(&host_set);
2070
2071 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2072 goto badframe;
2073 }
2074
2075 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2076 goto badframe;
2077
2078 #if 0
2079 /* Send SIGTRAP if we're single-stepping */
2080 if (ptrace_cancel_bpt(current))
2081 send_sig(SIGTRAP, current, 1);
2082 #endif
2083 unlock_user_struct(frame, frame_addr, 0);
2084 return -TARGET_QEMU_ESIGRETURN;
2085
2086 badframe:
2087 unlock_user_struct(frame, frame_addr, 0);
2088 force_sig(TARGET_SIGSEGV);
2089 return -TARGET_QEMU_ESIGRETURN;
2090 }
2091
2092 static long do_rt_sigreturn_v2(CPUARMState *env)
2093 {
2094 abi_ulong frame_addr;
2095 struct rt_sigframe_v2 *frame = NULL;
2096
2097 /*
2098 * Since we stacked the signal frame on a 64-bit boundary,
2099 * 'sp' should be 8-byte aligned here. If it's not,
2100 * then the user is trying to mess with us.
2101 */
2102 frame_addr = env->regs[13];
2103 trace_user_do_rt_sigreturn(env, frame_addr);
2104 if (frame_addr & 7) {
2105 goto badframe;
2106 }
2107
2108 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2109 goto badframe;
2110 }
2111
2112 if (do_sigframe_return_v2(env,
2113 frame_addr
2114 + offsetof(struct rt_sigframe_v2, uc),
2115 &frame->uc)) {
2116 goto badframe;
2117 }
2118
2119 unlock_user_struct(frame, frame_addr, 0);
2120 return -TARGET_QEMU_ESIGRETURN;
2121
2122 badframe:
2123 unlock_user_struct(frame, frame_addr, 0);
2124 force_sig(TARGET_SIGSEGV);
2125 return -TARGET_QEMU_ESIGRETURN;
2126 }
2127
2128 long do_rt_sigreturn(CPUARMState *env)
2129 {
2130 if (get_osversion() >= 0x020612) {
2131 return do_rt_sigreturn_v2(env);
2132 } else {
2133 return do_rt_sigreturn_v1(env);
2134 }
2135 }
2136
2137 #elif defined(TARGET_SPARC)
2138
2139 #define __SUNOS_MAXWIN 31
2140
2141 /* This is what SunOS does, so shall I. */
2142 struct target_sigcontext {
2143 abi_ulong sigc_onstack; /* state to restore */
2144
2145 abi_ulong sigc_mask; /* sigmask to restore */
2146 abi_ulong sigc_sp; /* stack pointer */
2147 abi_ulong sigc_pc; /* program counter */
2148 abi_ulong sigc_npc; /* next program counter */
2149 abi_ulong sigc_psr; /* for condition codes etc */
2150 abi_ulong sigc_g1; /* User uses these two registers */
2151 abi_ulong sigc_o0; /* within the trampoline code. */
2152
2153 /* Now comes information regarding the user's window set
2154 * at the time of the signal.
2155 */
2156 abi_ulong sigc_oswins; /* outstanding windows */
2157
2158 /* stack ptrs for each regwin buf */
2159 char *sigc_spbuf[__SUNOS_MAXWIN];
2160
2161 /* Windows to restore after signal */
2162 struct {
2163 abi_ulong locals[8];
2164 abi_ulong ins[8];
2165 } sigc_wbuf[__SUNOS_MAXWIN];
2166 };
2167 /* A Sparc stack frame */
2168 struct sparc_stackf {
2169 abi_ulong locals[8];
2170 abi_ulong ins[8];
2171 /* It's simpler to treat fp and callers_pc as elements of ins[]
2172 * since we never need to access them ourselves.
2173 */
2174 char *structptr;
2175 abi_ulong xargs[6];
2176 abi_ulong xxargs[1];
2177 };
2178
2179 typedef struct {
2180 struct {
2181 abi_ulong psr;
2182 abi_ulong pc;
2183 abi_ulong npc;
2184 abi_ulong y;
2185 abi_ulong u_regs[16]; /* globals and ins */
2186 } si_regs;
2187 int si_mask;
2188 } __siginfo_t;
2189
2190 typedef struct {
2191 abi_ulong si_float_regs[32];
2192 unsigned long si_fsr;
2193 unsigned long si_fpqdepth;
2194 struct {
2195 unsigned long *insn_addr;
2196 unsigned long insn;
2197 } si_fpqueue [16];
2198 } qemu_siginfo_fpu_t;
2199
2200
2201 struct target_signal_frame {
2202 struct sparc_stackf ss;
2203 __siginfo_t info;
2204 abi_ulong fpu_save;
2205 abi_ulong insns[2] __attribute__ ((aligned (8)));
2206 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2207 abi_ulong extra_size; /* Should be 0 */
2208 qemu_siginfo_fpu_t fpu_state;
2209 };
2210 struct target_rt_signal_frame {
2211 struct sparc_stackf ss;
2212 siginfo_t info;
2213 abi_ulong regs[20];
2214 sigset_t mask;
2215 abi_ulong fpu_save;
2216 unsigned int insns[2];
2217 stack_t stack;
2218 unsigned int extra_size; /* Should be 0 */
2219 qemu_siginfo_fpu_t fpu_state;
2220 };
2221
2222 #define UREG_O0 16
2223 #define UREG_O6 22
2224 #define UREG_I0 0
2225 #define UREG_I1 1
2226 #define UREG_I2 2
2227 #define UREG_I3 3
2228 #define UREG_I4 4
2229 #define UREG_I5 5
2230 #define UREG_I6 6
2231 #define UREG_I7 7
2232 #define UREG_L0 8
2233 #define UREG_FP UREG_I6
2234 #define UREG_SP UREG_O6
2235
2236 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2237 CPUSPARCState *env,
2238 unsigned long framesize)
2239 {
2240 abi_ulong sp;
2241
2242 sp = env->regwptr[UREG_FP];
2243
2244 /* This is the X/Open sanctioned signal stack switching. */
2245 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2246 if (!on_sig_stack(sp)
2247 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2248 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2249 }
2250 }
2251 return sp - framesize;
2252 }
2253
2254 static int
2255 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2256 {
2257 int err = 0, i;
2258
2259 __put_user(env->psr, &si->si_regs.psr);
2260 __put_user(env->pc, &si->si_regs.pc);
2261 __put_user(env->npc, &si->si_regs.npc);
2262 __put_user(env->y, &si->si_regs.y);
2263 for (i=0; i < 8; i++) {
2264 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2265 }
2266 for (i=0; i < 8; i++) {
2267 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2268 }
2269 __put_user(mask, &si->si_mask);
2270 return err;
2271 }
2272
2273 #if 0
2274 static int
2275 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2276 CPUSPARCState *env, unsigned long mask)
2277 {
2278 int err = 0;
2279
2280 __put_user(mask, &sc->sigc_mask);
2281 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2282 __put_user(env->pc, &sc->sigc_pc);
2283 __put_user(env->npc, &sc->sigc_npc);
2284 __put_user(env->psr, &sc->sigc_psr);
2285 __put_user(env->gregs[1], &sc->sigc_g1);
2286 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2287
2288 return err;
2289 }
2290 #endif
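/* Round the frame size up to the next multiple of 8, i.e. (sz + 7) & ~7,
   so the frame carved out of the stack below stays doubleword aligned. */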
2291 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2292
2293 static void setup_frame(int sig, struct target_sigaction *ka,
2294 target_sigset_t *set, CPUSPARCState *env)
2295 {
2296 abi_ulong sf_addr;
2297 struct target_signal_frame *sf;
2298 int sigframe_size, err, i;
2299
2300 /* 1. Make sure everything is clean */
2301 //synchronize_user_stack();
2302
2303 sigframe_size = NF_ALIGNEDSZ;
2304 sf_addr = get_sigframe(ka, env, sigframe_size);
2305 trace_user_setup_frame(env, sf_addr);
2306
2307 sf = lock_user(VERIFY_WRITE, sf_addr,
2308 sizeof(struct target_signal_frame), 0);
2309 if (!sf) {
2310 goto sigsegv;
2311 }
2312 #if 0
2313 if (invalid_frame_pointer(sf, sigframe_size))
2314 goto sigill_and_return;
2315 #endif
2316 /* 2. Save the current process state */
2317 err = setup___siginfo(&sf->info, env, set->sig[0]);
2318 __put_user(0, &sf->extra_size);
2319
2320 //save_fpu_state(regs, &sf->fpu_state);
2321 //__put_user(&sf->fpu_state, &sf->fpu_save);
2322
2323 __put_user(set->sig[0], &sf->info.si_mask);
2324 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2325 __put_user(set->sig[i + 1], &sf->extramask[i]);
2326 }
2327
2328 for (i = 0; i < 8; i++) {
2329 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2330 }
2331 for (i = 0; i < 8; i++) {
2332 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2333 }
2334 if (err)
2335 goto sigsegv;
2336
2337 /* 3. signal handler back-trampoline and parameters */
2338 env->regwptr[UREG_FP] = sf_addr;
2339 env->regwptr[UREG_I0] = sig;
2340 env->regwptr[UREG_I1] = sf_addr +
2341 offsetof(struct target_signal_frame, info);
2342 env->regwptr[UREG_I2] = sf_addr +
2343 offsetof(struct target_signal_frame, info);
2344
2345 /* 4. signal handler */
2346 env->pc = ka->_sa_handler;
2347 env->npc = (env->pc + 4);
2348 /* 5. return to kernel instructions */
2349 if (ka->ka_restorer) {
2350 env->regwptr[UREG_I7] = ka->ka_restorer;
2351 } else {
2352 uint32_t val32;
2353
2354 env->regwptr[UREG_I7] = sf_addr +
2355 offsetof(struct target_signal_frame, insns) - 2 * 4;
2356
2357 /* mov __NR_sigreturn, %g1 */
2358 val32 = 0x821020d8;
2359 __put_user(val32, &sf->insns[0]);
2360
2361 /* t 0x10 */
2362 val32 = 0x91d02010;
2363 __put_user(val32, &sf->insns[1]);
2364 if (err)
2365 goto sigsegv;
2366
2367 /* Flush instruction space. */
2368 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2369 // tb_flush(env);
2370 }
2371 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2372 return;
2373 #if 0
2374 sigill_and_return:
2375 force_sig(TARGET_SIGILL);
2376 #endif
2377 sigsegv:
2378 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2379 force_sigsegv(sig);
2380 }
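/*
 * Note on the trampoline written above when no ka_restorer is supplied:
 * 0x821020d8 should decode as "or %g0, 0xd8, %g1", i.e. it loads 216
 * (presumably __NR_sigreturn on 32-bit sparc) into %g1, and 0x91d02010
 * is "ta 0x10", the Linux/sparc syscall trap.  %i7 is pointed 8 bytes
 * before the insns because the handler's return jumps to %i7 + 8.
 * The decodings are our reading of the constants, not taken from the
 * kernel sources.
 */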
2381
2382 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2383 target_siginfo_t *info,
2384 target_sigset_t *set, CPUSPARCState *env)
2385 {
2386 fprintf(stderr, "setup_rt_frame: not implemented\n");
2387 }
2388
2389 long do_sigreturn(CPUSPARCState *env)
2390 {
2391 abi_ulong sf_addr;
2392 struct target_signal_frame *sf;
2393 uint32_t up_psr, pc, npc;
2394 target_sigset_t set;
2395 sigset_t host_set;
2396 int err=0, i;
2397
2398 sf_addr = env->regwptr[UREG_FP];
2399 trace_user_do_sigreturn(env, sf_addr);
2400 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2401 goto segv_and_exit;
2402 }
2403
2404 /* 1. Make sure we are not getting garbage from the user */
2405
2406 if (sf_addr & 3)
2407 goto segv_and_exit;
2408
2409 __get_user(pc, &sf->info.si_regs.pc);
2410 __get_user(npc, &sf->info.si_regs.npc);
2411
2412 if ((pc | npc) & 3) {
2413 goto segv_and_exit;
2414 }
2415
2416 /* 2. Restore the state */
2417 __get_user(up_psr, &sf->info.si_regs.psr);
2418
2419 /* User can only change condition codes and FPU enabling in %psr. */
2420 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2421 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2422
2423 env->pc = pc;
2424 env->npc = npc;
2425 __get_user(env->y, &sf->info.si_regs.y);
2426 for (i=0; i < 8; i++) {
2427 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2428 }
2429 for (i=0; i < 8; i++) {
2430 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2431 }
2432
2433 /* FIXME: implement FPU save/restore:
2434 * __get_user(fpu_save, &sf->fpu_save);
2435 * if (fpu_save)
2436 * err |= restore_fpu_state(env, fpu_save);
2437 */
2438
2439 /* This is pretty much atomic; no amount of locking would prevent
2440 * the races which exist anyway.
2441 */
2442 __get_user(set.sig[0], &sf->info.si_mask);
2443 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2444 __get_user(set.sig[i], &sf->extramask[i - 1]);
2445 }
2446
2447 target_to_host_sigset_internal(&host_set, &set);
2448 set_sigmask(&host_set);
2449
2450 if (err) {
2451 goto segv_and_exit;
2452 }
2453 unlock_user_struct(sf, sf_addr, 0);
2454 return -TARGET_QEMU_ESIGRETURN;
2455
2456 segv_and_exit:
2457 unlock_user_struct(sf, sf_addr, 0);
2458 force_sig(TARGET_SIGSEGV);
2459 return -TARGET_QEMU_ESIGRETURN;
2460 }
2461
2462 long do_rt_sigreturn(CPUSPARCState *env)
2463 {
2464 trace_user_do_rt_sigreturn(env, 0);
2465 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2466 return -TARGET_ENOSYS;
2467 }
2468
2469 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2470 #define SPARC_MC_TSTATE 0
2471 #define SPARC_MC_PC 1
2472 #define SPARC_MC_NPC 2
2473 #define SPARC_MC_Y 3
2474 #define SPARC_MC_G1 4
2475 #define SPARC_MC_G2 5
2476 #define SPARC_MC_G3 6
2477 #define SPARC_MC_G4 7
2478 #define SPARC_MC_G5 8
2479 #define SPARC_MC_G6 9
2480 #define SPARC_MC_G7 10
2481 #define SPARC_MC_O0 11
2482 #define SPARC_MC_O1 12
2483 #define SPARC_MC_O2 13
2484 #define SPARC_MC_O3 14
2485 #define SPARC_MC_O4 15
2486 #define SPARC_MC_O5 16
2487 #define SPARC_MC_O6 17
2488 #define SPARC_MC_O7 18
2489 #define SPARC_MC_NGREG 19
2490
2491 typedef abi_ulong target_mc_greg_t;
2492 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
2493
2494 struct target_mc_fq {
2495 abi_ulong *mcfq_addr;
2496 uint32_t mcfq_insn;
2497 };
2498
2499 struct target_mc_fpu {
2500 union {
2501 uint32_t sregs[32];
2502 uint64_t dregs[32];
2503 //uint128_t qregs[16];
2504 } mcfpu_fregs;
2505 abi_ulong mcfpu_fsr;
2506 abi_ulong mcfpu_fprs;
2507 abi_ulong mcfpu_gsr;
2508 struct target_mc_fq *mcfpu_fq;
2509 unsigned char mcfpu_qcnt;
2510 unsigned char mcfpu_qentsz;
2511 unsigned char mcfpu_enab;
2512 };
2513 typedef struct target_mc_fpu target_mc_fpu_t;
2514
2515 typedef struct {
2516 target_mc_gregset_t mc_gregs;
2517 target_mc_greg_t mc_fp;
2518 target_mc_greg_t mc_i7;
2519 target_mc_fpu_t mc_fpregs;
2520 } target_mcontext_t;
2521
2522 struct target_ucontext {
2523 struct target_ucontext *tuc_link;
2524 abi_ulong tuc_flags;
2525 target_sigset_t tuc_sigmask;
2526 target_mcontext_t tuc_mcontext;
2527 };
2528
2529 /* A V9 register window */
2530 struct target_reg_window {
2531 abi_ulong locals[8];
2532 abi_ulong ins[8];
2533 };
2534
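/*
 * On 64-bit SPARC the ABI biases %sp and %fp by 2047 bytes: the real
 * register-window save area lives at (reg + 2047).  That is why w_addr
 * below adds TARGET_STACK_BIAS before touching ins[6]/ins[7].
 */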
2535 #define TARGET_STACK_BIAS 2047
2536
2537 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2538 void sparc64_set_context(CPUSPARCState *env)
2539 {
2540 abi_ulong ucp_addr;
2541 struct target_ucontext *ucp;
2542 target_mc_gregset_t *grp;
2543 abi_ulong pc, npc, tstate;
2544 abi_ulong fp, i7, w_addr;
2545 unsigned int i;
2546
2547 ucp_addr = env->regwptr[UREG_I0];
2548 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2549 goto do_sigsegv;
2550 }
2551 grp = &ucp->tuc_mcontext.mc_gregs;
2552 __get_user(pc, &((*grp)[SPARC_MC_PC]));
2553 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
2554 if ((pc | npc) & 3) {
2555 goto do_sigsegv;
2556 }
2557 if (env->regwptr[UREG_I1]) {
2558 target_sigset_t target_set;
2559 sigset_t set;
2560
2561 if (TARGET_NSIG_WORDS == 1) {
2562 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2563 } else {
2564 abi_ulong *src, *dst;
2565 src = ucp->tuc_sigmask.sig;
2566 dst = target_set.sig;
2567 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2568 __get_user(*dst, src);
2569 }
2570 }
2571 target_to_host_sigset_internal(&set, &target_set);
2572 set_sigmask(&set);
2573 }
2574 env->pc = pc;
2575 env->npc = npc;
2576 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
2577 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
2578 env->asi = (tstate >> 24) & 0xff;
2579 cpu_put_ccr(env, tstate >> 32);
2580 cpu_put_cwp64(env, tstate & 0x1f);
2581 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
2582 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
2583 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
2584 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
2585 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
2586 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
2587 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
2588 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
2589 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
2590 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
2591 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
2592 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
2593 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
2594 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
2595 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
2596
2597 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2598 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2599
2600 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2601 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2602 abi_ulong) != 0) {
2603 goto do_sigsegv;
2604 }
2605 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2606 abi_ulong) != 0) {
2607 goto do_sigsegv;
2608 }
2609 /* FIXME this does not match how the kernel handles the FPU in
2610 * its sparc64_set_context implementation. In particular the FPU
2611 * is only restored if fenab is non-zero in:
2612 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2613 */
2614 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2615 {
2616 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2617 for (i = 0; i < 64; i++, src++) {
2618 if (i & 1) {
2619 __get_user(env->fpr[i/2].l.lower, src);
2620 } else {
2621 __get_user(env->fpr[i/2].l.upper, src);
2622 }
2623 }
2624 }
2625 __get_user(env->fsr,
2626 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2627 __get_user(env->gsr,
2628 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2629 unlock_user_struct(ucp, ucp_addr, 0);
2630 return;
2631 do_sigsegv:
2632 unlock_user_struct(ucp, ucp_addr, 0);
2633 force_sig(TARGET_SIGSEGV);
2634 }
2635
2636 void sparc64_get_context(CPUSPARCState *env)
2637 {
2638 abi_ulong ucp_addr;
2639 struct target_ucontext *ucp;
2640 target_mc_gregset_t *grp;
2641 target_mcontext_t *mcp;
2642 abi_ulong fp, i7, w_addr;
2643 int err;
2644 unsigned int i;
2645 target_sigset_t target_set;
2646 sigset_t set;
2647
2648 ucp_addr = env->regwptr[UREG_I0];
2649 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2650 goto do_sigsegv;
2651 }
2652
2653 mcp = &ucp->tuc_mcontext;
2654 grp = &mcp->mc_gregs;
2655
2656 /* Skip over the trap instruction, first. */
2657 env->pc = env->npc;
2658 env->npc += 4;
2659
2660 /* If we're only reading the signal mask then do_sigprocmask()
2661 * is guaranteed not to fail, which is important because we don't
2662 * have any way to signal a failure or restart this operation since
2663 * this is not a normal syscall.
2664 */
2665 err = do_sigprocmask(0, NULL, &set);
2666 assert(err == 0);
2667 host_to_target_sigset_internal(&target_set, &set);
2668 if (TARGET_NSIG_WORDS == 1) {
2669 __put_user(target_set.sig[0],
2670 (abi_ulong *)&ucp->tuc_sigmask);
2671 } else {
2672 abi_ulong *src, *dst;
2673 src = target_set.sig;
2674 dst = ucp->tuc_sigmask.sig;
2675 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2676 __put_user(*src, dst);
2677 }
2678 if (err)
2679 goto do_sigsegv;
2680 }
2681
2682 /* XXX: tstate must be saved properly */
2683 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
2684 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
2685 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
2686 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
2687 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
2688 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
2689 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
2690 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
2691 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
2692 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
2693 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
2694 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
2695 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
2696 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
2697 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
2698 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
2699 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
2700 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
2701 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
2702
2703 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2704 fp = i7 = 0;
2705 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2706 abi_ulong) != 0) {
2707 goto do_sigsegv;
2708 }
2709 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2710 abi_ulong) != 0) {
2711 goto do_sigsegv;
2712 }
2713 __put_user(fp, &(mcp->mc_fp));
2714 __put_user(i7, &(mcp->mc_i7));
2715
2716 {
2717 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2718 for (i = 0; i < 64; i++, dst++) {
2719 if (i & 1) {
2720 __put_user(env->fpr[i/2].l.lower, dst);
2721 } else {
2722 __put_user(env->fpr[i/2].l.upper, dst);
2723 }
2724 }
2725 }
2726 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2727 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2728 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2729
2730 if (err)
2731 goto do_sigsegv;
2732 unlock_user_struct(ucp, ucp_addr, 1);
2733 return;
2734 do_sigsegv:
2735 unlock_user_struct(ucp, ucp_addr, 1);
2736 force_sig(TARGET_SIGSEGV);
2737 }
2738 #endif
2739 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2740
2741 # if defined(TARGET_ABI_MIPSO32)
2742 struct target_sigcontext {
2743 uint32_t sc_regmask; /* Unused */
2744 uint32_t sc_status;
2745 uint64_t sc_pc;
2746 uint64_t sc_regs[32];
2747 uint64_t sc_fpregs[32];
2748 uint32_t sc_ownedfp; /* Unused */
2749 uint32_t sc_fpc_csr;
2750 uint32_t sc_fpc_eir; /* Unused */
2751 uint32_t sc_used_math;
2752 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2753 uint32_t pad0;
2754 uint64_t sc_mdhi;
2755 uint64_t sc_mdlo;
2756 target_ulong sc_hi1; /* Was sc_cause */
2757 target_ulong sc_lo1; /* Was sc_badvaddr */
2758 target_ulong sc_hi2; /* Was sc_sigset[4] */
2759 target_ulong sc_lo2;
2760 target_ulong sc_hi3;
2761 target_ulong sc_lo3;
2762 };
2763 # else /* N32 || N64 */
2764 struct target_sigcontext {
2765 uint64_t sc_regs[32];
2766 uint64_t sc_fpregs[32];
2767 uint64_t sc_mdhi;
2768 uint64_t sc_hi1;
2769 uint64_t sc_hi2;
2770 uint64_t sc_hi3;
2771 uint64_t sc_mdlo;
2772 uint64_t sc_lo1;
2773 uint64_t sc_lo2;
2774 uint64_t sc_lo3;
2775 uint64_t sc_pc;
2776 uint32_t sc_fpc_csr;
2777 uint32_t sc_used_math;
2778 uint32_t sc_dsp;
2779 uint32_t sc_reserved;
2780 };
2781 # endif /* O32 */
2782
2783 struct sigframe {
2784 uint32_t sf_ass[4]; /* argument save space for o32 */
2785 uint32_t sf_code[2]; /* signal trampoline */
2786 struct target_sigcontext sf_sc;
2787 target_sigset_t sf_mask;
2788 };
2789
2790 struct target_ucontext {
2791 target_ulong tuc_flags;
2792 target_ulong tuc_link;
2793 target_stack_t tuc_stack;
2794 target_ulong pad0;
2795 struct target_sigcontext tuc_mcontext;
2796 target_sigset_t tuc_sigmask;
2797 };
2798
2799 struct target_rt_sigframe {
2800 uint32_t rs_ass[4]; /* argument save space for o32 */
2801 uint32_t rs_code[2]; /* signal trampoline */
2802 struct target_siginfo rs_info;
2803 struct target_ucontext rs_uc;
2804 };
2805
2806 /* Install trampoline to jump back from signal handler */
2807 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2808 {
2809 int err = 0;
2810
2811 /*
2812 * Set up the return code ...
2813 *
2814 * li v0, __NR__foo_sigreturn
2815 * syscall
2816 */
2817
2818 __put_user(0x24020000 + syscall, tramp + 0);
2819 __put_user(0x0000000c, tramp + 1);
2820 return err;
2821 }
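/*
 * The two words stored above should decode as
 *     0x24020000 + n   addiu $v0, $zero, n    (the "li v0, n")
 *     0x0000000c       syscall
 * $ra is later pointed at this trampoline, so returning from the signal
 * handler issues the sigreturn syscall.  The decodings are our reading
 * of the constants.
 */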
2822
2823 static inline void setup_sigcontext(CPUMIPSState *regs,
2824 struct target_sigcontext *sc)
2825 {
2826 int i;
2827
2828 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2829 regs->hflags &= ~MIPS_HFLAG_BMASK;
2830
2831 __put_user(0, &sc->sc_regs[0]);
2832 for (i = 1; i < 32; ++i) {
2833 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2834 }
2835
2836 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2837 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2838
2839 /* Rather than checking for dsp existence, always copy. The storage
2840 would just be garbage otherwise. */
2841 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2842 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2843 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2844 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2845 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2846 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2847 {
2848 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2849 __put_user(dsp, &sc->sc_dsp);
2850 }
2851
2852 __put_user(1, &sc->sc_used_math);
2853
2854 for (i = 0; i < 32; ++i) {
2855 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2856 }
2857 }
2858
2859 static inline void
2860 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2861 {
2862 int i;
2863
2864 __get_user(regs->CP0_EPC, &sc->sc_pc);
2865
2866 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2867 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2868
2869 for (i = 1; i < 32; ++i) {
2870 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2871 }
2872
2873 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2874 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2875 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2876 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2877 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2878 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2879 {
2880 uint32_t dsp;
2881 __get_user(dsp, &sc->sc_dsp);
2882 cpu_wrdsp(dsp, 0x3ff, regs);
2883 }
2884
2885 for (i = 0; i < 32; ++i) {
2886 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2887 }
2888 }
2889
2890 /*
2891 * Determine which stack to use.
2892 */
2893 static inline abi_ulong
2894 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2895 {
2896 unsigned long sp;
2897
2898 /* Default to using normal stack */
2899 sp = regs->active_tc.gpr[29];
2900
2901 /*
2902 * FPU emulator may have its own trampoline active just
2903 * above the user stack, 16-bytes before the next lowest
2904 * 16 byte boundary. Try to avoid trashing it.
2905 */
2906 sp -= 32;
2907
2908 /* This is the X/Open sanctioned signal stack switching. */
2909 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2910 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2911 }
2912
2913 return (sp - frame_size) & ~7;
2914 }
2915
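/*
 * MIPS16 and microMIPS encode the ISA mode in bit 0 of a code address.
 * When PC is pointed at a handler below, this helper copies that bit
 * into the M16 hflag and clears it from PC so execution resumes at the
 * real instruction address.
 */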
2916 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2917 {
2918 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2919 env->hflags &= ~MIPS_HFLAG_M16;
2920 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2921 env->active_tc.PC &= ~(target_ulong) 1;
2922 }
2923 }
2924
2925 # if defined(TARGET_ABI_MIPSO32)
2926 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2927 static void setup_frame(int sig, struct target_sigaction * ka,
2928 target_sigset_t *set, CPUMIPSState *regs)
2929 {
2930 struct sigframe *frame;
2931 abi_ulong frame_addr;
2932 int i;
2933
2934 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2935 trace_user_setup_frame(regs, frame_addr);
2936 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2937 goto give_sigsegv;
2938 }
2939
2940 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2941
2942 setup_sigcontext(regs, &frame->sf_sc);
2943
2944 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2945 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2946 }
2947
2948 /*
2949 * Arguments to signal handler:
2950 *
2951 * a0 = signal number
2952 * a1 = 0 (should be cause)
2953 * a2 = pointer to struct sigcontext
2954 *
2955 * $25 and PC point to the signal handler, $29 points to the
2956 * struct sigframe.
2957 */
2958 regs->active_tc.gpr[ 4] = sig;
2959 regs->active_tc.gpr[ 5] = 0;
2960 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2961 regs->active_tc.gpr[29] = frame_addr;
2962 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2963 /* The original kernel code sets CP0_EPC to the handler,
2964 * since it returns to userland using eret;
2965 * we cannot do this here, so we must set PC directly */
2966 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2967 mips_set_hflags_isa_mode_from_pc(regs);
2968 unlock_user_struct(frame, frame_addr, 1);
2969 return;
2970
2971 give_sigsegv:
2972 force_sigsegv(sig);
2973 }
2974
2975 long do_sigreturn(CPUMIPSState *regs)
2976 {
2977 struct sigframe *frame;
2978 abi_ulong frame_addr;
2979 sigset_t blocked;
2980 target_sigset_t target_set;
2981 int i;
2982
2983 frame_addr = regs->active_tc.gpr[29];
2984 trace_user_do_sigreturn(regs, frame_addr);
2985 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2986 goto badframe;
2987
2988 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2989 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2990 }
2991
2992 target_to_host_sigset_internal(&blocked, &target_set);
2993 set_sigmask(&blocked);
2994
2995 restore_sigcontext(regs, &frame->sf_sc);
2996
2997 #if 0
2998 /*
2999 * Don't let your children do this ...
3000 */
3001 __asm__ __volatile__(
3002 "move\t$29, %0\n\t"
3003 "j\tsyscall_exit"
3004 :/* no outputs */
3005 :"r" (&regs));
3006 /* Unreached */
3007 #endif
3008
3009 regs->active_tc.PC = regs->CP0_EPC;
3010 mips_set_hflags_isa_mode_from_pc(regs);
3011 /* I am not sure this is right, but it seems to work;
3012 * maybe a problem with nested signals? */
3013 regs->CP0_EPC = 0;
3014 return -TARGET_QEMU_ESIGRETURN;
3015
3016 badframe:
3017 force_sig(TARGET_SIGSEGV);
3018 return -TARGET_QEMU_ESIGRETURN;
3019 }
3020 # endif /* O32 */
3021
3022 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3023 target_siginfo_t *info,
3024 target_sigset_t *set, CPUMIPSState *env)
3025 {
3026 struct target_rt_sigframe *frame;
3027 abi_ulong frame_addr;
3028 int i;
3029
3030 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3031 trace_user_setup_rt_frame(env, frame_addr);
3032 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3033 goto give_sigsegv;
3034 }
3035
3036 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3037
3038 tswap_siginfo(&frame->rs_info, info);
3039
3040 __put_user(0, &frame->rs_uc.tuc_flags);
3041 __put_user(0, &frame->rs_uc.tuc_link);
3042 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3043 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3044 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3045 &frame->rs_uc.tuc_stack.ss_flags);
3046
3047 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3048
3049 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3050 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3051 }
3052
3053 /*
3054 * Arguments to signal handler:
3055 *
3056 * a0 = signal number
3057 * a1 = pointer to siginfo_t
3058 * a2 = pointer to ucontext_t
3059 *
3060 * $25 and PC point to the signal handler, $29 points to the
3061 * struct sigframe.
3062 */
3063 env->active_tc.gpr[ 4] = sig;
3064 env->active_tc.gpr[ 5] = frame_addr
3065 + offsetof(struct target_rt_sigframe, rs_info);
3066 env->active_tc.gpr[ 6] = frame_addr
3067 + offsetof(struct target_rt_sigframe, rs_uc);
3068 env->active_tc.gpr[29] = frame_addr;
3069 env->active_tc.gpr[31] = frame_addr
3070 + offsetof(struct target_rt_sigframe, rs_code);
3071 /* The original kernel code sets CP0_EPC to the handler,
3072 * since it returns to userland using eret;
3073 * we cannot do this here, so we must set PC directly */
3074 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3075 mips_set_hflags_isa_mode_from_pc(env);
3076 unlock_user_struct(frame, frame_addr, 1);
3077 return;
3078
3079 give_sigsegv:
3080 unlock_user_struct(frame, frame_addr, 1);
3081 force_sigsegv(sig);
3082 }
3083
3084 long do_rt_sigreturn(CPUMIPSState *env)
3085 {
3086 struct target_rt_sigframe *frame;
3087 abi_ulong frame_addr;
3088 sigset_t blocked;
3089
3090 frame_addr = env->active_tc.gpr[29];
3091 trace_user_do_rt_sigreturn(env, frame_addr);
3092 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3093 goto badframe;
3094 }
3095
3096 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3097 set_sigmask(&blocked);
3098
3099 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3100
3101 if (do_sigaltstack(frame_addr +
3102 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3103 0, get_sp_from_cpustate(env)) == -EFAULT)
3104 goto badframe;
3105
3106 env->active_tc.PC = env->CP0_EPC;
3107 mips_set_hflags_isa_mode_from_pc(env);
3108 /* I am not sure this is right, but it seems to work;
3109 * maybe a problem with nested signals? */
3110 env->CP0_EPC = 0;
3111 return -TARGET_QEMU_ESIGRETURN;
3112
3113 badframe:
3114 force_sig(TARGET_SIGSEGV);
3115 return -TARGET_QEMU_ESIGRETURN;
3116 }
3117
3118 #elif defined(TARGET_SH4)
3119
3120 /*
3121 * code and data structures from linux kernel:
3122 * include/asm-sh/sigcontext.h
3123 * arch/sh/kernel/signal.c
3124 */
3125
3126 struct target_sigcontext {
3127 target_ulong oldmask;
3128
3129 /* CPU registers */
3130 target_ulong sc_gregs[16];
3131 target_ulong sc_pc;
3132 target_ulong sc_pr;
3133 target_ulong sc_sr;
3134 target_ulong sc_gbr;
3135 target_ulong sc_mach;
3136 target_ulong sc_macl;
3137
3138 /* FPU registers */
3139 target_ulong sc_fpregs[16];
3140 target_ulong sc_xfpregs[16];
3141 unsigned int sc_fpscr;
3142 unsigned int sc_fpul;
3143 unsigned int sc_ownedfp;
3144 };
3145
3146 struct target_sigframe
3147 {
3148 struct target_sigcontext sc;
3149 target_ulong extramask[TARGET_NSIG_WORDS-1];
3150 uint16_t retcode[3];
3151 };
3152
3153
3154 struct target_ucontext {
3155 target_ulong tuc_flags;
3156 struct target_ucontext *tuc_link;
3157 target_stack_t tuc_stack;
3158 struct target_sigcontext tuc_mcontext;
3159 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3160 };
3161
3162 struct target_rt_sigframe
3163 {
3164 struct target_siginfo info;
3165 struct target_ucontext uc;
3166 uint16_t retcode[3];
3167 };
3168
3169
3170 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3171 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
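/*
 * Intended use, as far as we read it (see the retcode[] arrays below):
 * MOVW(2) placed in retcode[0] is a PC-relative "mov.w" into r3 whose
 * displacement lands on retcode[2], so it loads the 16-bit syscall
 * number stored there; TRAP_NOARG ("trapa #0x10") in retcode[1] then
 * enters the kernel with that number in r3.
 */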
3172
3173 static abi_ulong get_sigframe(struct target_sigaction *ka,
3174 unsigned long sp, size_t frame_size)
3175 {
3176 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3177 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3178 }
3179
3180 return (sp - frame_size) & -8ul;
3181 }
3182
3183 /* Notice when we're in the middle of a gUSA region and reset.
3184 Note that this will only occur for !parallel_cpus, as we will
3185 translate such sequences differently in a parallel context. */
3186 static void unwind_gusa(CPUSH4State *regs)
3187 {
3188 /* If the stack pointer is sufficiently negative, and we haven't
3189 completed the sequence, then reset to the entry to the region. */
3190 /* ??? The SH4 kernel checks for an address above 0xC0000000.
3191 However, the page mappings in qemu linux-user aren't as restricted
3192 and we wind up with the normal stack mapped above 0xF0000000.
3193 That said, there is no reason why the kernel should be allowing
3194 a gUSA region that spans 1GB. Use a tighter check here, for what
3195 can actually be enabled by the immediate move. */
3196 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
3197 /* Reset the PC to before the gUSA region, as computed from
3198 R0 = region end, SP = -(region size), plus one more for the
3199 insn that actually initializes SP to the region size. */
3200 regs->pc = regs->gregs[0] + regs->gregs[15] - 2;
3201
3202 /* Reset the SP to the saved version in R1. */
3203 regs->gregs[15] = regs->gregs[1];
3204 }
3205 }
3206
3207 static void setup_sigcontext(struct target_sigcontext *sc,
3208 CPUSH4State *regs, unsigned long mask)
3209 {
3210 int i;
3211
3212 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3213 COPY(gregs[0]); COPY(gregs[1]);
3214 COPY(gregs[2]); COPY(gregs[3]);
3215 COPY(gregs[4]); COPY(gregs[5]);
3216 COPY(gregs[6]); COPY(gregs[7]);
3217 COPY(gregs[8]); COPY(gregs[9]);
3218 COPY(gregs[10]); COPY(gregs[11]);
3219 COPY(gregs[12]); COPY(gregs[13]);
3220 COPY(gregs[14]); COPY(gregs[15]);
3221 COPY(gbr); COPY(mach);
3222 COPY(macl); COPY(pr);
3223 COPY(sr); COPY(pc);
3224 #undef COPY
3225
3226 for (i=0; i<16; i++) {
3227 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3228 }
3229 __put_user(regs->fpscr, &sc->sc_fpscr);
3230 __put_user(regs->fpul, &sc->sc_fpul);
3231
3232 /* non-iBCS2 extensions.. */
3233 __put_user(mask, &sc->oldmask);
3234 }
3235
3236 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3237 {
3238 int i;
3239
3240 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3241 COPY(gregs[0]); COPY(gregs[1]);
3242 COPY(gregs[2]); COPY(gregs[3]);
3243 COPY(gregs[4]); COPY(gregs[5]);
3244 COPY(gregs[6]); COPY(gregs[7]);
3245 COPY(gregs[8]); COPY(gregs[9]);
3246 COPY(gregs[10]); COPY(gregs[11]);
3247 COPY(gregs[12]); COPY(gregs[13]);
3248 COPY(gregs[14]); COPY(gregs[15]);
3249 COPY(gbr); COPY(mach);
3250 COPY(macl); COPY(pr);
3251 COPY(sr); COPY(pc);
3252 #undef COPY
3253
3254 for (i=0; i<16; i++) {
3255 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3256 }
3257 __get_user(regs->fpscr, &sc->sc_fpscr);
3258 __get_user(regs->fpul, &sc->sc_fpul);
3259
3260 regs->tra = -1; /* disable syscall checks */
3261 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3262 }
3263
3264 static void setup_frame(int sig, struct target_sigaction *ka,
3265 target_sigset_t *set, CPUSH4State *regs)
3266 {
3267 struct target_sigframe *frame;
3268 abi_ulong frame_addr;
3269 int i;
3270
3271 unwind_gusa(regs);
3272
3273 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3274 trace_user_setup_frame(regs, frame_addr);
3275 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3276 goto give_sigsegv;
3277 }
3278
3279 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3280
3281 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3282 __put_user(set->sig[i + 1], &frame->extramask[i]);
3283 }
3284
3285 /* Set up to return from userspace. If provided, use a stub
3286 already in userspace. */
3287 if (ka->sa_flags & TARGET_SA_RESTORER) {
3288 regs->pr = (unsigned long) ka->sa_restorer;
3289 } else {
3290 /* Generate return code (system call to sigreturn) */
3291 abi_ulong retcode_addr = frame_addr +
3292 offsetof(struct target_sigframe, retcode);
3293 __put_user(MOVW(2), &frame->retcode[0]);
3294 __put_user(TRAP_NOARG, &frame->retcode[1]);
3295 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3296 regs->pr = (unsigned long) retcode_addr;
3297 }
3298
3299 /* Set up registers for signal handler */
3300 regs->gregs[15] = frame_addr;
3301 regs->gregs[4] = sig; /* Arg for signal handler */
3302 regs->gregs[5] = 0;
3303 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3304 regs->pc = (unsigned long) ka->_sa_handler;
3305 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3306
3307 unlock_user_struct(frame, frame_addr, 1);
3308 return;
3309
3310 give_sigsegv:
3311 unlock_user_struct(frame, frame_addr, 1);
3312 force_sigsegv(sig);
3313 }
3314
3315 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3316 target_siginfo_t *info,
3317 target_sigset_t *set, CPUSH4State *regs)
3318 {
3319 struct target_rt_sigframe *frame;
3320 abi_ulong frame_addr;
3321 int i;
3322
3323 unwind_gusa(regs);
3324
3325 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3326 trace_user_setup_rt_frame(regs, frame_addr);
3327 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3328 goto give_sigsegv;
3329 }
3330
3331 tswap_siginfo(&frame->info, info);
3332
3333 /* Create the ucontext. */
3334 __put_user(0, &frame->uc.tuc_flags);
3335 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3336 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3337 &frame->uc.tuc_stack.ss_sp);
3338 __put_user(sas_ss_flags(regs->gregs[15]),
3339 &frame->uc.tuc_stack.ss_flags);
3340 __put_user(target_sigaltstack_used.ss_size,
3341 &frame->uc.tuc_stack.ss_size);
3342 setup_sigcontext(&frame->uc.tuc_mcontext,
3343 regs, set->sig[0]);
3344 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3345 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3346 }
3347
3348 /* Set up to return from userspace. If provided, use a stub
3349 already in userspace. */
3350 if (ka->sa_flags & TARGET_SA_RESTORER) {
3351 regs->pr = (unsigned long) ka->sa_restorer;
3352 } else {
3353 /* Generate return code (system call to sigreturn) */
3354 abi_ulong retcode_addr = frame_addr +
3355 offsetof(struct target_rt_sigframe, retcode);
3356 __put_user(MOVW(2), &frame->retcode[0]);
3357 __put_user(TRAP_NOARG, &frame->retcode[1]);
3358 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3359 regs->pr = (unsigned long) retcode_addr;
3360 }
3361
3362 /* Set up registers for signal handler */
3363 regs->gregs[15] = frame_addr;
3364 regs->gregs[4] = sig; /* Arg for signal handler */
3365 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3366 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3367 regs->pc = (unsigned long) ka->_sa_handler;
3368 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3369
3370 unlock_user_struct(frame, frame_addr, 1);
3371 return;
3372
3373 give_sigsegv:
3374 unlock_user_struct(frame, frame_addr, 1);
3375 force_sigsegv(sig);
3376 }
3377
3378 long do_sigreturn(CPUSH4State *regs)
3379 {
3380 struct target_sigframe *frame;
3381 abi_ulong frame_addr;
3382 sigset_t blocked;
3383 target_sigset_t target_set;
3384 int i;
3385 int err = 0;
3386
3387 frame_addr = regs->gregs[15];
3388 trace_user_do_sigreturn(regs, frame_addr);
3389 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3390 goto badframe;
3391 }
3392
3393 __get_user(target_set.sig[0], &frame->sc.oldmask);
3394 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3395 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3396 }
3397
3398 if (err)
3399 goto badframe;
3400
3401 target_to_host_sigset_internal(&blocked, &target_set);
3402 set_sigmask(&blocked);
3403
3404 restore_sigcontext(regs, &frame->sc);
3405
3406 unlock_user_struct(frame, frame_addr, 0);
3407 return -TARGET_QEMU_ESIGRETURN;
3408
3409 badframe:
3410 unlock_user_struct(frame, frame_addr, 0);
3411 force_sig(TARGET_SIGSEGV);
3412 return -TARGET_QEMU_ESIGRETURN;
3413 }
3414
3415 long do_rt_sigreturn(CPUSH4State *regs)
3416 {
3417 struct target_rt_sigframe *frame;
3418 abi_ulong frame_addr;
3419 sigset_t blocked;
3420
3421 frame_addr = regs->gregs[15];
3422 trace_user_do_rt_sigreturn(regs, frame_addr);
3423 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3424 goto badframe;
3425 }
3426
3427 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3428 set_sigmask(&blocked);
3429
3430 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3431
3432 if (do_sigaltstack(frame_addr +
3433 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3434 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3435 goto badframe;
3436 }
3437
3438 unlock_user_struct(frame, frame_addr, 0);
3439 return -TARGET_QEMU_ESIGRETURN;
3440
3441 badframe:
3442 unlock_user_struct(frame, frame_addr, 0);
3443 force_sig(TARGET_SIGSEGV);
3444 return -TARGET_QEMU_ESIGRETURN;
3445 }
3446 #elif defined(TARGET_MICROBLAZE)
3447
3448 struct target_sigcontext {
3449 struct target_pt_regs regs; /* needs to be first */
3450 uint32_t oldmask;
3451 };
3452
3453 struct target_stack_t {
3454 abi_ulong ss_sp;
3455 int ss_flags;
3456 unsigned int ss_size;
3457 };
3458
3459 struct target_ucontext {
3460 abi_ulong tuc_flags;
3461 abi_ulong tuc_link;
3462 struct target_stack_t tuc_stack;
3463 struct target_sigcontext tuc_mcontext;
3464 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3465 };
3466
3467 /* Signal frames. */
3468 struct target_signal_frame {
3469 struct target_ucontext uc;
3470 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3471 uint32_t tramp[2];
3472 };
3473
3474 struct rt_signal_frame {
3475 siginfo_t info;
3476 ucontext_t uc;
3477 uint32_t tramp[2];
3478 };
3479
3480 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3481 {
3482 __put_user(env->regs[0], &sc->regs.r0);
3483 __put_user(env->regs[1], &sc->regs.r1);
3484 __put_user(env->regs[2], &sc->regs.r2);
3485 __put_user(env->regs[3], &sc->regs.r3);
3486 __put_user(env->regs[4], &sc->regs.r4);
3487 __put_user(env->regs[5], &sc->regs.r5);
3488 __put_user(env->regs[6], &sc->regs.r6);
3489 __put_user(env->regs[7], &sc->regs.r7);
3490 __put_user(env->regs[8], &sc->regs.r8);
3491 __put_user(env->regs[9], &sc->regs.r9);
3492 __put_user(env->regs[10], &sc->regs.r10);
3493 __put_user(env->regs[11], &sc->regs.r11);
3494 __put_user(env->regs[12], &sc->regs.r12);
3495 __put_user(env->regs[13], &sc->regs.r13);
3496 __put_user(env->regs[14], &sc->regs.r14);
3497 __put_user(env->regs[15], &sc->regs.r15);
3498 __put_user(env->regs[16], &sc->regs.r16);
3499 __put_user(env->regs[17], &sc->regs.r17);
3500 __put_user(env->regs[18], &sc->regs.r18);
3501 __put_user(env->regs[19], &sc->regs.r19);
3502 __put_user(env->regs[20], &sc->regs.r20);
3503 __put_user(env->regs[21], &sc->regs.r21);
3504 __put_user(env->regs[22], &sc->regs.r22);
3505 __put_user(env->regs[23], &sc->regs.r23);
3506 __put_user(env->regs[24], &sc->regs.r24);
3507 __put_user(env->regs[25], &sc->regs.r25);
3508 __put_user(env->regs[26], &sc->regs.r26);
3509 __put_user(env->regs[27], &sc->regs.r27);
3510 __put_user(env->regs[28], &sc->regs.r28);
3511 __put_user(env->regs[29], &sc->regs.r29);
3512 __put_user(env->regs[30], &sc->regs.r30);
3513 __put_user(env->regs[31], &sc->regs.r31);
3514 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3515 }
3516
3517 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3518 {
3519 __get_user(env->regs[0], &sc->regs.r0);
3520 __get_user(env->regs[1], &sc->regs.r1);
3521 __get_user(env->regs[2], &sc->regs.r2);
3522 __get_user(env->regs[3], &sc->regs.r3);
3523 __get_user(env->regs[4], &sc->regs.r4);
3524 __get_user(env->regs[5], &sc->regs.r5);
3525 __get_user(env->regs[6], &sc->regs.r6);
3526 __get_user(env->regs[7], &sc->regs.r7);
3527 __get_user(env->regs[8], &sc->regs.r8);
3528 __get_user(env->regs[9], &sc->regs.r9);
3529 __get_user(env->regs[10], &sc->regs.r10);
3530 __get_user(env->regs[11], &sc->regs.r11);
3531 __get_user(env->regs[12], &sc->regs.r12);
3532 __get_user(env->regs[13], &sc->regs.r13);
3533 __get_user(env->regs[14], &sc->regs.r14);
3534 __get_user(env->regs[15], &sc->regs.r15);
3535 __get_user(env->regs[16], &sc->regs.r16);
3536 __get_user(env->regs[17], &sc->regs.r17);
3537 __get_user(env->regs[18], &sc->regs.r18);
3538 __get_user(env->regs[19], &sc->regs.r19);
3539 __get_user(env->regs[20], &sc->regs.r20);
3540 __get_user(env->regs[21], &sc->regs.r21);
3541 __get_user(env->regs[22], &sc->regs.r22);
3542 __get_user(env->regs[23], &sc->regs.r23);
3543 __get_user(env->regs[24], &sc->regs.r24);
3544 __get_user(env->regs[25], &sc->regs.r25);
3545 __get_user(env->regs[26], &sc->regs.r26);
3546 __get_user(env->regs[27], &sc->regs.r27);
3547 __get_user(env->regs[28], &sc->regs.r28);
3548 __get_user(env->regs[29], &sc->regs.r29);
3549 __get_user(env->regs[30], &sc->regs.r30);
3550 __get_user(env->regs[31], &sc->regs.r31);
3551 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3552 }
3553
3554 static abi_ulong get_sigframe(struct target_sigaction *ka,
3555 CPUMBState *env, int frame_size)
3556 {
3557 abi_ulong sp = env->regs[1];
3558
3559 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3560 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3561 }
3562
3563 return ((sp - frame_size) & -8UL);
3564 }
3565
3566 static void setup_frame(int sig, struct target_sigaction *ka,
3567 target_sigset_t *set, CPUMBState *env)
3568 {
3569 struct target_signal_frame *frame;
3570 abi_ulong frame_addr;
3571 int i;
3572
3573 frame_addr = get_sigframe(ka, env, sizeof *frame);
3574 trace_user_setup_frame(env, frame_addr);
3575 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3576 goto badframe;
3577
3578 /* Save the mask. */
3579 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3580
3581 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3582 __put_user(set->sig[i], &frame->extramask[i - 1]);
3583 }
3584
3585 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3586
3587 /* Set up to return from userspace. If provided, use a stub
3588 already in userspace. */
3589 /* the minus-8 offset caters for the "rtsd r15,8" return offset */
3590 if (ka->sa_flags & TARGET_SA_RESTORER) {
3591 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3592 } else {
3593 uint32_t t;
3594 /* Note, these encodings are _big endian_! */
3595 /* addi r12, r0, __NR_sigreturn */
3596 t = 0x31800000UL | TARGET_NR_sigreturn;
3597 __put_user(t, frame->tramp + 0);
3598 /* brki r14, 0x8 */
3599 t = 0xb9cc0008UL;
3600 __put_user(t, frame->tramp + 1);
3601
3602 /* Return from sighandler will jump to the tramp.
3603 Negative 8 offset because return is rtsd r15, 8 */
3604 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3605 - 8;
3606 }
3607
3608 /* Set up registers for signal handler */
3609 env->regs[1] = frame_addr;
3610 /* Signal handler args: */
3611 env->regs[5] = sig; /* Arg 0: signum */
3612 env->regs[6] = 0;
3613 /* arg 1: sigcontext */
3614 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3615
3616 /* Offset of 4 to handle microblaze rtid r14, 0 */
3617 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3618
3619 unlock_user_struct(frame, frame_addr, 1);
3620 return;
3621 badframe:
3622 force_sigsegv(sig);
3623 }
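/*
 * Note on the trampoline above: MicroBlaze passes the syscall number in
 * r12, so "addik r12, r0, __NR_sigreturn" followed by "brki r14, 0x8"
 * (a trap to the kernel entry at vector 0x8) performs the sigreturn.
 * r15 points 8 bytes before the trampoline because the handler returns
 * with "rtsd r15, 8".  The ABI details here are our assumption, not
 * stated elsewhere in this file.
 */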
3624
3625 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3626 target_siginfo_t *info,
3627 target_sigset_t *set, CPUMBState *env)
3628 {
3629 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3630 }
3631
3632 long do_sigreturn(CPUMBState *env)
3633 {
3634 struct target_signal_frame *frame;
3635 abi_ulong frame_addr;
3636 target_sigset_t target_set;
3637 sigset_t set;
3638 int i;
3639
3640 frame_addr = env->regs[R_SP];
3641 trace_user_do_sigreturn(env, frame_addr);
3642 /* Make sure the guest isn't playing games. */
3643 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3644 goto badframe;
3645
3646 /* Restore blocked signals */
3647 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3648 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3649 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3650 }
3651 target_to_host_sigset_internal(&set, &target_set);
3652 set_sigmask(&set);
3653
3654 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3655 /* We got here through a sigreturn syscall; our path back is via an
3656 rtb insn, so set up r14 for that. */
3657 env->regs[14] = env->sregs[SR_PC];
3658
3659 unlock_user_struct(frame, frame_addr, 0);
3660 return -TARGET_QEMU_ESIGRETURN;
3661 badframe:
3662 force_sig(TARGET_SIGSEGV);
3663 return -TARGET_QEMU_ESIGRETURN;
3664 }
3665
3666 long do_rt_sigreturn(CPUMBState *env)
3667 {
3668 trace_user_do_rt_sigreturn(env, 0);
3669 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3670 return -TARGET_ENOSYS;
3671 }
3672
3673 #elif defined(TARGET_CRIS)
3674
3675 struct target_sigcontext {
3676 struct target_pt_regs regs; /* needs to be first */
3677 uint32_t oldmask;
3678 uint32_t usp; /* usp before stacking this gunk on it */
3679 };
3680
3681 /* Signal frames. */
3682 struct target_signal_frame {
3683 struct target_sigcontext sc;
3684 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3685 uint16_t retcode[4]; /* Trampoline code. */
3686 };
3687
3688 struct rt_signal_frame {
3689 siginfo_t *pinfo;
3690 void *puc;
3691 siginfo_t info;
3692 ucontext_t uc;
3693 uint16_t retcode[4]; /* Trampoline code. */
3694 };
3695
3696 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3697 {
3698 __put_user(env->regs[0], &sc->regs.r0);
3699 __put_user(env->regs[1], &sc->regs.r1);
3700 __put_user(env->regs[2], &sc->regs.r2);
3701 __put_user(env->regs[3], &sc->regs.r3);
3702 __put_user(env->regs[4], &sc->regs.r4);
3703 __put_user(env->regs[5], &sc->regs.r5);
3704 __put_user(env->regs[6], &sc->regs.r6);
3705 __put_user(env->regs[7], &sc->regs.r7);
3706 __put_user(env->regs[8], &sc->regs.r8);
3707 __put_user(env->regs[9], &sc->regs.r9);
3708 __put_user(env->regs[10], &sc->regs.r10);
3709 __put_user(env->regs[11], &sc->regs.r11);
3710 __put_user(env->regs[12], &sc->regs.r12);
3711 __put_user(env->regs[13], &sc->regs.r13);
3712 __put_user(env->regs[14], &sc->usp);
3713 __put_user(env->regs[15], &sc->regs.acr);
3714 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3715 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3716 __put_user(env->pc, &sc->regs.erp);
3717 }
3718
3719 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3720 {
3721 __get_user(env->regs[0], &sc->regs.r0);
3722 __get_user(env->regs[1], &sc->regs.r1);
3723 __get_user(env->regs[2], &sc->regs.r2);
3724 __get_user(env->regs[3], &sc->regs.r3);
3725 __get_user(env->regs[4], &sc->regs.r4);
3726 __get_user(env->regs[5], &sc->regs.r5);
3727 __get_user(env->regs[6], &sc->regs.r6);
3728 __get_user(env->regs[7], &sc->regs.r7);
3729 __get_user(env->regs[8], &sc->regs.r8);
3730 __get_user(env->regs[9], &sc->regs.r9);
3731 __get_user(env->regs[10], &sc->regs.r10);
3732 __get_user(env->regs[11], &sc->regs.r11);
3733 __get_user(env->regs[12], &sc->regs.r12);
3734 __get_user(env->regs[13], &sc->regs.r13);
3735 __get_user(env->regs[14], &sc->usp);
3736 __get_user(env->regs[15], &sc->regs.acr);
3737 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3738 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3739 __get_user(env->pc, &sc->regs.erp);
3740 }
3741
3742 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3743 {
3744 abi_ulong sp;
3745 /* Align the stack downwards to 4. */
3746 sp = (env->regs[R_SP] & ~3);
3747 return sp - framesize;
3748 }
3749
3750 static void setup_frame(int sig, struct target_sigaction *ka,
3751 target_sigset_t *set, CPUCRISState *env)
3752 {
3753 struct target_signal_frame *frame;
3754 abi_ulong frame_addr;
3755 int i;
3756
3757 frame_addr = get_sigframe(env, sizeof *frame);
3758 trace_user_setup_frame(env, frame_addr);
3759 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3760 goto badframe;
3761
3762 /*
3763 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3764 * use this trampoline anymore but it sets it up for GDB.
3765 * In QEMU, using the trampoline simplifies things a bit so we use it.
3766 *
3767 * This is movu.w __NR_sigreturn, r9; break 13;
3768 */
3769 __put_user(0x9c5f, frame->retcode+0);
3770 __put_user(TARGET_NR_sigreturn,
3771 frame->retcode + 1);
3772 __put_user(0xe93d, frame->retcode + 2);
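/* retcode[0] is the movu.w opcode word, retcode[1] its immediate
   (the sigreturn syscall number) and retcode[2] the "break 13" that
   enters the kernel. */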
3773
3774 /* Save the mask. */
3775 __put_user(set->sig[0], &frame->sc.oldmask);
3776
3777 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3778 __put_user(set->sig[i], &frame->extramask[i - 1]);
3779 }
3780
3781 setup_sigcontext(&frame->sc, env);
3782
3783 /* Move the stack and setup the arguments for the handler. */
3784 env->regs[R_SP] = frame_addr;
3785 env->regs[10] = sig;
3786 env->pc = (unsigned long) ka->_sa_handler;
3787 /* Link SRP so the guest returns through the trampoline. */
3788 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3789
3790 unlock_user_struct(frame, frame_addr, 1);
3791 return;
3792 badframe:
3793 force_sigsegv(sig);
3794 }
3795
3796 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3797 target_siginfo_t *info,
3798 target_sigset_t *set, CPUCRISState *env)
3799 {
3800 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3801 }
3802
3803 long do_sigreturn(CPUCRISState *env)
3804 {
3805 struct target_signal_frame *frame;
3806 abi_ulong frame_addr;
3807 target_sigset_t target_set;
3808 sigset_t set;
3809 int i;
3810
3811 frame_addr = env->regs[R_SP];
3812 trace_user_do_sigreturn(env, frame_addr);
3813 /* Make sure the guest isn't playing games. */
3814 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3815 goto badframe;
3816 }
3817
3818 /* Restore blocked signals */
3819 __get_user(target_set.sig[0], &frame->sc.oldmask);
3820 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3821 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3822 }
3823 target_to_host_sigset_internal(&set, &target_set);
3824 set_sigmask(&set);
3825
3826 restore_sigcontext(&frame->sc, env);
3827 unlock_user_struct(frame, frame_addr, 0);
3828 return -TARGET_QEMU_ESIGRETURN;
3829 badframe:
3830 force_sig(TARGET_SIGSEGV);
3831 return -TARGET_QEMU_ESIGRETURN;
3832 }
3833
3834 long do_rt_sigreturn(CPUCRISState *env)
3835 {
3836 trace_user_do_rt_sigreturn(env, 0);
3837 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3838 return -TARGET_ENOSYS;
3839 }
3840
3841 #elif defined(TARGET_NIOS2)
3842
3843 #define MCONTEXT_VERSION 2
3844
3845 struct target_sigcontext {
3846 int version;
3847 unsigned long gregs[32];
3848 };
3849
3850 struct target_ucontext {
3851 abi_ulong tuc_flags;
3852 abi_ulong tuc_link;
3853 target_stack_t tuc_stack;
3854 struct target_sigcontext tuc_mcontext;
3855 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3856 };
3857
3858 struct target_rt_sigframe {
3859 struct target_siginfo info;
3860 struct target_ucontext uc;
3861 };
3862
3863 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
3864 {
3865 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
3866 #ifdef CONFIG_STACK_GROWSUP
3867 return target_sigaltstack_used.ss_sp;
3868 #else
3869 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3870 #endif
3871 }
3872 return sp;
3873 }
3874
3875 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
3876 {
3877 unsigned long *gregs = uc->tuc_mcontext.gregs;
3878
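/* Layout used below: gregs[0..22] hold r1..r23, then ra, fp and gp;
   gregs[26] is left unwritten, and ea and sp occupy the last slots. */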
3879 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
3880 __put_user(env->regs[1], &gregs[0]);
3881 __put_user(env->regs[2], &gregs[1]);
3882 __put_user(env->regs[3], &gregs[2]);
3883 __put_user(env->regs[4], &gregs[3]);
3884 __put_user(env->regs[5], &gregs[4]);
3885 __put_user(env->regs[6], &gregs[5]);
3886 __put_user(env->regs[7], &gregs[6]);
3887 __put_user(env->regs[8], &gregs[7]);
3888 __put_user(env->regs[9], &gregs[8]);
3889 __put_user(env->regs[10], &gregs[9]);
3890 __put_user(env->regs[11], &gregs[10]);
3891 __put_user(env->regs[12], &gregs[11]);
3892 __put_user(env->regs[13], &gregs[12]);
3893 __put_user(env->regs[14], &gregs[13]);
3894 __put_user(env->regs[15], &gregs[14]);
3895 __put_user(env->regs[16], &gregs[15]);
3896 __put_user(env->regs[17], &gregs[16]);
3897 __put_user(env->regs[18], &gregs[17]);
3898 __put_user(env->regs[19], &gregs[18]);
3899 __put_user(env->regs[20], &gregs[19]);
3900 __put_user(env->regs[21], &gregs[20]);
3901 __put_user(env->regs[22], &gregs[21]);
3902 __put_user(env->regs[23], &gregs[22]);
3903 __put_user(env->regs[R_RA], &gregs[23]);
3904 __put_user(env->regs[R_FP], &gregs[24]);
3905 __put_user(env->regs[R_GP], &gregs[25]);
3906 __put_user(env->regs[R_EA], &gregs[27]);
3907 __put_user(env->regs[R_SP], &gregs[28]);
3908
3909 return 0;
3910 }
3911
3912 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
3913 int *pr2)
3914 {
3915 int temp;
3916 abi_ulong off, frame_addr = env->regs[R_SP];
3917 unsigned long *gregs = uc->tuc_mcontext.gregs;
3918 int err;
3919
3920 /* Always make any pending restarted system calls return -EINTR */
3921 /* current->restart_block.fn = do_no_restart_syscall; */
3922
3923 __get_user(temp, &uc->tuc_mcontext.version);
3924 if (temp != MCONTEXT_VERSION) {
3925 return 1;
3926 }
3927
3928 /* restore passed registers */
3929 __get_user(env->regs[1], &gregs[0]);
3930 __get_user(env->regs[2], &gregs[1]);
3931 __get_user(env->regs[3], &gregs[2]);
3932 __get_user(env->regs[4], &gregs[3]);
3933 __get_user(env->regs[5], &gregs[4]);
3934 __get_user(env->regs[6], &gregs[5]);
3935 __get_user(env->regs[7], &gregs[6]);
3936 __get_user(env->regs[8], &gregs[7]);
3937 __get_user(env->regs[9], &gregs[8]);
3938 __get_user(env->regs[10], &gregs[9]);
3939 __get_user(env->regs[11], &gregs[10]);
3940 __get_user(env->regs[12], &gregs[11]);
3941 __get_user(env->regs[13], &gregs[12]);
3942 __get_user(env->regs[14], &gregs[13]);
3943 __get_user(env->regs[15], &gregs[14]);
3944 __get_user(env->regs[16], &gregs[15]);
3945 __get_user(env->regs[17], &gregs[16]);
3946 __get_user(env->regs[18], &gregs[17]);
3947 __get_user(env->regs[19], &gregs[18]);
3948 __get_user(env->regs[20], &gregs[19]);
3949 __get_user(env->regs[21], &gregs[20]);
3950 __get_user(env->regs[22], &gregs[21]);
3951 __get_user(env->regs[23], &gregs[22]);
3952 /* gregs[23] is handled below */
3953 /* TODO: verify whether this should be settable */
3954 __get_user(env->regs[R_FP], &gregs[24]);
3955 /* TODO: verify whether this should be settable */
3956 __get_user(env->regs[R_GP], &gregs[25]);
3957 /* Not really necessary: there are no user-settable bits here */
3958 __get_user(temp, &gregs[26]);
3959 __get_user(env->regs[R_EA], &gregs[27]);
3960
3961 __get_user(env->regs[R_RA], &gregs[23]);
3962 __get_user(env->regs[R_SP], &gregs[28]);
3963
3964 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
3965 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
3966 if (err == -EFAULT) {
3967 return 1;
3968 }
3969
3970 *pr2 = env->regs[2];
3971 return 0;
3972 }
3973
3974 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
3975 size_t frame_size)
3976 {
3977 unsigned long usp;
3978
3979 /* Default to using normal stack. */
3980 usp = env->regs[R_SP];
3981
3982 /* This is the X/Open sanctioned signal stack switching. */
3983 usp = sigsp(usp, ka);
3984
3985 /* TODO: verify whether this should be 32- or 64-bit aligned */
3986 return (void *)((usp - frame_size) & -8UL);
3987 }
3988
3989 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3990 target_siginfo_t *info,
3991 target_sigset_t *set,
3992 CPUNios2State *env)
3993 {
3994 struct target_rt_sigframe *frame;
3995 int i, err = 0;
3996
3997 frame = get_sigframe(ka, env, sizeof(*frame));
3998
3999 if (ka->sa_flags & SA_SIGINFO) {
4000 tswap_siginfo(&frame->info, info);
4001 }
4002
4003 /* Create the ucontext. */
4004 __put_user(0, &frame->uc.tuc_flags);
4005 __put_user(0, &frame->uc.tuc_link);
4006 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4007 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4008 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4009 err |= rt_setup_ucontext(&frame->uc, env);
4010 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4011 __put_user((abi_ulong)set->sig[i],
4012 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4013 }
4014
4015 if (err) {
4016 goto give_sigsegv;
4017 }
4018
4019 /* Set up to return from userspace; jump to fixed address sigreturn
4020 trampoline on kuser page. */
4021 env->regs[R_RA] = (unsigned long) (0x1044);
4022
4023 /* Set up registers for signal handler */
4024 env->regs[R_SP] = (unsigned long) frame;
4025 env->regs[4] = (unsigned long) sig;
4026 env->regs[5] = (unsigned long) &frame->info;
4027 env->regs[6] = (unsigned long) &frame->uc;
4028 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4029 return;
4030
4031 give_sigsegv:
4032 if (sig == TARGET_SIGSEGV) {
4033 ka->_sa_handler = TARGET_SIG_DFL;
4034 }
4035 force_sigsegv(sig);
4036 return;
4037 }
4038
4039 long do_sigreturn(CPUNios2State *env)
4040 {
4041 trace_user_do_sigreturn(env, 0);
4042 fprintf(stderr, "do_sigreturn: not implemented\n");
4043 return -TARGET_ENOSYS;
4044 }
4045
4046 long do_rt_sigreturn(CPUNios2State *env)
4047 {
4048 /* TODO: verify that we can follow the stack back */
4049 abi_ulong frame_addr = env->regs[R_SP];
4050 struct target_rt_sigframe *frame;
4051 sigset_t set;
4052 int rval;
4053
4054 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4055 goto badframe;
4056 }
4057
4058 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4059 do_sigprocmask(SIG_SETMASK, &set, NULL);
4060
4061 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4062 goto badframe;
4063 }
4064
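/* Hand back the restored r2 so the syscall-return path re-installs
   the value the interrupted code expects rather than a sigreturn
   status code. */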
4065 unlock_user_struct(frame, frame_addr, 0);
4066 return rval;
4067
4068 badframe:
4069 unlock_user_struct(frame, frame_addr, 0);
4070 force_sig(TARGET_SIGSEGV);
4071 return 0;
4072 }
4073 /* TARGET_NIOS2 */
4074
4075 #elif defined(TARGET_OPENRISC)
4076
4077 struct target_sigcontext {
4078 struct target_pt_regs regs;
4079 abi_ulong oldmask;
4080 abi_ulong usp;
4081 };
4082
4083 struct target_ucontext {
4084 abi_ulong tuc_flags;
4085 abi_ulong tuc_link;
4086 target_stack_t tuc_stack;
4087 struct target_sigcontext tuc_mcontext;
4088 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4089 };
4090
4091 struct target_rt_sigframe {
4092 abi_ulong pinfo;
4093 uint64_t puc;
4094 struct target_siginfo info;
4095 struct target_sigcontext sc;
4096 struct target_ucontext uc;
4097 unsigned char retcode[16]; /* trampoline code */
4098 };
4099
4100 /* This is the asm-generic/ucontext.h version */
4101 #if 0
4102 static int restore_sigcontext(CPUOpenRISCState *regs,
4103 struct target_sigcontext *sc)
4104 {
4105 unsigned int err = 0;
4106 unsigned long old_usp;
4107
4108 /* Always make any pending restarted system call return -EINTR */
4109 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4110
4111 /* restore the regs from &sc->regs (same as sc, since regs is first)
4112 * (sc is already checked for VERIFY_READ since the sigframe was
4113 * checked in sys_sigreturn previously)
4114 */
4115
4116 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4117 goto badframe;
4118 }
4119
4120 /* make sure the U-flag is set so user-mode cannot fool us */
4121
4122 regs->sr &= ~SR_SM;
4123
4124 /* restore the old USP as it was before we stacked the sc etc.
4125 * (we cannot just pop the sigcontext since we aligned the sp and
4126 * stuff after pushing it)
4127 */
4128
4129 __get_user(old_usp, &sc->usp);
4130 phx_signal("old_usp 0x%lx", old_usp);
4131
4132 __PHX__ REALLY /* ??? */
4133 wrusp(old_usp);
4134 regs->gpr[1] = old_usp;
4135
4136 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4137 * after this completes, but we don't use that mechanism. maybe we can
4138 * use it now ?
4139 */
4140
4141 return err;
4142
4143 badframe:
4144 return 1;
4145 }
4146 #endif
4147
4148 /* Set up a signal frame. */
4149
4150 static void setup_sigcontext(struct target_sigcontext *sc,
4151 CPUOpenRISCState *regs,
4152 unsigned long mask)
4153 {
4154 unsigned long usp = cpu_get_gpr(regs, 1);
4155
4156 /* copy the regs. they are first in sc so we can use sc directly */
4157
4158 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4159
4160 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4161 the signal handler. The frametype will be restored to its previous
4162 value in restore_sigcontext. */
4163 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4164
4165 /* then some other stuff */
4166 __put_user(mask, &sc->oldmask);
4167 __put_user(usp, &sc->usp);
4168 }
4169
4170 static inline unsigned long align_sigframe(unsigned long sp)
4171 {
4172 return sp & ~3UL;
4173 }
4174
4175 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4176 CPUOpenRISCState *regs,
4177 size_t frame_size)
4178 {
4179 unsigned long sp = cpu_get_gpr(regs, 1);
4180 int onsigstack = on_sig_stack(sp);
4181
4182 /* redzone */
4183 /* This is the X/Open sanctioned signal stack switching. */
4184 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4185 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4186 }
4187
4188 sp = align_sigframe(sp - frame_size);
4189
4190 /*
4191 * If we are on the alternate signal stack and would overflow it, don't.
4192 * Return an always-bogus address instead so we will die with SIGSEGV.
4193 */
4194
4195 if (onsigstack && !likely(on_sig_stack(sp))) {
4196 return -1L;
4197 }
4198
4199 return sp;
4200 }
4201
4202 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4203 target_siginfo_t *info,
4204 target_sigset_t *set, CPUOpenRISCState *env)
4205 {
4206 int err = 0;
4207 abi_ulong frame_addr;
4208 unsigned long return_ip;
4209 struct target_rt_sigframe *frame;
4210 abi_ulong info_addr, uc_addr;
4211
4212 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4213 trace_user_setup_rt_frame(env, frame_addr);
4214 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4215 goto give_sigsegv;
4216 }
4217
4218 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4219 __put_user(info_addr, &frame->pinfo);
4220 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4221 __put_user(uc_addr, &frame->puc);
4222
4223 if (ka->sa_flags & SA_SIGINFO) {
4224 tswap_siginfo(&frame->info, info);
4225 }
4226
4227 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4228 __put_user(0, &frame->uc.tuc_flags);
4229 __put_user(0, &frame->uc.tuc_link);
4230 __put_user(target_sigaltstack_used.ss_sp,
4231 &frame->uc.tuc_stack.ss_sp);
4232 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4233 &frame->uc.tuc_stack.ss_flags);
4234 __put_user(target_sigaltstack_used.ss_size,
4235 &frame->uc.tuc_stack.ss_size);
4236 setup_sigcontext(&frame->sc, env, set->sig[0]);
4237
4238 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4239
4240 /* trampoline - the desired return ip is the retcode itself */
4241 return_ip = frame_addr + offsetof(typeof(*frame), retcode);
4242 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */
4243 __put_user(0xa960, (short *)(frame->retcode + 0));
4244 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4245 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4246 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
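/* 0xa960 is the upper halfword of l.ori r11,r0,K (the next halfword
   supplies K, the syscall number); 0x20000001 is l.sys 1 and
   0x15000000 is l.nop, padding the trampoline. */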
4247
4248 if (err) {
4249 goto give_sigsegv;
4250 }
4251
4252 /* TODO what is the current->exec_domain stuff and invmap ? */
4253
4254 /* Set up registers for signal handler */
4255 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4256 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4257 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4258 cpu_set_gpr(env, 4, info_addr); /* arg 2: (siginfo_t*) */
4259 cpu_set_gpr(env, 5, uc_addr); /* arg 3: ucontext */
4260
4261 /* actually move the usp to reflect the stacked frame */
4262 cpu_set_gpr(env, 1, frame_addr);
4263
4264 return;
4265
4266 give_sigsegv:
4267 unlock_user_struct(frame, frame_addr, 1);
4268 force_sigsegv(sig);
4269 }
4270
4271 long do_sigreturn(CPUOpenRISCState *env)
4272 {
4273 trace_user_do_sigreturn(env, 0);
4274 fprintf(stderr, "do_sigreturn: not implemented\n");
4275 return -TARGET_ENOSYS;
4276 }
4277
4278 long do_rt_sigreturn(CPUOpenRISCState *env)
4279 {
4280 trace_user_do_rt_sigreturn(env, 0);
4281 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4282 return -TARGET_ENOSYS;
4283 }
4284 /* TARGET_OPENRISC */
4285
4286 #elif defined(TARGET_S390X)
4287
4288 #define __NUM_GPRS 16
4289 #define __NUM_FPRS 16
4290 #define __NUM_ACRS 16
4291
4292 #define S390_SYSCALL_SIZE 2
4293 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4294
4295 #define _SIGCONTEXT_NSIG 64
4296 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4297 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4298 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4299 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4300 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
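/* 0x0a is the "svc" opcode; OR-ing in a syscall number below yields
   the two-byte "svc __NR_(rt_)sigreturn" stub stored in retcode[]. */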
4301
4302 typedef struct {
4303 target_psw_t psw;
4304 target_ulong gprs[__NUM_GPRS];
4305 unsigned int acrs[__NUM_ACRS];
4306 } target_s390_regs_common;
4307
4308 typedef struct {
4309 unsigned int fpc;
4310 double fprs[__NUM_FPRS];
4311 } target_s390_fp_regs;
4312
4313 typedef struct {
4314 target_s390_regs_common regs;
4315 target_s390_fp_regs fpregs;
4316 } target_sigregs;
4317
4318 struct target_sigcontext {
4319 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4320 target_sigregs *sregs;
4321 };
4322
4323 typedef struct {
4324 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4325 struct target_sigcontext sc;
4326 target_sigregs sregs;
4327 int signo;
4328 uint8_t retcode[S390_SYSCALL_SIZE];
4329 } sigframe;
4330
4331 struct target_ucontext {
4332 target_ulong tuc_flags;
4333 struct target_ucontext *tuc_link;
4334 target_stack_t tuc_stack;
4335 target_sigregs tuc_mcontext;
4336 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4337 };
4338
4339 typedef struct {
4340 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4341 uint8_t retcode[S390_SYSCALL_SIZE];
4342 struct target_siginfo info;
4343 struct target_ucontext uc;
4344 } rt_sigframe;
4345
4346 static inline abi_ulong
4347 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4348 {
4349 abi_ulong sp;
4350
4351 /* Default to using normal stack */
4352 sp = env->regs[15];
4353
4354 /* This is the X/Open sanctioned signal stack switching. */
4355 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4356 if (!sas_ss_flags(sp)) {
4357 sp = target_sigaltstack_used.ss_sp +
4358 target_sigaltstack_used.ss_size;
4359 }
4360 }
4361
4362 /* This is the legacy signal stack switching. */
4363 else if (/* FIXME !user_mode(regs) */ 0 &&
4364 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4365 ka->sa_restorer) {
4366 sp = (abi_ulong) ka->sa_restorer;
4367 }
4368
4369 return (sp - frame_size) & -8ul;
4370 }
4371
4372 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4373 {
4374 int i;
4375 //save_access_regs(current->thread.acrs); FIXME
4376
4377 /* Copy a 'clean' PSW mask to the user to avoid leaking
4378 information about whether PER is currently on. */
4379 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4380 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4381 for (i = 0; i < 16; i++) {
4382 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4383 }
4384 for (i = 0; i < 16; i++) {
4385 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4386 }
4387 /*
4388 * We have to store the fp registers to current->thread.fp_regs
4389 * to merge them with the emulated registers.
4390 */
4391 //save_fp_regs(&current->thread.fp_regs); FIXME
4392 for (i = 0; i < 16; i++) {
4393 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4394 }
4395 }
4396
4397 static void setup_frame(int sig, struct target_sigaction *ka,
4398 target_sigset_t *set, CPUS390XState *env)
4399 {
4400 sigframe *frame;
4401 abi_ulong frame_addr;
4402
4403 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4404 trace_user_setup_frame(env, frame_addr);
4405 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4406 goto give_sigsegv;
4407 }
4408
4409 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4410
4411 save_sigregs(env, &frame->sregs);
4412
4413 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4414 (abi_ulong *)&frame->sc.sregs);
4415
4416 /* Set up to return from userspace. If provided, use a stub
4417 already in userspace. */
4418 if (ka->sa_flags & TARGET_SA_RESTORER) {
4419 env->regs[14] = (unsigned long)
4420 ka->sa_restorer | PSW_ADDR_AMODE;
4421 } else {
4422 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4423 | PSW_ADDR_AMODE;
4424 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4425 (uint16_t *)(frame->retcode));
4426 }
4427
4428 /* Set up backchain. */
4429 __put_user(env->regs[15], (abi_ulong *) frame);
4430
4431 /* Set up registers for signal handler */
4432 env->regs[15] = frame_addr;
4433 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4434
4435 env->regs[2] = sig; //map_signal(sig);
4436 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4437
4438 /* We forgot to include these in the sigcontext.
4439 To avoid breaking binary compatibility, they are passed as args. */
4440 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4441 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4442
4443 /* Place signal number on stack to allow backtrace from handler. */
4444 __put_user(env->regs[2], &frame->signo);
4445 unlock_user_struct(frame, frame_addr, 1);
4446 return;
4447
4448 give_sigsegv:
4449 force_sigsegv(sig);
4450 }
4451
4452 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4453 target_siginfo_t *info,
4454 target_sigset_t *set, CPUS390XState *env)
4455 {
4456 int i;
4457 rt_sigframe *frame;
4458 abi_ulong frame_addr;
4459
4460 frame_addr = get_sigframe(ka, env, sizeof *frame);
4461 trace_user_setup_rt_frame(env, frame_addr);
4462 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4463 goto give_sigsegv;
4464 }
4465
4466 tswap_siginfo(&frame->info, info);
4467
4468 /* Create the ucontext. */
4469 __put_user(0, &frame->uc.tuc_flags);
4470 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4471 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4472 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4473 &frame->uc.tuc_stack.ss_flags);
4474 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4475 save_sigregs(env, &frame->uc.tuc_mcontext);
4476 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4477 __put_user((abi_ulong)set->sig[i],
4478 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4479 }
4480
4481 /* Set up to return from userspace. If provided, use a stub
4482 already in userspace. */
4483 if (ka->sa_flags & TARGET_SA_RESTORER) {
4484 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4485 } else {
4486 env->regs[14] = (frame_addr + offsetof(typeof(*frame), retcode)) | PSW_ADDR_AMODE;
4487 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4488 (uint16_t *)(frame->retcode));
4489 }
4490
4491 /* Set up backchain. */
4492 __put_user(env->regs[15], (abi_ulong *) frame);
4493
4494 /* Set up registers for signal handler */
4495 env->regs[15] = frame_addr;
4496 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4497
4498 env->regs[2] = sig; //map_signal(sig);
4499 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4500 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4501 return;
4502
4503 give_sigsegv:
4504 force_sigsegv(sig);
4505 }
4506
4507 static int
4508 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4509 {
4510 int err = 0;
4511 int i;
4512
4513 for (i = 0; i < 16; i++) {
4514 __get_user(env->regs[i], &sc->regs.gprs[i]);
4515 }
4516
4517 __get_user(env->psw.mask, &sc->regs.psw.mask);
4518 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4519 (unsigned long long)env->psw.addr);
4520 __get_user(env->psw.addr, &sc->regs.psw.addr);
4521 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4522
4523 for (i = 0; i < 16; i++) {
4524 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4525 }
4526 for (i = 0; i < 16; i++) {
4527 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4528 }
4529
4530 return err;
4531 }
4532
4533 long do_sigreturn(CPUS390XState *env)
4534 {
4535 sigframe *frame;
4536 abi_ulong frame_addr = env->regs[15];
4537 target_sigset_t target_set;
4538 sigset_t set;
4539
4540 trace_user_do_sigreturn(env, frame_addr);
4541 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4542 goto badframe;
4543 }
4544 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4545
4546 target_to_host_sigset_internal(&set, &target_set);
4547 set_sigmask(&set); /* ~_BLOCKABLE? */
4548
4549 if (restore_sigregs(env, &frame->sregs)) {
4550 goto badframe;
4551 }
4552
4553 unlock_user_struct(frame, frame_addr, 0);
4554 return -TARGET_QEMU_ESIGRETURN;
4555
4556 badframe:
4557 force_sig(TARGET_SIGSEGV);
4558 return -TARGET_QEMU_ESIGRETURN;
4559 }
4560
4561 long do_rt_sigreturn(CPUS390XState *env)
4562 {
4563 rt_sigframe *frame;
4564 abi_ulong frame_addr = env->regs[15];
4565 sigset_t set;
4566
4567 trace_user_do_rt_sigreturn(env, frame_addr);
4568 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4569 goto badframe;
4570 }
4571 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4572
4573 set_sigmask(&set); /* ~_BLOCKABLE? */
4574
4575 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4576 goto badframe;
4577 }
4578
4579 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4580 get_sp_from_cpustate(env)) == -EFAULT) {
4581 goto badframe;
4582 }
4583 unlock_user_struct(frame, frame_addr, 0);
4584 return -TARGET_QEMU_ESIGRETURN;
4585
4586 badframe:
4587 unlock_user_struct(frame, frame_addr, 0);
4588 force_sig(TARGET_SIGSEGV);
4589 return -TARGET_QEMU_ESIGRETURN;
4590 }
4591
4592 #elif defined(TARGET_PPC)
4593
4594 /* Size of dummy stack frame allocated when calling signal handler.
4595 See arch/powerpc/include/asm/ptrace.h. */
4596 #if defined(TARGET_PPC64)
4597 #define SIGNAL_FRAMESIZE 128
4598 #else
4599 #define SIGNAL_FRAMESIZE 64
4600 #endif
4601
4602 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4603 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4604 struct target_mcontext {
4605 target_ulong mc_gregs[48];
4606 /* Includes fpscr. */
4607 uint64_t mc_fregs[33];
4608 #if defined(TARGET_PPC64)
4609 /* Pointer to the vector regs */
4610 target_ulong v_regs;
4611 #else
4612 target_ulong mc_pad[2];
4613 #endif
4614 /* We need to handle Altivec and SPE at the same time, which no
4615 kernel needs to do. Fortunately, the kernel defines this bit to
4616 be Altivec-register-large all the time, rather than trying to
4617 twiddle it based on the specific platform. */
4618 union {
4619 /* SPE vector registers. One extra for SPEFSCR. */
4620 uint32_t spe[33];
4621 /* Altivec vector registers. The packing of VSCR and VRSAVE
4622 varies depending on whether we're PPC64 or not: PPC64 splits
4623 them apart; PPC32 stuffs them together.
4624 We also need to account for the VSX registers on PPC64
4625 */
4626 #if defined(TARGET_PPC64)
4627 #define QEMU_NVRREG (34 + 16)
4628 /* On ppc64, this mcontext structure is naturally *unaligned*,
4629 * or rather it is aligned on an 8-byte boundary but not on
4630 * a 16-byte one. This pad fixes it up. This is also why the
4631 * vector regs are referenced by the v_regs pointer above, so
4632 * any amount of padding can be added here.
4633 */
4634 target_ulong pad;
4635 #else
4636 /* On ppc32, we are already aligned to 16 bytes */
4637 #define QEMU_NVRREG 33
4638 #endif
4639 /* We cannot use ppc_avr_t here as we do *not* want the implied
4640 * 16-bytes alignment that would result from it. This would have
4641 * the effect of making the whole struct target_mcontext aligned
4642 * which breaks the layout of struct target_ucontext on ppc64.
4643 */
4644 uint64_t altivec[QEMU_NVRREG][2];
4645 #undef QEMU_NVRREG
4646 } mc_vregs;
4647 };
4648
4649 /* See arch/powerpc/include/asm/sigcontext.h. */
4650 struct target_sigcontext {
4651 target_ulong _unused[4];
4652 int32_t signal;
4653 #if defined(TARGET_PPC64)
4654 int32_t pad0;
4655 #endif
4656 target_ulong handler;
4657 target_ulong oldmask;
4658 target_ulong regs; /* struct pt_regs __user * */
4659 #if defined(TARGET_PPC64)
4660 struct target_mcontext mcontext;
4661 #endif
4662 };
4663
4664 /* Indices for target_mcontext.mc_gregs, below.
4665 See arch/powerpc/include/asm/ptrace.h for details. */
4666 enum {
4667 TARGET_PT_R0 = 0,
4668 TARGET_PT_R1 = 1,
4669 TARGET_PT_R2 = 2,
4670 TARGET_PT_R3 = 3,
4671 TARGET_PT_R4 = 4,
4672 TARGET_PT_R5 = 5,
4673 TARGET_PT_R6 = 6,
4674 TARGET_PT_R7 = 7,
4675 TARGET_PT_R8 = 8,
4676 TARGET_PT_R9 = 9,
4677 TARGET_PT_R10 = 10,
4678 TARGET_PT_R11 = 11,
4679 TARGET_PT_R12 = 12,
4680 TARGET_PT_R13 = 13,
4681 TARGET_PT_R14 = 14,
4682 TARGET_PT_R15 = 15,
4683 TARGET_PT_R16 = 16,
4684 TARGET_PT_R17 = 17,
4685 TARGET_PT_R18 = 18,
4686 TARGET_PT_R19 = 19,
4687 TARGET_PT_R20 = 20,
4688 TARGET_PT_R21 = 21,
4689 TARGET_PT_R22 = 22,
4690 TARGET_PT_R23 = 23,
4691 TARGET_PT_R24 = 24,
4692 TARGET_PT_R25 = 25,
4693 TARGET_PT_R26 = 26,
4694 TARGET_PT_R27 = 27,
4695 TARGET_PT_R28 = 28,
4696 TARGET_PT_R29 = 29,
4697 TARGET_PT_R30 = 30,
4698 TARGET_PT_R31 = 31,
4699 TARGET_PT_NIP = 32,
4700 TARGET_PT_MSR = 33,
4701 TARGET_PT_ORIG_R3 = 34,
4702 TARGET_PT_CTR = 35,
4703 TARGET_PT_LNK = 36,
4704 TARGET_PT_XER = 37,
4705 TARGET_PT_CCR = 38,
4706 /* Yes, there are two registers with #39. One is 64-bit only. */
4707 TARGET_PT_MQ = 39,
4708 TARGET_PT_SOFTE = 39,
4709 TARGET_PT_TRAP = 40,
4710 TARGET_PT_DAR = 41,
4711 TARGET_PT_DSISR = 42,
4712 TARGET_PT_RESULT = 43,
4713 TARGET_PT_REGS_COUNT = 44
4714 };
4715
4716
4717 struct target_ucontext {
4718 target_ulong tuc_flags;
4719 target_ulong tuc_link; /* ucontext_t __user * */
4720 struct target_sigaltstack tuc_stack;
4721 #if !defined(TARGET_PPC64)
4722 int32_t tuc_pad[7];
4723 target_ulong tuc_regs; /* struct mcontext __user *
4724 points to uc_mcontext field */
4725 #endif
4726 target_sigset_t tuc_sigmask;
4727 #if defined(TARGET_PPC64)
4728 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4729 struct target_sigcontext tuc_sigcontext;
4730 #else
4731 int32_t tuc_maskext[30];
4732 int32_t tuc_pad2[3];
4733 struct target_mcontext tuc_mcontext;
4734 #endif
4735 };
4736
4737 /* See arch/powerpc/kernel/signal_32.c. */
4738 struct target_sigframe {
4739 struct target_sigcontext sctx;
4740 struct target_mcontext mctx;
4741 int32_t abigap[56];
4742 };
4743
4744 #if defined(TARGET_PPC64)
4745
4746 #define TARGET_TRAMP_SIZE 6
4747
4748 struct target_rt_sigframe {
4749 /* sys_rt_sigreturn requires the ucontext be the first field */
4750 struct target_ucontext uc;
4751 target_ulong _unused[2];
4752 uint32_t trampoline[TARGET_TRAMP_SIZE];
4753 target_ulong pinfo; /* struct siginfo __user * */
4754 target_ulong puc; /* void __user * */
4755 struct target_siginfo info;
4756 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4757 char abigap[288];
4758 } __attribute__((aligned(16)));
4759
4760 #else
4761
4762 struct target_rt_sigframe {
4763 struct target_siginfo info;
4764 struct target_ucontext uc;
4765 int32_t abigap[56];
4766 };
4767
4768 #endif
4769
4770 #if defined(TARGET_PPC64)
4771
4772 struct target_func_ptr {
4773 target_ulong entry;
4774 target_ulong toc;
4775 };
4776
4777 #endif
4778
4779 /* We use the mc_pad field for the signal return trampoline. */
4780 #define tramp mc_pad
4781
4782 /* See arch/powerpc/kernel/signal.c. */
4783 static target_ulong get_sigframe(struct target_sigaction *ka,
4784 CPUPPCState *env,
4785 int frame_size)
4786 {
4787 target_ulong oldsp;
4788
4789 oldsp = env->gpr[1];
4790
4791 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4792 (sas_ss_flags(oldsp) == 0)) {
4793 oldsp = (target_sigaltstack_used.ss_sp
4794 + target_sigaltstack_used.ss_size);
4795 }
4796
4797 return (oldsp - frame_size) & ~0xFUL;
4798 }
4799
4800 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
4801 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
4802 #define PPC_VEC_HI 0
4803 #define PPC_VEC_LO 1
4804 #else
4805 #define PPC_VEC_HI 1
4806 #define PPC_VEC_LO 0
4807 #endif
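/* These indices select which u64 half of a ppc_avr_t holds the
   architecturally high/low doubleword, which depends on whether host
   and target endianness agree. */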
4808
4809
4810 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4811 {
4812 target_ulong msr = env->msr;
4813 int i;
4814 target_ulong ccr = 0;
4815
4816 /* In general, the kernel attempts to be intelligent about what it
4817 needs to save for Altivec/FP/SPE registers. We don't care that
4818 much, so we just go ahead and save everything. */
4819
4820 /* Save general registers. */
4821 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4822 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4823 }
4824 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4825 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4826 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4827 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4828
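/* Pack the eight 4-bit CR fields into a single 32-bit CCR word,
   with CR0 in the most significant nibble. */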
4829 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4830 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4831 }
4832 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4833
4834 /* Save Altivec registers if necessary. */
4835 if (env->insns_flags & PPC_ALTIVEC) {
4836 uint32_t *vrsave;
4837 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4838 ppc_avr_t *avr = &env->avr[i];
4839 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
4840
4841 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
4842 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
4843 }
4844 /* Set MSR_VR in the saved MSR value to indicate that
4845 frame->mc_vregs contains valid data. */
4846 msr |= MSR_VR;
4847 #if defined(TARGET_PPC64)
4848 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
4849 /* 64-bit needs to put a pointer to the vectors in the frame */
4850 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
4851 #else
4852 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
4853 #endif
4854 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
4855 }
4856
4857 /* Save VSX second halves */
4858 if (env->insns_flags2 & PPC2_VSX) {
4859 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
4860 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
4861 __put_user(env->vsr[i], &vsregs[i]);
4862 }
4863 }
4864
4865 /* Save floating point registers. */
4866 if (env->insns_flags & PPC_FLOAT) {
4867 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4868 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4869 }
4870 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4871 }
4872
4873 /* Save SPE registers. The kernel only saves the high half. */
4874 if (env->insns_flags & PPC_SPE) {
4875 #if defined(TARGET_PPC64)
4876 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4877 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4878 }
4879 #else
4880 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4881 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4882 }
4883 #endif
4884 /* Set MSR_SPE in the saved MSR value to indicate that
4885 frame->mc_vregs contains valid data. */
4886 msr |= MSR_SPE;
4887 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4888 }
4889
4890 /* Store MSR. */
4891 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4892 }
4893
4894 static void encode_trampoline(int sigret, uint32_t *tramp)
4895 {
4896 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
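/* (0x38000000 is addi with rD = rA = 0, i.e. "li r0,imm";
   0x44000002 is the "sc" encoding.) */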
4897 if (sigret) {
4898 __put_user(0x38000000 | sigret, &tramp[0]);
4899 __put_user(0x44000002, &tramp[1]);
4900 }
4901 }
4902
4903 static void restore_user_regs(CPUPPCState *env,
4904 struct target_mcontext *frame, int sig)
4905 {
4906 target_ulong save_r2 = 0;
4907 target_ulong msr;
4908 target_ulong ccr;
4909
4910 int i;
4911
4912 if (!sig) {
4913 save_r2 = env->gpr[2];
4914 }
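/* When not returning from a signal, the current r2 (TOC or thread
   pointer, depending on the ABI) must survive the restore; it is
   re-installed from save_r2 below. */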
4915
4916 /* Restore general registers. */
4917 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4918 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4919 }
4920 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4921 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4922 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4923 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4924 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4925
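/* Unpack the 32-bit CCR word back into the eight 4-bit CR fields. */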
4926 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4927 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4928 }
4929
4930 if (!sig) {
4931 env->gpr[2] = save_r2;
4932 }
4933 /* Restore MSR. */
4934 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4935
4936 /* If doing signal return, restore the previous little-endian mode. */
4937 if (sig)
4938 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4939
4940 /* Restore Altivec registers if necessary. */
4941 if (env->insns_flags & PPC_ALTIVEC) {
4942 ppc_avr_t *v_regs;
4943 uint32_t *vrsave;
4944 #if defined(TARGET_PPC64)
4945 uint64_t v_addr;
4946 /* 64-bit needs to recover the pointer to the vectors from the frame */
4947 __get_user(v_addr, &frame->v_regs);
4948 v_regs = g2h(v_addr);
4949 #else
4950 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
4951 #endif
4952 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4953 ppc_avr_t *avr = &env->avr[i];
4954 ppc_avr_t *vreg = &v_regs[i];
4955
4956 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
4957 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
4958 }
4959 /* Recover VRSAVE from the frame; its location within the vector
4960 register area differs between ppc64 and ppc32. */
4961 #if defined(TARGET_PPC64)
4962 vrsave = (uint32_t *)&v_regs[33];
4963 #else
4964 vrsave = (uint32_t *)&v_regs[32];
4965 #endif
4966 __get_user(env->spr[SPR_VRSAVE], vrsave);
4967 }
4968
4969 /* Restore VSX second halves */
4970 if (env->insns_flags2 & PPC2_VSX) {
4971 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
4972 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
4973 __get_user(env->vsr[i], &vsregs[i]);
4974 }
4975 }
4976
4977 /* Restore floating point registers. */
4978 if (env->insns_flags & PPC_FLOAT) {
4979 uint64_t fpscr;
4980 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4981 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4982 }
4983 __get_user(fpscr, &frame->mc_fregs[32]);
4984 env->fpscr = (uint32_t) fpscr;
4985 }
4986
4987 /* Restore SPE registers. The kernel only saved the high half. */
4988 if (env->insns_flags & PPC_SPE) {
4989 #if defined(TARGET_PPC64)
4990 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4991 uint32_t hi;
4992
4993 __get_user(hi, &frame->mc_vregs.spe[i]);
4994 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4995 }
4996 #else
4997 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4998 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4999 }
5000 #endif
5001 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5002 }
5003 }
5004
5005 #if !defined(TARGET_PPC64)
5006 static void setup_frame(int sig, struct target_sigaction *ka,
5007 target_sigset_t *set, CPUPPCState *env)
5008 {
5009 struct target_sigframe *frame;
5010 struct target_sigcontext *sc;
5011 target_ulong frame_addr, newsp;
5012 int err = 0;
5013
5014 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5015 trace_user_setup_frame(env, frame_addr);
5016 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5017 goto sigsegv;
5018 sc = &frame->sctx;
5019
5020 __put_user(ka->_sa_handler, &sc->handler);
5021 __put_user(set->sig[0], &sc->oldmask);
5022 __put_user(set->sig[1], &sc->_unused[3]);
5023 __put_user(h2g(&frame->mctx), &sc->regs);
5024 __put_user(sig, &sc->signal);
5025
5026 /* Save user regs. */
5027 save_user_regs(env, &frame->mctx);
5028
5029 /* Construct the trampoline code on the stack. */
5030 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5031
5032 /* The kernel checks for the presence of a VDSO here. We don't
5033 emulate a vdso, so use a sigreturn system call. */
5034 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5035
5036 /* Turn off all fp exceptions. */
5037 env->fpscr = 0;
5038
5039 /* Create a stack frame for the caller of the handler. */
5040 newsp = frame_addr - SIGNAL_FRAMESIZE;
5041 err |= put_user(env->gpr[1], newsp, target_ulong);
5042
5043 if (err)
5044 goto sigsegv;
5045
5046 /* Set up registers for signal handler. */
5047 env->gpr[1] = newsp;
5048 env->gpr[3] = sig;
5049 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5050
5051 env->nip = (target_ulong) ka->_sa_handler;
5052
5053 /* Signal handlers are entered in big-endian mode. */
5054 env->msr &= ~(1ull << MSR_LE);
5055
5056 unlock_user_struct(frame, frame_addr, 1);
5057 return;
5058
5059 sigsegv:
5060 unlock_user_struct(frame, frame_addr, 1);
5061 force_sigsegv(sig);
5062 }
5063 #endif /* !defined(TARGET_PPC64) */
5064
5065 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5066 target_siginfo_t *info,
5067 target_sigset_t *set, CPUPPCState *env)
5068 {
5069 struct target_rt_sigframe *rt_sf;
5070 uint32_t *trampptr = 0;
5071 struct target_mcontext *mctx = 0;
5072 target_ulong rt_sf_addr, newsp = 0;
5073 int i, err = 0;
5074 #if defined(TARGET_PPC64)
5075 struct target_sigcontext *sc = 0;
5076 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5077 #endif
5078
5079 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5080 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5081 goto sigsegv;
5082
5083 tswap_siginfo(&rt_sf->info, info);
5084
5085 __put_user(0, &rt_sf->uc.tuc_flags);
5086 __put_user(0, &rt_sf->uc.tuc_link);
5087 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5088 &rt_sf->uc.tuc_stack.ss_sp);
5089 __put_user(sas_ss_flags(env->gpr[1]),
5090 &rt_sf->uc.tuc_stack.ss_flags);
5091 __put_user(target_sigaltstack_used.ss_size,
5092 &rt_sf->uc.tuc_stack.ss_size);
5093 #if !defined(TARGET_PPC64)
5094 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5095 &rt_sf->uc.tuc_regs);
5096 #endif
5097 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5098 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5099 }
5100
5101 #if defined(TARGET_PPC64)
5102 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
5103 trampptr = &rt_sf->trampoline[0];
5104
5105 sc = &rt_sf->uc.tuc_sigcontext;
5106 __put_user(h2g(mctx), &sc->regs);
5107 __put_user(sig, &sc->signal);
5108 #else
5109 mctx = &rt_sf->uc.tuc_mcontext;
5110 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5111 #endif
5112
5113 save_user_regs(env, mctx);
5114 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5115
5116 /* The kernel checks for the presence of a VDSO here. We don't
5117 emulate a vdso, so use a sigreturn system call. */
5118 env->lr = (target_ulong) h2g(trampptr);
5119
5120 /* Turn off all fp exceptions. */
5121 env->fpscr = 0;
5122
5123 /* Create a stack frame for the caller of the handler. */
5124 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5125 err |= put_user(env->gpr[1], newsp, target_ulong);
5126
5127 if (err)
5128 goto sigsegv;
5129
5130 /* Set up registers for signal handler. */
5131 env->gpr[1] = newsp;
5132 env->gpr[3] = (target_ulong) sig;
5133 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5134 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5135 env->gpr[6] = (target_ulong) h2g(rt_sf);
5136
5137 #if defined(TARGET_PPC64)
5138 if (get_ppc64_abi(image) < 2) {
5139 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5140 struct target_func_ptr *handler =
5141 (struct target_func_ptr *)g2h(ka->_sa_handler);
5142 env->nip = tswapl(handler->entry);
5143 env->gpr[2] = tswapl(handler->toc);
5144 } else {
5145 /* ELFv2 PPC64 function pointers are entry points, but R12
5146 * must also be set */
5147 env->nip = tswapl((target_ulong) ka->_sa_handler);
5148 env->gpr[12] = env->nip;
5149 }
5150 #else
5151 env->nip = (target_ulong) ka->_sa_handler;
5152 #endif
5153
5154 /* Signal handlers are entered in big-endian mode. */
5155 env->msr &= ~(1ull << MSR_LE);
5156
5157 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5158 return;
5159
5160 sigsegv:
5161 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5162 force_sigsegv(sig);
5163
5164 }
5165
5166 #if !defined(TARGET_PPC64)
5167 long do_sigreturn(CPUPPCState *env)
5168 {
5169 struct target_sigcontext *sc = NULL;
5170 struct target_mcontext *sr = NULL;
5171 target_ulong sr_addr = 0, sc_addr;
5172 sigset_t blocked;
5173 target_sigset_t set;
5174
5175 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5176 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5177 goto sigsegv;
5178
5179 #if defined(TARGET_PPC64)
5180 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5181 #else
5182 __get_user(set.sig[0], &sc->oldmask);
5183 __get_user(set.sig[1], &sc->_unused[3]);
5184 #endif
5185 target_to_host_sigset_internal(&blocked, &set);
5186 set_sigmask(&blocked);
5187
5188 __get_user(sr_addr, &sc->regs);
5189 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5190 goto sigsegv;
5191 restore_user_regs(env, sr, 1);
5192
5193 unlock_user_struct(sr, sr_addr, 1);
5194 unlock_user_struct(sc, sc_addr, 1);
5195 return -TARGET_QEMU_ESIGRETURN;
5196
5197 sigsegv:
5198 unlock_user_struct(sr, sr_addr, 1);
5199 unlock_user_struct(sc, sc_addr, 1);
5200 force_sig(TARGET_SIGSEGV);
5201 return -TARGET_QEMU_ESIGRETURN;
5202 }
5203 #endif /* !defined(TARGET_PPC64) */
5204
5205 /* See arch/powerpc/kernel/signal_32.c. */
5206 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5207 {
5208 struct target_mcontext *mcp;
5209 target_ulong mcp_addr;
5210 sigset_t blocked;
5211 target_sigset_t set;
5212
5213 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5214 sizeof (set)))
5215 return 1;
5216
5217 #if defined(TARGET_PPC64)
5218 mcp_addr = h2g(ucp) +
5219 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5220 #else
5221 __get_user(mcp_addr, &ucp->tuc_regs);
5222 #endif
5223
5224 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5225 return 1;
5226
5227 target_to_host_sigset_internal(&blocked, &set);
5228 set_sigmask(&blocked);
5229 restore_user_regs(env, mcp, sig);
5230
5231 unlock_user_struct(mcp, mcp_addr, 1);
5232 return 0;
5233 }
5234
5235 long do_rt_sigreturn(CPUPPCState *env)
5236 {
5237 struct target_rt_sigframe *rt_sf = NULL;
5238 target_ulong rt_sf_addr;
5239
5240 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5241 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5242 goto sigsegv;
5243
5244 if (do_setcontext(&rt_sf->uc, env, 1))
5245 goto sigsegv;
5246
5247 do_sigaltstack(rt_sf_addr
5248 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5249 0, env->gpr[1]);
5250
5251 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5252 return -TARGET_QEMU_ESIGRETURN;
5253
5254 sigsegv:
5255 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5256 force_sig(TARGET_SIGSEGV);
5257 return -TARGET_QEMU_ESIGRETURN;
5258 }
5259
5260 #elif defined(TARGET_M68K)
5261
5262 struct target_sigcontext {
5263 abi_ulong sc_mask;
5264 abi_ulong sc_usp;
5265 abi_ulong sc_d0;
5266 abi_ulong sc_d1;
5267 abi_ulong sc_a0;
5268 abi_ulong sc_a1;
5269 unsigned short sc_sr;
5270 abi_ulong sc_pc;
5271 };
5272
5273 struct target_sigframe
5274 {
5275 abi_ulong pretcode;
5276 int sig;
5277 int code;
5278 abi_ulong psc;
5279 char retcode[8];
5280 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5281 struct target_sigcontext sc;
5282 };
5283
5284 typedef int target_greg_t;
5285 #define TARGET_NGREG 18
5286 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5287
5288 typedef struct target_fpregset {
5289 int f_fpcntl[3];
5290 int f_fpregs[8*3];
5291 } target_fpregset_t;
5292
5293 struct target_mcontext {
5294 int version;
5295 target_gregset_t gregs;
5296 target_fpregset_t fpregs;
5297 };
5298
5299 #define TARGET_MCONTEXT_VERSION 2
5300
5301 struct target_ucontext {
5302 abi_ulong tuc_flags;
5303 abi_ulong tuc_link;
5304 target_stack_t tuc_stack;
5305 struct target_mcontext tuc_mcontext;
5306 abi_long tuc_filler[80];
5307 target_sigset_t tuc_sigmask;
5308 };
5309
5310 struct target_rt_sigframe
5311 {
5312 abi_ulong pretcode;
5313 int sig;
5314 abi_ulong pinfo;
5315 abi_ulong puc;
5316 char retcode[8];
5317 struct target_siginfo info;
5318 struct target_ucontext uc;
5319 };
5320
5321 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5322 abi_ulong mask)
5323 {
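/* Combine the system byte of SR with the live condition codes so
   that the saved sc_sr reflects the current CCR. */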
5324 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5325 __put_user(mask, &sc->sc_mask);
5326 __put_user(env->aregs[7], &sc->sc_usp);
5327 __put_user(env->dregs[0], &sc->sc_d0);
5328 __put_user(env->dregs[1], &sc->sc_d1);
5329 __put_user(env->aregs[0], &sc->sc_a0);
5330 __put_user(env->aregs[1], &sc->sc_a1);
5331 __put_user(sr, &sc->sc_sr);
5332 __put_user(env->pc, &sc->sc_pc);
5333 }
5334
5335 static void
5336 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5337 {
5338 int temp;
5339
5340 __get_user(env->aregs[7], &sc->sc_usp);
5341 __get_user(env->dregs[0], &sc->sc_d0);
5342 __get_user(env->dregs[1], &sc->sc_d1);
5343 __get_user(env->aregs[0], &sc->sc_a0);
5344 __get_user(env->aregs[1], &sc->sc_a1);
5345 __get_user(env->pc, &sc->sc_pc);
5346 __get_user(temp, &sc->sc_sr);
5347 cpu_m68k_set_ccr(env, temp);
5348 }
5349
5350 /*
5351 * Determine which stack to use.
5352 */
5353 static inline abi_ulong
5354 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5355 size_t frame_size)
5356 {
5357 unsigned long sp;
5358
5359 sp = regs->aregs[7];
5360
5361 /* This is the X/Open sanctioned signal stack switching. */
5362 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5363 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5364 }
5365
5366 return ((sp - frame_size) & -8UL);
5367 }
5368
5369 static void setup_frame(int sig, struct target_sigaction *ka,
5370 target_sigset_t *set, CPUM68KState *env)
5371 {
5372 struct target_sigframe *frame;
5373 abi_ulong frame_addr;
5374 abi_ulong retcode_addr;
5375 abi_ulong sc_addr;
5376 int i;
5377
5378 frame_addr = get_sigframe(ka, env, sizeof *frame);
5379 trace_user_setup_frame(env, frame_addr);
5380 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5381 goto give_sigsegv;
5382 }
5383
5384 __put_user(sig, &frame->sig);
5385
5386 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5387 __put_user(sc_addr, &frame->psc);
5388
5389 setup_sigcontext(&frame->sc, env, set->sig[0]);
5390
5391 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5392 __put_user(set->sig[i], &frame->extramask[i - 1]);
5393 }
5394
5395 /* Set up to return from userspace. */
5396
5397 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5398 __put_user(retcode_addr, &frame->pretcode);
5399
5400 /* moveq #,d0; trap #0 */
5401
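/* Adding (TARGET_NR_sigreturn << 16) fills the moveq immediate byte,
   so the word stored below decodes as "moveq #__NR_sigreturn,d0;
   trap #0". */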
5402 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5403 (uint32_t *)(frame->retcode));
5404
5405 /* Set up to return from userspace */
5406
5407 env->aregs[7] = frame_addr;
5408 env->pc = ka->_sa_handler;
5409
5410 unlock_user_struct(frame, frame_addr, 1);
5411 return;
5412
5413 give_sigsegv:
5414 force_sigsegv(sig);
5415 }
5416
5417 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
5418 CPUM68KState *env)
5419 {
5420 int i;
5421 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5422
5423 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
5424 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
5425 /* fpiar is not emulated */
5426
5427 for (i = 0; i < 8; i++) {
5428 uint32_t high = env->fregs[i].d.high << 16;
5429 __put_user(high, &fpregs->f_fpregs[i * 3]);
5430 __put_user(env->fregs[i].d.low,
5431 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5432 }
5433 }
5434
5435 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5436 CPUM68KState *env)
5437 {
5438 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5439 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5440
5441 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5442 __put_user(env->dregs[0], &gregs[0]);
5443 __put_user(env->dregs[1], &gregs[1]);
5444 __put_user(env->dregs[2], &gregs[2]);
5445 __put_user(env->dregs[3], &gregs[3]);
5446 __put_user(env->dregs[4], &gregs[4]);
5447 __put_user(env->dregs[5], &gregs[5]);
5448 __put_user(env->dregs[6], &gregs[6]);
5449 __put_user(env->dregs[7], &gregs[7]);
5450 __put_user(env->aregs[0], &gregs[8]);
5451 __put_user(env->aregs[1], &gregs[9]);
5452 __put_user(env->aregs[2], &gregs[10]);
5453 __put_user(env->aregs[3], &gregs[11]);
5454 __put_user(env->aregs[4], &gregs[12]);
5455 __put_user(env->aregs[5], &gregs[13]);
5456 __put_user(env->aregs[6], &gregs[14]);
5457 __put_user(env->aregs[7], &gregs[15]);
5458 __put_user(env->pc, &gregs[16]);
5459 __put_user(sr, &gregs[17]);
5460
5461 target_rt_save_fpu_state(uc, env);
5462
5463 return 0;
5464 }
5465
5466 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
5467 struct target_ucontext *uc)
5468 {
5469 int i;
5470 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5471 uint32_t fpcr;
5472
5473 __get_user(fpcr, &fpregs->f_fpcntl[0]);
5474 cpu_m68k_set_fpcr(env, fpcr);
5475 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
5476 /* fpiar is not emulated */
5477
5478 for (i = 0; i < 8; i++) {
5479 uint32_t high;
5480 __get_user(high, &fpregs->f_fpregs[i * 3]);
5481 env->fregs[i].d.high = high >> 16;
5482 __get_user(env->fregs[i].d.low,
5483 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5484 }
5485 }
5486
5487 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5488 struct target_ucontext *uc)
5489 {
5490 int temp;
5491 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5492
5493 __get_user(temp, &uc->tuc_mcontext.version);
5494 if (temp != TARGET_MCONTEXT_VERSION)
5495 goto badframe;
5496
5497 /* restore passed registers */
5498 __get_user(env->dregs[0], &gregs[0]);
5499 __get_user(env->dregs[1], &gregs[1]);
5500 __get_user(env->dregs[2], &gregs[2]);
5501 __get_user(env->dregs[3], &gregs[3]);
5502 __get_user(env->dregs[4], &gregs[4]);
5503 __get_user(env->dregs[5], &gregs[5]);
5504 __get_user(env->dregs[6], &gregs[6]);
5505 __get_user(env->dregs[7], &gregs[7]);
5506 __get_user(env->aregs[0], &gregs[8]);
5507 __get_user(env->aregs[1], &gregs[9]);
5508 __get_user(env->aregs[2], &gregs[10]);
5509 __get_user(env->aregs[3], &gregs[11]);
5510 __get_user(env->aregs[4], &gregs[12]);
5511 __get_user(env->aregs[5], &gregs[13]);
5512 __get_user(env->aregs[6], &gregs[14]);
5513 __get_user(env->aregs[7], &gregs[15]);
5514 __get_user(env->pc, &gregs[16]);
5515 __get_user(temp, &gregs[17]);
5516 cpu_m68k_set_ccr(env, temp);
5517
5518 target_rt_restore_fpu_state(env, uc);
5519
5520 return 0;
5521
5522 badframe:
5523 return 1;
5524 }
5525
5526 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5527 target_siginfo_t *info,
5528 target_sigset_t *set, CPUM68KState *env)
5529 {
5530 struct target_rt_sigframe *frame;
5531 abi_ulong frame_addr;
5532 abi_ulong retcode_addr;
5533 abi_ulong info_addr;
5534 abi_ulong uc_addr;
5535 int err = 0;
5536 int i;
5537
5538 frame_addr = get_sigframe(ka, env, sizeof *frame);
5539 trace_user_setup_rt_frame(env, frame_addr);
5540 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5541 goto give_sigsegv;
5542 }
5543
5544 __put_user(sig, &frame->sig);
5545
5546 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5547 __put_user(info_addr, &frame->pinfo);
5548
5549 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5550 __put_user(uc_addr, &frame->puc);
5551
5552 tswap_siginfo(&frame->info, info);
5553
5554 /* Create the ucontext */
5555
5556 __put_user(0, &frame->uc.tuc_flags);
5557 __put_user(0, &frame->uc.tuc_link);
5558 __put_user(target_sigaltstack_used.ss_sp,
5559 &frame->uc.tuc_stack.ss_sp);
5560 __put_user(sas_ss_flags(env->aregs[7]),
5561 &frame->uc.tuc_stack.ss_flags);
5562 __put_user(target_sigaltstack_used.ss_size,
5563 &frame->uc.tuc_stack.ss_size);
5564 err |= target_rt_setup_ucontext(&frame->uc, env);
5565
5566 if (err)
5567 goto give_sigsegv;
5568
5569 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5570 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5571 }
5572
5573 /* Set up to return from userspace. */
5574
5575 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5576 __put_user(retcode_addr, &frame->pretcode);
5577
5578 /* moveq #,d0; notb d0; trap #0 */
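/* moveq only takes an 8-bit signed immediate, so the trampoline stores
   rt_sigreturn ^ 0xff and lets not.b recover the real syscall number in d0
   before issuing trap #0. */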
5579
5580 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5581 (uint32_t *)(frame->retcode + 0));
5582 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5583
5584 if (err)
5585 goto give_sigsegv;
5586
5587 /* Set up registers for the signal handler */
5588
5589 env->aregs[7] = frame_addr;
5590 env->pc = ka->_sa_handler;
5591
5592 unlock_user_struct(frame, frame_addr, 1);
5593 return;
5594
5595 give_sigsegv:
5596 unlock_user_struct(frame, frame_addr, 1);
5597 force_sigsegv(sig);
5598 }
5599
5600 long do_sigreturn(CPUM68KState *env)
5601 {
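/* On entry a7 points 4 bytes past the start of the sigframe: the handler's
   rts has already popped the pretcode word that sat at the frame's base. */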
5602 struct target_sigframe *frame;
5603 abi_ulong frame_addr = env->aregs[7] - 4;
5604 target_sigset_t target_set;
5605 sigset_t set;
5606 int i;
5607
5608 trace_user_do_sigreturn(env, frame_addr);
5609 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5610 goto badframe;
5611
5612 /* set blocked signals */
5613
5614 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5615
5616 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5617 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5618 }
5619
5620 target_to_host_sigset_internal(&set, &target_set);
5621 set_sigmask(&set);
5622
5623 /* restore registers */
5624
5625 restore_sigcontext(env, &frame->sc);
5626
5627 unlock_user_struct(frame, frame_addr, 0);
5628 return -TARGET_QEMU_ESIGRETURN;
5629
5630 badframe:
5631 force_sig(TARGET_SIGSEGV);
5632 return -TARGET_QEMU_ESIGRETURN;
5633 }
5634
5635 long do_rt_sigreturn(CPUM68KState *env)
5636 {
5637 struct target_rt_sigframe *frame;
5638 abi_ulong frame_addr = env->aregs[7] - 4;
5639 sigset_t set;
5640
5641 trace_user_do_rt_sigreturn(env, frame_addr);
5642 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5643 goto badframe;
5644
5645 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5646 set_sigmask(&set);
5647
5648 /* restore registers */
5649
5650 if (target_rt_restore_ucontext(env, &frame->uc))
5651 goto badframe;
5652
5653 if (do_sigaltstack(frame_addr +
5654 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5655 0, get_sp_from_cpustate(env)) == -EFAULT)
5656 goto badframe;
5657
5658 unlock_user_struct(frame, frame_addr, 0);
5659 return -TARGET_QEMU_ESIGRETURN;
5660
5661 badframe:
5662 unlock_user_struct(frame, frame_addr, 0);
5663 force_sig(TARGET_SIGSEGV);
5664 return -TARGET_QEMU_ESIGRETURN;
5665 }
5666
5667 #elif defined(TARGET_ALPHA)
5668
5669 struct target_sigcontext {
5670 abi_long sc_onstack;
5671 abi_long sc_mask;
5672 abi_long sc_pc;
5673 abi_long sc_ps;
5674 abi_long sc_regs[32];
5675 abi_long sc_ownedfp;
5676 abi_long sc_fpregs[32];
5677 abi_ulong sc_fpcr;
5678 abi_ulong sc_fp_control;
5679 abi_ulong sc_reserved1;
5680 abi_ulong sc_reserved2;
5681 abi_ulong sc_ssize;
5682 abi_ulong sc_sbase;
5683 abi_ulong sc_traparg_a0;
5684 abi_ulong sc_traparg_a1;
5685 abi_ulong sc_traparg_a2;
5686 abi_ulong sc_fp_trap_pc;
5687 abi_ulong sc_fp_trigger_sum;
5688 abi_ulong sc_fp_trigger_inst;
5689 };
5690
5691 struct target_ucontext {
5692 abi_ulong tuc_flags;
5693 abi_ulong tuc_link;
5694 abi_ulong tuc_osf_sigmask;
5695 target_stack_t tuc_stack;
5696 struct target_sigcontext tuc_mcontext;
5697 target_sigset_t tuc_sigmask;
5698 };
5699
5700 struct target_sigframe {
5701 struct target_sigcontext sc;
5702 unsigned int retcode[3];
5703 };
5704
5705 struct target_rt_sigframe {
5706 target_siginfo_t info;
5707 struct target_ucontext uc;
5708 unsigned int retcode[3];
5709 };
5710
5711 #define INSN_MOV_R30_R16 0x47fe0410
5712 #define INSN_LDI_R0 0x201f0000
5713 #define INSN_CALLSYS 0x00000083
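/* Trampoline emitted into the frame when no sa_restorer is supplied:
   mov $sp,$a0; ldi $v0,#(rt_)sigreturn; callsys, so that sigreturn
   receives the frame address in a0. */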
5714
5715 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5716 abi_ulong frame_addr, target_sigset_t *set)
5717 {
5718 int i;
5719
5720 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5721 __put_user(set->sig[0], &sc->sc_mask);
5722 __put_user(env->pc, &sc->sc_pc);
5723 __put_user(8, &sc->sc_ps);
5724
5725 for (i = 0; i < 31; ++i) {
5726 __put_user(env->ir[i], &sc->sc_regs[i]);
5727 }
5728 __put_user(0, &sc->sc_regs[31]);
5729
5730 for (i = 0; i < 31; ++i) {
5731 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5732 }
5733 __put_user(0, &sc->sc_fpregs[31]);
5734 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5735
5736 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5737 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5738 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5739 }
5740
5741 static void restore_sigcontext(CPUAlphaState *env,
5742 struct target_sigcontext *sc)
5743 {
5744 uint64_t fpcr;
5745 int i;
5746
5747 __get_user(env->pc, &sc->sc_pc);
5748
5749 for (i = 0; i < 31; ++i) {
5750 __get_user(env->ir[i], &sc->sc_regs[i]);
5751 }
5752 for (i = 0; i < 31; ++i) {
5753 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5754 }
5755
5756 __get_user(fpcr, &sc->sc_fpcr);
5757 cpu_alpha_store_fpcr(env, fpcr);
5758 }
5759
5760 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5761 CPUAlphaState *env,
5762 unsigned long framesize)
5763 {
5764 abi_ulong sp = env->ir[IR_SP];
5765
5766 /* This is the X/Open sanctioned signal stack switching. */
5767 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5768 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5769 }
5770 return (sp - framesize) & -32;
5771 }
5772
5773 static void setup_frame(int sig, struct target_sigaction *ka,
5774 target_sigset_t *set, CPUAlphaState *env)
5775 {
5776 abi_ulong frame_addr, r26;
5777 struct target_sigframe *frame;
5778 int err = 0;
5779
5780 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5781 trace_user_setup_frame(env, frame_addr);
5782 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5783 goto give_sigsegv;
5784 }
5785
5786 setup_sigcontext(&frame->sc, env, frame_addr, set);
5787
5788 if (ka->sa_restorer) {
5789 r26 = ka->sa_restorer;
5790 } else {
5791 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5792 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5793 &frame->retcode[1]);
5794 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5795 /* imb() */
5796 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
5797 }
5798
5799 unlock_user_struct(frame, frame_addr, 1);
5800
5801 if (err) {
5802 give_sigsegv:
5803 force_sigsegv(sig);
5804 return;
5805 }
5806
5807 env->ir[IR_RA] = r26;
5808 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5809 env->ir[IR_A0] = sig;
5810 env->ir[IR_A1] = 0;
5811 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5812 env->ir[IR_SP] = frame_addr;
5813 }
5814
5815 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5816 target_siginfo_t *info,
5817 target_sigset_t *set, CPUAlphaState *env)
5818 {
5819 abi_ulong frame_addr, r26;
5820 struct target_rt_sigframe *frame;
5821 int i, err = 0;
5822
5823 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5824 trace_user_setup_rt_frame(env, frame_addr);
5825 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5826 goto give_sigsegv;
5827 }
5828
5829 tswap_siginfo(&frame->info, info);
5830
5831 __put_user(0, &frame->uc.tuc_flags);
5832 __put_user(0, &frame->uc.tuc_link);
5833 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5834 __put_user(target_sigaltstack_used.ss_sp,
5835 &frame->uc.tuc_stack.ss_sp);
5836 __put_user(sas_ss_flags(env->ir[IR_SP]),
5837 &frame->uc.tuc_stack.ss_flags);
5838 __put_user(target_sigaltstack_used.ss_size,
5839 &frame->uc.tuc_stack.ss_size);
5840 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5841 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5842 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5843 }
5844
5845 if (ka->sa_restorer) {
5846 r26 = ka->sa_restorer;
5847 } else {
5848 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5849 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5850 &frame->retcode[1]);
5851 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5852 /* imb(); */
5853 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
5854 }
5855
5856 if (err) {
5857 give_sigsegv:
5858 force_sigsegv(sig);
5859 return;
5860 }
5861
5862 env->ir[IR_RA] = r26;
5863 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5864 env->ir[IR_A0] = sig;
5865 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5866 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5867 env->ir[IR_SP] = frame_addr;
5868 }
5869
5870 long do_sigreturn(CPUAlphaState *env)
5871 {
5872 struct target_sigcontext *sc;
5873 abi_ulong sc_addr = env->ir[IR_A0];
5874 target_sigset_t target_set;
5875 sigset_t set;
5876
5877 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5878 goto badframe;
5879 }
5880
5881 target_sigemptyset(&target_set);
5882 __get_user(target_set.sig[0], &sc->sc_mask);
5883
5884 target_to_host_sigset_internal(&set, &target_set);
5885 set_sigmask(&set);
5886
5887 restore_sigcontext(env, sc);
5888 unlock_user_struct(sc, sc_addr, 0);
5889 return -TARGET_QEMU_ESIGRETURN;
5890
5891 badframe:
5892 force_sig(TARGET_SIGSEGV);
5893 return -TARGET_QEMU_ESIGRETURN;
5894 }
5895
5896 long do_rt_sigreturn(CPUAlphaState *env)
5897 {
5898 abi_ulong frame_addr = env->ir[IR_A0];
5899 struct target_rt_sigframe *frame;
5900 sigset_t set;
5901
5902 trace_user_do_rt_sigreturn(env, frame_addr);
5903 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5904 goto badframe;
5905 }
5906 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5907 set_sigmask(&set);
5908
5909 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5910 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5911 uc.tuc_stack),
5912 0, env->ir[IR_SP]) == -EFAULT) {
5913 goto badframe;
5914 }
5915
5916 unlock_user_struct(frame, frame_addr, 0);
5917 return -TARGET_QEMU_ESIGRETURN;
5918
5919
5920 badframe:
5921 unlock_user_struct(frame, frame_addr, 0);
5922 force_sig(TARGET_SIGSEGV);
5923 return -TARGET_QEMU_ESIGRETURN;
5924 }
5925
5926 #elif defined(TARGET_TILEGX)
5927
5928 struct target_sigcontext {
5929 union {
5930 /* General-purpose registers. */
5931 abi_ulong gregs[56];
5932 struct {
5933 abi_ulong __gregs[53];
5934 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5935 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5936 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5937 };
5938 };
5939 abi_ulong pc; /* Program counter. */
5940 abi_ulong ics; /* In Interrupt Critical Section? */
5941 abi_ulong faultnum; /* Fault number. */
5942 abi_ulong pad[5];
5943 };
5944
5945 struct target_ucontext {
5946 abi_ulong tuc_flags;
5947 abi_ulong tuc_link;
5948 target_stack_t tuc_stack;
5949 struct target_sigcontext tuc_mcontext;
5950 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5951 };
5952
5953 struct target_rt_sigframe {
5954 unsigned char save_area[16]; /* caller save area */
5955 struct target_siginfo info;
5956 struct target_ucontext uc;
5957 abi_ulong retcode[2];
5958 };
5959
5960 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5961 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
5962
5963
5964 static void setup_sigcontext(struct target_sigcontext *sc,
5965 CPUArchState *env, int signo)
5966 {
5967 int i;
5968
5969 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5970 __put_user(env->regs[i], &sc->gregs[i]);
5971 }
5972
5973 __put_user(env->pc, &sc->pc);
5974 __put_user(0, &sc->ics);
5975 __put_user(signo, &sc->faultnum);
5976 }
5977
5978 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5979 {
5980 int i;
5981
5982 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5983 __get_user(env->regs[i], &sc->gregs[i]);
5984 }
5985
5986 __get_user(env->pc, &sc->pc);
5987 }
5988
5989 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5990 size_t frame_size)
5991 {
5992 unsigned long sp = env->regs[TILEGX_R_SP];
5993
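/* If we are already on the alternate signal stack and the new frame would
   overflow it, hand back a bogus address so the frame setup faults and the
   task dies with SIGSEGV. */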
5994 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5995 return -1UL;
5996 }
5997
5998 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5999 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6000 }
6001
6002 sp -= frame_size;
6003 sp &= -16UL;
6004 return sp;
6005 }
6006
6007 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6008 target_siginfo_t *info,
6009 target_sigset_t *set, CPUArchState *env)
6010 {
6011 abi_ulong frame_addr;
6012 struct target_rt_sigframe *frame;
6013 unsigned long restorer;
6014
6015 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6016 trace_user_setup_rt_frame(env, frame_addr);
6017 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6018 goto give_sigsegv;
6019 }
6020
6021 /* Always write at least the signal number for the stack backtracer. */
6022 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6023 /* At sigreturn time, restore the callee-save registers too. */
6024 tswap_siginfo(&frame->info, info);
6025 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
6026 } else {
6027 __put_user(info->si_signo, &frame->info.si_signo);
6028 }
6029
6030 /* Create the ucontext. */
6031 __put_user(0, &frame->uc.tuc_flags);
6032 __put_user(0, &frame->uc.tuc_link);
6033 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6034 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6035 &frame->uc.tuc_stack.ss_flags);
6036 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6037 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6038
6039 if (ka->sa_flags & TARGET_SA_RESTORER) {
6040 restorer = (unsigned long) ka->sa_restorer;
6041 } else {
6042 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6043 __put_user(INSN_SWINT1, &frame->retcode[1]);
6044 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6045 }
6046 env->pc = (unsigned long) ka->_sa_handler;
6047 env->regs[TILEGX_R_SP] = frame_addr;
6048 env->regs[TILEGX_R_LR] = restorer;
6049 env->regs[0] = (unsigned long) sig;
6050 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6051 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6052 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
6053
6054 unlock_user_struct(frame, frame_addr, 1);
6055 return;
6056
6057 give_sigsegv:
6058 force_sigsegv(sig);
6059 }
6060
6061 long do_rt_sigreturn(CPUTLGState *env)
6062 {
6063 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6064 struct target_rt_sigframe *frame;
6065 sigset_t set;
6066
6067 trace_user_do_rt_sigreturn(env, frame_addr);
6068 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6069 goto badframe;
6070 }
6071 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6072 set_sigmask(&set);
6073
6074 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6075 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6076 uc.tuc_stack),
6077 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6078 goto badframe;
6079 }
6080
6081 unlock_user_struct(frame, frame_addr, 0);
6082 return -TARGET_QEMU_ESIGRETURN;
6083
6084
6085 badframe:
6086 unlock_user_struct(frame, frame_addr, 0);
6087 force_sig(TARGET_SIGSEGV);
6088 return -TARGET_QEMU_ESIGRETURN;
6089 }
6090
6091 #elif defined(TARGET_RISCV)
6092
6093 /* Signal handler invocation must be transparent to the code being
6094 interrupted. The complete CPU (hart) state is saved on entry and restored
6095 before returning from the handler. The process sigmask is also saved to
6096 block signals while the handler is running. The handler gets its own stack,
6097 which also doubles as storage for the CPU state and sigmask.
6098
6099 The code below is a QEMU re-implementation of arch/riscv/kernel/signal.c */
6100
6101 struct target_sigcontext {
6102 abi_long pc;
6103 abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
6104 uint64_t fpr[32];
6105 uint32_t fcsr;
6106 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
6107
6108 struct target_ucontext {
6109 unsigned long uc_flags;
6110 struct target_ucontext *uc_link;
6111 target_stack_t uc_stack;
6112 struct target_sigcontext uc_mcontext;
6113 target_sigset_t uc_sigmask;
6114 };
6115
6116 struct target_rt_sigframe {
6117 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
6118 struct target_siginfo info;
6119 struct target_ucontext uc;
6120 };
6121
6122 static abi_ulong get_sigframe(struct target_sigaction *ka,
6123 CPURISCVState *regs, size_t framesize)
6124 {
6125 abi_ulong sp = regs->gpr[xSP];
6126 int onsigstack = on_sig_stack(sp);
6127
6128 /* redzone */
6129 /* This is the X/Open sanctioned signal stack switching. */
6130 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
6131 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6132 }
6133
6134 sp -= framesize;
6135 sp &= ~3UL; /* align sp on 4-byte boundary */
6136
6137 /* If we are on the alternate signal stack and would overflow it, don't.
6138 Return an always-bogus address instead so we will die with SIGSEGV. */
6139 if (onsigstack && !likely(on_sig_stack(sp))) {
6140 return -1L;
6141 }
6142
6143 return sp;
6144 }
6145
6146 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
6147 {
6148 int i;
6149
6150 __put_user(env->pc, &sc->pc);
6151
6152 for (i = 1; i < 32; i++) {
6153 __put_user(env->gpr[i], &sc->gpr[i - 1]);
6154 }
6155 for (i = 0; i < 32; i++) {
6156 __put_user(env->fpr[i], &sc->fpr[i]);
6157 }
6158
6159 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
6160 __put_user(fcsr, &sc->fcsr);
6161 }
6162
6163 static void setup_ucontext(struct target_ucontext *uc,
6164 CPURISCVState *env, target_sigset_t *set)
6165 {
6166 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
6167 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
6168 abi_ulong ss_size = target_sigaltstack_used.ss_size;
6169
6170 __put_user(0, &(uc->uc_flags));
6171 __put_user(0, &(uc->uc_link));
6172
6173 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
6174 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
6175 __put_user(ss_size, &(uc->uc_stack.ss_size));
6176
6177 int i;
6178 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6179 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
6180 }
6181
6182 setup_sigcontext(&uc->uc_mcontext, env);
6183 }
6184
6185 static inline void install_sigtramp(uint32_t *tramp)
6186 {
6187 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
6188 __put_user(0x00000073, tramp + 1); /* ecall */
6189 }
6190
6191 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6192 target_siginfo_t *info,
6193 target_sigset_t *set, CPURISCVState *env)
6194 {
6195 abi_ulong frame_addr;
6196 struct target_rt_sigframe *frame;
6197
6198 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6199 trace_user_setup_rt_frame(env, frame_addr);
6200
6201 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6202 goto badframe;
6203 }
6204
6205 setup_ucontext(&frame->uc, env, set);
6206 tswap_siginfo(&frame->info, info);
6207 install_sigtramp(frame->tramp);
6208
6209 env->pc = ka->_sa_handler;
6210 env->gpr[xSP] = frame_addr;
6211 env->gpr[xA0] = sig;
6212 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6213 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6214 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6215
6216 unlock_user_struct(frame, frame_addr, 1);
return;
6217
6218 badframe:
6219 unlock_user_struct(frame, frame_addr, 1);
6220 if (sig == TARGET_SIGSEGV) {
6221 ka->_sa_handler = TARGET_SIG_DFL;
6222 }
6223 force_sig(TARGET_SIGSEGV);
6224 }
6225
6226 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
6227 {
6228 int i;
6229
6230 __get_user(env->pc, &sc->pc);
6231
6232 for (i = 1; i < 32; ++i) {
6233 __get_user(env->gpr[i], &sc->gpr[i - 1]);
6234 }
6235 for (i = 0; i < 32; ++i) {
6236 __get_user(env->fpr[i], &sc->fpr[i]);
6237 }
6238
6239 uint32_t fcsr;
6240 __get_user(fcsr, &sc->fcsr);
6241 csr_write_helper(env, fcsr, CSR_FCSR);
6242 }
6243
6244 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
6245 {
6246 sigset_t blocked;
6247 target_sigset_t target_set;
6248 int i;
6249
6250 target_sigemptyset(&target_set);
6251 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6252 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
6253 }
6254
6255 target_to_host_sigset_internal(&blocked, &target_set);
6256 set_sigmask(&blocked);
6257
6258 restore_sigcontext(env, &uc->uc_mcontext);
6259 }
6260
6261 long do_rt_sigreturn(CPURISCVState *env)
6262 {
6263 struct target_rt_sigframe *frame;
6264 abi_ulong frame_addr;
6265
6266 frame_addr = env->gpr[xSP];
6267 trace_user_do_sigreturn(env, frame_addr);
6268 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6269 goto badframe;
6270 }
6271
6272 restore_ucontext(env, &frame->uc);
6273
6274 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6275 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
6276 goto badframe;
6277 }
6278
6279 unlock_user_struct(frame, frame_addr, 0);
6280 return -TARGET_QEMU_ESIGRETURN;
6281
6282 badframe:
6283 unlock_user_struct(frame, frame_addr, 0);
6284 force_sig(TARGET_SIGSEGV);
6285 return -TARGET_QEMU_ESIGRETURN;
6286 }
6287
6288 #elif defined(TARGET_HPPA)
6289
6290 struct target_sigcontext {
6291 abi_ulong sc_flags;
6292 abi_ulong sc_gr[32];
6293 uint64_t sc_fr[32];
6294 abi_ulong sc_iasq[2];
6295 abi_ulong sc_iaoq[2];
6296 abi_ulong sc_sar;
6297 };
6298
6299 struct target_ucontext {
6300 abi_uint tuc_flags;
6301 abi_ulong tuc_link;
6302 target_stack_t tuc_stack;
6303 abi_uint pad[1];
6304 struct target_sigcontext tuc_mcontext;
6305 target_sigset_t tuc_sigmask;
6306 };
6307
6308 struct target_rt_sigframe {
6309 abi_uint tramp[9];
6310 target_siginfo_t info;
6311 struct target_ucontext uc;
6312 /* hidden location of upper halves of pa2.0 64-bit gregs */
6313 };
6314
6315 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6316 {
6317 int flags = 0;
6318 int i;
6319
6320 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6321
6322 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6323 /* In the gateway page, executing a syscall. */
6324 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6325 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6326 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6327 } else {
6328 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6329 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6330 }
6331 __put_user(0, &sc->sc_iasq[0]);
6332 __put_user(0, &sc->sc_iasq[1]);
6333 __put_user(flags, &sc->sc_flags);
6334
6335 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6336 for (i = 1; i < 32; ++i) {
6337 __put_user(env->gr[i], &sc->sc_gr[i]);
6338 }
6339
6340 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6341 for (i = 1; i < 32; ++i) {
6342 __put_user(env->fr[i], &sc->sc_fr[i]);
6343 }
6344
6345 __put_user(env->cr[CR_SAR], &sc->sc_sar);
6346 }
6347
6348 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6349 {
6350 target_ulong psw;
6351 int i;
6352
6353 __get_user(psw, &sc->sc_gr[0]);
6354 cpu_hppa_put_psw(env, psw);
6355
6356 for (i = 1; i < 32; ++i) {
6357 __get_user(env->gr[i], &sc->sc_gr[i]);
6358 }
6359 for (i = 0; i < 32; ++i) {
6360 __get_user(env->fr[i], &sc->sc_fr[i]);
6361 }
6362 cpu_hppa_loaded_fr0(env);
6363
6364 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6365 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6366 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6367 }
6368
6369 /* No, this doesn't look right, but it's copied straight from the kernel. */
6370 #define PARISC_RT_SIGFRAME_SIZE32 \
6371 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
6372
6373 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6374 target_siginfo_t *info,
6375 target_sigset_t *set, CPUArchState *env)
6376 {
6377 abi_ulong frame_addr, sp, haddr;
6378 struct target_rt_sigframe *frame;
6379 int i;
6380
6381 sp = env->gr[30];
6382 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6383 if (sas_ss_flags(sp) == 0) {
6384 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6385 }
6386 }
6387 frame_addr = QEMU_ALIGN_UP(sp, 64);
6388 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6389
6390 trace_user_setup_rt_frame(env, frame_addr);
6391
6392 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6393 goto give_sigsegv;
6394 }
6395
6396 tswap_siginfo(&frame->info, info);
6397 frame->uc.tuc_flags = 0;
6398 frame->uc.tuc_link = 0;
6399
6400 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6401 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6402 &frame->uc.tuc_stack.ss_flags);
6403 __put_user(target_sigaltstack_used.ss_size,
6404 &frame->uc.tuc_stack.ss_size);
6405
6406 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6407 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6408 }
6409
6410 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6411
6412 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6413 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6414 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6415 __put_user(0x08000240, frame->tramp + 3); /* nop */
6416
6417 unlock_user_struct(frame, frame_addr, 1);
6418
6419 env->gr[2] = h2g(frame->tramp);
6420 env->gr[30] = sp;
6421 env->gr[26] = sig;
6422 env->gr[25] = h2g(&frame->info);
6423 env->gr[24] = h2g(&frame->uc);
6424
6425 haddr = ka->_sa_handler;
6426 if (haddr & 2) {
6427 /* Function descriptor. */
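/* Bit 1 of the handler address marks a PLABEL: it points at a two-word
   descriptor holding the real entry point and the $r19 (gp) value. */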
6428 target_ulong *fdesc, dest;
6429
6430 haddr &= -4;
6431 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
6432 goto give_sigsegv;
6433 }
6434 __get_user(dest, fdesc);
6435 __get_user(env->gr[19], fdesc + 1);
6436 unlock_user_struct(fdesc, haddr, 1);
6437 haddr = dest;
6438 }
6439 env->iaoq_f = haddr;
6440 env->iaoq_b = haddr + 4;
6441 return;
6442
6443 give_sigsegv:
6444 force_sigsegv(sig);
6445 }
6446
6447 long do_rt_sigreturn(CPUArchState *env)
6448 {
6449 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
6450 struct target_rt_sigframe *frame;
6451 sigset_t set;
6452
6453 trace_user_do_rt_sigreturn(env, frame_addr);
6454 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6455 goto badframe;
6456 }
6457 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6458 set_sigmask(&set);
6459
6460 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6461 unlock_user_struct(frame, frame_addr, 0);
6462
6463 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6464 uc.tuc_stack),
6465 0, env->gr[30]) == -EFAULT) {
6466 goto badframe;
6467 }
6468
6470 return -TARGET_QEMU_ESIGRETURN;
6471
6472 badframe:
6473 force_sig(TARGET_SIGSEGV);
6474 return -TARGET_QEMU_ESIGRETURN;
6475 }
6476
6477 #elif defined(TARGET_XTENSA)
6478
6479 struct target_sigcontext {
6480 abi_ulong sc_pc;
6481 abi_ulong sc_ps;
6482 abi_ulong sc_lbeg;
6483 abi_ulong sc_lend;
6484 abi_ulong sc_lcount;
6485 abi_ulong sc_sar;
6486 abi_ulong sc_acclo;
6487 abi_ulong sc_acchi;
6488 abi_ulong sc_a[16];
6489 abi_ulong sc_xtregs;
6490 };
6491
6492 struct target_ucontext {
6493 abi_ulong tuc_flags;
6494 abi_ulong tuc_link;
6495 target_stack_t tuc_stack;
6496 struct target_sigcontext tuc_mcontext;
6497 target_sigset_t tuc_sigmask;
6498 };
6499
6500 struct target_rt_sigframe {
6501 target_siginfo_t info;
6502 struct target_ucontext uc;
6503 /* TODO: xtregs */
6504 uint8_t retcode[6];
6505 abi_ulong window[4];
6506 };
6507
6508 static abi_ulong get_sigframe(struct target_sigaction *sa,
6509 CPUXtensaState *env,
6510 unsigned long framesize)
6511 {
6512 abi_ulong sp = env->regs[1];
6513
6514 /* This is the X/Open sanctioned signal stack switching. */
6515 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6516 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6517 }
6518 return (sp - framesize) & -16;
6519 }
6520
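/* Spill every live register window to its stack save area, the way the
   hardware window-overflow handlers would, so the signal handler (and later
   sigreturn) sees a consistent view of the call chain in memory. */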
6521 static int flush_window_regs(CPUXtensaState *env)
6522 {
6523 uint32_t wb = env->sregs[WINDOW_BASE];
6524 uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1);
6525 unsigned d = ctz32(ws) + 1;
6526 unsigned i;
6527 int ret = 0;
6528
6529 for (i = d; i < env->config->nareg / 4; i += d) {
6530 uint32_t ssp, osp;
6531 unsigned j;
6532
6533 ws >>= d;
6534 xtensa_rotate_window(env, d);
6535
6536 if (ws & 0x1) {
6537 ssp = env->regs[5];
6538 d = 1;
6539 } else if (ws & 0x2) {
6540 ssp = env->regs[9];
6541 ret |= get_user_ual(osp, env->regs[1] - 12);
6542 osp -= 32;
6543 d = 2;
6544 } else if (ws & 0x4) {
6545 ssp = env->regs[13];
6546 ret |= get_user_ual(osp, env->regs[1] - 12);
6547 osp -= 48;
6548 d = 3;
6549 } else {
6550 g_assert_not_reached();
6551 }
6552
6553 for (j = 0; j < 4; ++j) {
6554 ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4);
6555 }
6556 for (j = 4; j < d * 4; ++j) {
6557 ret |= put_user_ual(env->regs[j], osp - 16 + j * 4);
6558 }
6559 }
6560 xtensa_rotate_window(env, d);
6561 g_assert(env->sregs[WINDOW_BASE] == wb);
6562 return ret == 0;
6563 }
6564
6565 static int setup_sigcontext(struct target_rt_sigframe *frame,
6566 CPUXtensaState *env)
6567 {
6568 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
6569 int i;
6570
6571 __put_user(env->pc, &sc->sc_pc);
6572 __put_user(env->sregs[PS], &sc->sc_ps);
6573 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
6574 __put_user(env->sregs[LEND], &sc->sc_lend);
6575 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
6576 if (!flush_window_regs(env)) {
6577 return 0;
6578 }
6579 for (i = 0; i < 16; ++i) {
6580 __put_user(env->regs[i], sc->sc_a + i);
6581 }
6582 __put_user(0, &sc->sc_xtregs);
6583 /* TODO: xtregs */
6584 return 1;
6585 }
6586
6587 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6588 target_siginfo_t *info,
6589 target_sigset_t *set, CPUXtensaState *env)
6590 {
6591 abi_ulong frame_addr;
6592 struct target_rt_sigframe *frame;
6593 uint32_t ra;
6594 int i;
6595
6596 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6597 trace_user_setup_rt_frame(env, frame_addr);
6598
6599 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6600 goto give_sigsegv;
6601 }
6602
6603 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6604 tswap_siginfo(&frame->info, info);
6605 }
6606
6607 __put_user(0, &frame->uc.tuc_flags);
6608 __put_user(0, &frame->uc.tuc_link);
6609 __put_user(target_sigaltstack_used.ss_sp,
6610 &frame->uc.tuc_stack.ss_sp);
6611 __put_user(sas_ss_flags(env->regs[1]),
6612 &frame->uc.tuc_stack.ss_flags);
6613 __put_user(target_sigaltstack_used.ss_size,
6614 &frame->uc.tuc_stack.ss_size);
6615 if (!setup_sigcontext(frame, env)) {
6616 unlock_user_struct(frame, frame_addr, 0);
6617 goto give_sigsegv;
6618 }
6619 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6620 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6621 }
6622
6623 if (ka->sa_flags & TARGET_SA_RESTORER) {
6624 ra = ka->sa_restorer;
6625 } else {
6626 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6627 #ifdef TARGET_WORDS_BIGENDIAN
6628 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
6629 __put_user(0x22, &frame->retcode[0]);
6630 __put_user(0x0a, &frame->retcode[1]);
6631 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
6632 /* Generate instruction: SYSCALL */
6633 __put_user(0x00, &frame->retcode[3]);
6634 __put_user(0x05, &frame->retcode[4]);
6635 __put_user(0x00, &frame->retcode[5]);
6636 #else
6637 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
6638 __put_user(0x22, &frame->retcode[0]);
6639 __put_user(0xa0, &frame->retcode[1]);
6640 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
6641 /* Generate instruction: SYSCALL */
6642 __put_user(0x00, &frame->retcode[3]);
6643 __put_user(0x50, &frame->retcode[4]);
6644 __put_user(0x00, &frame->retcode[5]);
6645 #endif
6646 }
6647 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
6648 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
6649 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
6650 }
6651 memset(env->regs, 0, sizeof(env->regs));
6652 env->pc = ka->_sa_handler;
6653 env->regs[1] = frame_addr;
6654 env->sregs[WINDOW_BASE] = 0;
6655 env->sregs[WINDOW_START] = 1;
6656
6657 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
6658 env->regs[6] = sig;
6659 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
6660 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6661 unlock_user_struct(frame, frame_addr, 1);
6662 return;
6663
6664 give_sigsegv:
6665 force_sigsegv(sig);
6666 return;
6667 }
6668
6669 static void restore_sigcontext(CPUXtensaState *env,
6670 struct target_rt_sigframe *frame)
6671 {
6672 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
6673 uint32_t ps;
6674 int i;
6675
6676 __get_user(env->pc, &sc->sc_pc);
6677 __get_user(ps, &sc->sc_ps);
6678 __get_user(env->sregs[LBEG], &sc->sc_lbeg);
6679 __get_user(env->sregs[LEND], &sc->sc_lend);
6680 __get_user(env->sregs[LCOUNT], &sc->sc_lcount);
6681
6682 env->sregs[WINDOW_BASE] = 0;
6683 env->sregs[WINDOW_START] = 1;
6684 env->sregs[PS] = deposit32(env->sregs[PS],
6685 PS_CALLINC_SHIFT,
6686 PS_CALLINC_LEN,
6687 extract32(ps, PS_CALLINC_SHIFT,
6688 PS_CALLINC_LEN));
6689 for (i = 0; i < 16; ++i) {
6690 __get_user(env->regs[i], sc->sc_a + i);
6691 }
6692 /* TODO: xtregs */
6693 }
6694
6695 long do_rt_sigreturn(CPUXtensaState *env)
6696 {
6697 abi_ulong frame_addr = env->regs[1];
6698 struct target_rt_sigframe *frame;
6699 sigset_t set;
6700
6701 trace_user_do_rt_sigreturn(env, frame_addr);
6702 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6703 goto badframe;
6704 }
6705 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6706 set_sigmask(&set);
6707
6708 restore_sigcontext(env, frame);
6709
6710 if (do_sigaltstack(frame_addr +
6711 offsetof(struct target_rt_sigframe, uc.tuc_stack),
6712 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
6713 goto badframe;
6714 }
6715 unlock_user_struct(frame, frame_addr, 0);
6716 return -TARGET_QEMU_ESIGRETURN;
6717
6718 badframe:
6719 unlock_user_struct(frame, frame_addr, 0);
6720 force_sig(TARGET_SIGSEGV);
6721 return -TARGET_QEMU_ESIGRETURN;
6722 }
6723 #endif
6724
6725 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
6726 struct emulated_sigtable *k)
6727 {
6728 CPUState *cpu = ENV_GET_CPU(cpu_env);
6729 abi_ulong handler;
6730 sigset_t set;
6731 target_sigset_t target_old_set;
6732 struct target_sigaction *sa;
6733 TaskState *ts = cpu->opaque;
6734
6735 trace_user_handle_signal(cpu_env, sig);
6736 /* dequeue signal */
6737 k->pending = 0;
6738
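/* Give an attached gdb a chance to intercept the signal first; it may
   discard it (gdb_handlesig then returns 0) or substitute another one. */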
6739 sig = gdb_handlesig(cpu, sig);
6740 if (!sig) {
6741 sa = NULL;
6742 handler = TARGET_SIG_IGN;
6743 } else {
6744 sa = &sigact_table[sig - 1];
6745 handler = sa->_sa_handler;
6746 }
6747
6748 if (do_strace) {
6749 print_taken_signal(sig, &k->info);
6750 }
6751
6752 if (handler == TARGET_SIG_DFL) {
6753 /* default handler: ignore some signals; the others are job control or fatal */
6754 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
6755 kill(getpid(),SIGSTOP);
6756 } else if (sig != TARGET_SIGCHLD &&
6757 sig != TARGET_SIGURG &&
6758 sig != TARGET_SIGWINCH &&
6759 sig != TARGET_SIGCONT) {
6760 dump_core_and_abort(sig);
6761 }
6762 } else if (handler == TARGET_SIG_IGN) {
6763 /* ignore sig */
6764 } else if (handler == TARGET_SIG_ERR) {
6765 dump_core_and_abort(sig);
6766 } else {
6767 /* compute the blocked signals during the handler execution */
6768 sigset_t *blocked_set;
6769
6770 target_to_host_sigset(&set, &sa->sa_mask);
6771 /* SA_NODEFER indicates that the current signal should not be
6772 blocked during the handler */
6773 if (!(sa->sa_flags & TARGET_SA_NODEFER))
6774 sigaddset(&set, target_to_host_signal(sig));
6775
6776 /* save the previous blocked signal state to restore it at the
6777 end of the signal execution (see do_sigreturn) */
6778 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
6779
6780 /* block signals in the handler */
6781 blocked_set = ts->in_sigsuspend ?
6782 &ts->sigsuspend_mask : &ts->signal_mask;
6783 sigorset(&ts->signal_mask, blocked_set, &set);
6784 ts->in_sigsuspend = 0;
6785
6786 /* if the CPU is in VM86 mode, we restore the 32 bit values */
6787 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
6788 {
6789 CPUX86State *env = cpu_env;
6790 if (env->eflags & VM_MASK)
6791 save_v86_state(env);
6792 }
6793 #endif
6794 /* prepare the stack frame of the virtual CPU */
6795 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
6796 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
6797 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
6798 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
6799 || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
6800 /* These targets only use rt signal frames; there is no legacy setup_frame(). */
6801 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6802 #else
6803 if (sa->sa_flags & TARGET_SA_SIGINFO)
6804 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6805 else
6806 setup_frame(sig, sa, &target_old_set, cpu_env);
6807 #endif
6808 if (sa->sa_flags & TARGET_SA_RESETHAND) {
6809 sa->_sa_handler = TARGET_SIG_DFL;
6810 }
6811 }
6812 }
6813
6814 void process_pending_signals(CPUArchState *cpu_env)
6815 {
6816 CPUState *cpu = ENV_GET_CPU(cpu_env);
6817 int sig;
6818 TaskState *ts = cpu->opaque;
6819 sigset_t set;
6820 sigset_t *blocked_set;
6821
6822 while (atomic_read(&ts->signal_pending)) {
6823 /* FIXME: This is not threadsafe. */
6824 sigfillset(&set);
6825 sigprocmask(SIG_SETMASK, &set, 0);
6826
6827 restart_scan:
6828 sig = ts->sync_signal.pending;
6829 if (sig) {
6830 /* Synchronous signals are forced,
6831 * see force_sig_info() and callers in Linux.
6832 * Note that not all of our queue_signal() calls in QEMU correspond
6833 * to force_sig_info() calls in Linux (some are send_sig_info()).
6834 * However it seems like a kernel bug to me to allow the process
6835 * to block a synchronous signal since it could then just end up
6836 * looping round and round indefinitely.
6837 */
6838 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
6839 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
6840 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
6841 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
6842 }
6843
6844 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
6845 }
6846
6847 for (sig = 1; sig <= TARGET_NSIG; sig++) {
6848 blocked_set = ts->in_sigsuspend ?
6849 &ts->sigsuspend_mask : &ts->signal_mask;
6850
6851 if (ts->sigtab[sig - 1].pending &&
6852 (!sigismember(blocked_set,
6853 target_to_host_signal_table[sig]))) {
6854 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
6855 /* Restart scan from the beginning, as handle_pending_signal
6856 * might have resulted in a new synchronous signal (eg SIGSEGV).
6857 */
6858 goto restart_scan;
6859 }
6860 }
6861
6862 /* if no signal is pending, unblock signals and recheck (the act
6863 * of unblocking might cause us to take another host signal which
6864 * will set signal_pending again).
6865 */
6866 atomic_set(&ts->signal_pending, 0);
6867 ts->in_sigsuspend = 0;
6868 set = ts->signal_mask;
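/* Never block host SIGSEGV/SIGBUS: QEMU relies on taking them
   synchronously to detect faults on guest memory accesses. */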
6869 sigdelset(&set, SIGSEGV);
6870 sigdelset(&set, SIGBUS);
6871 sigprocmask(SIG_SETMASK, &set, 0);
6872 }
6873 ts->in_sigsuspend = 0;
6874 }