linux-user/signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28 #include "signal-common.h"
29
30 struct target_sigaltstack target_sigaltstack_used = {
31 .ss_sp = 0,
32 .ss_size = 0,
33 .ss_flags = TARGET_SS_DISABLE,
34 };
35
36 static struct target_sigaction sigact_table[TARGET_NSIG];
37
38 static void host_signal_handler(int host_signum, siginfo_t *info,
39 void *puc);
40
41 static uint8_t host_to_target_signal_table[_NSIG] = {
42 [SIGHUP] = TARGET_SIGHUP,
43 [SIGINT] = TARGET_SIGINT,
44 [SIGQUIT] = TARGET_SIGQUIT,
45 [SIGILL] = TARGET_SIGILL,
46 [SIGTRAP] = TARGET_SIGTRAP,
47 [SIGABRT] = TARGET_SIGABRT,
48 /* [SIGIOT] = TARGET_SIGIOT,*/
49 [SIGBUS] = TARGET_SIGBUS,
50 [SIGFPE] = TARGET_SIGFPE,
51 [SIGKILL] = TARGET_SIGKILL,
52 [SIGUSR1] = TARGET_SIGUSR1,
53 [SIGSEGV] = TARGET_SIGSEGV,
54 [SIGUSR2] = TARGET_SIGUSR2,
55 [SIGPIPE] = TARGET_SIGPIPE,
56 [SIGALRM] = TARGET_SIGALRM,
57 [SIGTERM] = TARGET_SIGTERM,
58 #ifdef SIGSTKFLT
59 [SIGSTKFLT] = TARGET_SIGSTKFLT,
60 #endif
61 [SIGCHLD] = TARGET_SIGCHLD,
62 [SIGCONT] = TARGET_SIGCONT,
63 [SIGSTOP] = TARGET_SIGSTOP,
64 [SIGTSTP] = TARGET_SIGTSTP,
65 [SIGTTIN] = TARGET_SIGTTIN,
66 [SIGTTOU] = TARGET_SIGTTOU,
67 [SIGURG] = TARGET_SIGURG,
68 [SIGXCPU] = TARGET_SIGXCPU,
69 [SIGXFSZ] = TARGET_SIGXFSZ,
70 [SIGVTALRM] = TARGET_SIGVTALRM,
71 [SIGPROF] = TARGET_SIGPROF,
72 [SIGWINCH] = TARGET_SIGWINCH,
73 [SIGIO] = TARGET_SIGIO,
74 [SIGPWR] = TARGET_SIGPWR,
75 [SIGSYS] = TARGET_SIGSYS,
76 /* next signals stay the same */
77 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
78 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
79 To fix this properly we need to do manual signal delivery multiplexed
80 over a single host signal. */
81 [__SIGRTMIN] = __SIGRTMAX,
82 [__SIGRTMAX] = __SIGRTMIN,
83 };
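/* The table below is the inverse mapping; signal_init() fills it in so
 * that target_to_host_signal_table[host_to_target_signal_table[i]] == i
 * for every host signal i. */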
84 static uint8_t target_to_host_signal_table[_NSIG];
85
86 int host_to_target_signal(int sig)
87 {
88 if (sig < 0 || sig >= _NSIG)
89 return sig;
90 return host_to_target_signal_table[sig];
91 }
92
93 int target_to_host_signal(int sig)
94 {
95 if (sig < 0 || sig >= _NSIG)
96 return sig;
97 return target_to_host_signal_table[sig];
98 }
99
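/* Helpers operating on the guest sigset layout: target signal numbers
 * are 1-based, and signal signum is bit (signum - 1) of a mask made of
 * TARGET_NSIG_WORDS words of TARGET_NSIG_BPW bits each. */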
100 static inline void target_sigaddset(target_sigset_t *set, int signum)
101 {
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105 }
106
107 static inline int target_sigismember(const target_sigset_t *set, int signum)
108 {
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112 }
113
114 void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
116 {
117 int i;
118 target_sigemptyset(d);
119 for (i = 1; i <= TARGET_NSIG; i++) {
120 if (sigismember(s, i)) {
121 target_sigaddset(d, host_to_target_signal(i));
122 }
123 }
124 }
125
126 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
127 {
128 target_sigset_t d1;
129 int i;
130
131 host_to_target_sigset_internal(&d1, s);
132 for(i = 0;i < TARGET_NSIG_WORDS; i++)
133 d->sig[i] = tswapal(d1.sig[i]);
134 }
135
136 void target_to_host_sigset_internal(sigset_t *d,
137 const target_sigset_t *s)
138 {
139 int i;
140 sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (target_sigismember(s, i)) {
143 sigaddset(d, target_to_host_signal(i));
144 }
145 }
146 }
147
148 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
149 {
150 target_sigset_t s1;
151 int i;
152
153 for(i = 0;i < TARGET_NSIG_WORDS; i++)
154 s1.sig[i] = tswapal(s->sig[i]);
155 target_to_host_sigset_internal(d, &s1);
156 }
157
158 void host_to_target_old_sigset(abi_ulong *old_sigset,
159 const sigset_t *sigset)
160 {
161 target_sigset_t d;
162 host_to_target_sigset(&d, sigset);
163 *old_sigset = d.sig[0];
164 }
165
166 void target_to_host_old_sigset(sigset_t *sigset,
167 const abi_ulong *old_sigset)
168 {
169 target_sigset_t d;
170 int i;
171
172 d.sig[0] = *old_sigset;
173 for(i = 1;i < TARGET_NSIG_WORDS; i++)
174 d.sig[i] = 0;
175 target_to_host_sigset(sigset, &d);
176 }
177
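/* Block every host signal and mark ts->signal_pending. The return value
 * is the previous value of that flag: non-zero means a signal was
 * already pending, and callers such as do_sigprocmask() and
 * do_sigaction() then back out with -TARGET_ERESTARTSYS so the guest
 * syscall is restarted. */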
178 int block_signals(void)
179 {
180 TaskState *ts = (TaskState *)thread_cpu->opaque;
181 sigset_t set;
182
183 /* It's OK to block everything including SIGSEGV, because we won't
184 * run any further guest code before unblocking signals in
185 * process_pending_signals().
186 */
187 sigfillset(&set);
188 sigprocmask(SIG_SETMASK, &set, 0);
189
190 return atomic_xchg(&ts->signal_pending, 1);
191 }
192
193 /* Wrapper for the sigprocmask function.
194 * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
195 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
196 * a signal was already pending and the syscall must be restarted, or
197 * 0 on success.
198 * If set is NULL, this is guaranteed not to fail.
199 */
200 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
201 {
202 TaskState *ts = (TaskState *)thread_cpu->opaque;
203
204 if (oldset) {
205 *oldset = ts->signal_mask;
206 }
207
208 if (set) {
209 int i;
210
211 if (block_signals()) {
212 return -TARGET_ERESTARTSYS;
213 }
214
215 switch (how) {
216 case SIG_BLOCK:
217 sigorset(&ts->signal_mask, &ts->signal_mask, set);
218 break;
219 case SIG_UNBLOCK:
220 for (i = 1; i <= NSIG; ++i) {
221 if (sigismember(set, i)) {
222 sigdelset(&ts->signal_mask, i);
223 }
224 }
225 break;
226 case SIG_SETMASK:
227 ts->signal_mask = *set;
228 break;
229 default:
230 g_assert_not_reached();
231 }
232
233 /* Silently ignore attempts to change blocking status of KILL or STOP */
234 sigdelset(&ts->signal_mask, SIGKILL);
235 sigdelset(&ts->signal_mask, SIGSTOP);
236 }
237 return 0;
238 }
239
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242 * caller is assumed to have called block_signals() already.
243 */
244 void set_sigmask(const sigset_t *set)
245 {
246 TaskState *ts = (TaskState *)thread_cpu->opaque;
247
248 ts->signal_mask = *set;
249 }
250 #endif
251
252 /* siginfo conversion */
253
254 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
255 const siginfo_t *info)
256 {
257 int sig = host_to_target_signal(info->si_signo);
258 int si_code = info->si_code;
259 int si_type;
260 tinfo->si_signo = sig;
261 tinfo->si_errno = 0;
262 tinfo->si_code = info->si_code;
263
264 /* This memset serves two purposes:
265 * (1) ensure we don't leak random junk to the guest later
266 * (2) placate false positives from gcc about fields
267 * being used uninitialized if it chooses to inline both this
268 * function and tswap_siginfo() into host_to_target_siginfo().
269 */
270 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
271
272 /* This is awkward, because we have to use a combination of
273 * the si_code and si_signo to figure out which of the union's
274 * members are valid. (Within the host kernel it is always possible
275 * to tell, but the kernel carefully avoids giving userspace the
276 * high 16 bits of si_code, so we don't have the information to
277 * do this the easy way...) We therefore make our best guess,
278 * bearing in mind that a guest can spoof most of the si_codes
279 * via rt_sigqueueinfo() if it likes.
280 *
281 * Once we have made our guess, we record it in the top 16 bits of
282 * the si_code, so that tswap_siginfo() later can use it.
283 * tswap_siginfo() will strip these top bits out before writing
284 * si_code to the guest (sign-extending the lower bits).
285 */
286
287 switch (si_code) {
288 case SI_USER:
289 case SI_TKILL:
290 case SI_KERNEL:
291 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
292 * These are the only unspoofable si_code values.
293 */
294 tinfo->_sifields._kill._pid = info->si_pid;
295 tinfo->_sifields._kill._uid = info->si_uid;
296 si_type = QEMU_SI_KILL;
297 break;
298 default:
299 /* Everything else is spoofable. Make best guess based on signal */
300 switch (sig) {
301 case TARGET_SIGCHLD:
302 tinfo->_sifields._sigchld._pid = info->si_pid;
303 tinfo->_sifields._sigchld._uid = info->si_uid;
304 tinfo->_sifields._sigchld._status
305 = host_to_target_waitstatus(info->si_status);
306 tinfo->_sifields._sigchld._utime = info->si_utime;
307 tinfo->_sifields._sigchld._stime = info->si_stime;
308 si_type = QEMU_SI_CHLD;
309 break;
310 case TARGET_SIGIO:
311 tinfo->_sifields._sigpoll._band = info->si_band;
312 tinfo->_sifields._sigpoll._fd = info->si_fd;
313 si_type = QEMU_SI_POLL;
314 break;
315 default:
316 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
317 tinfo->_sifields._rt._pid = info->si_pid;
318 tinfo->_sifields._rt._uid = info->si_uid;
319 /* XXX: potential problem if 64 bit */
320 tinfo->_sifields._rt._sigval.sival_ptr
321 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
322 si_type = QEMU_SI_RT;
323 break;
324 }
325 break;
326 }
327
328 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
329 }
330
331 void tswap_siginfo(target_siginfo_t *tinfo,
332 const target_siginfo_t *info)
333 {
334 int si_type = extract32(info->si_code, 16, 16);
335 int si_code = sextract32(info->si_code, 0, 16);
336
337 __put_user(info->si_signo, &tinfo->si_signo);
338 __put_user(info->si_errno, &tinfo->si_errno);
339 __put_user(si_code, &tinfo->si_code);
340
341 /* We can use our internal marker of which fields in the structure
342 * are valid, rather than duplicating the guesswork of
343 * host_to_target_siginfo_noswap() here.
344 */
345 switch (si_type) {
346 case QEMU_SI_KILL:
347 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
348 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
349 break;
350 case QEMU_SI_TIMER:
351 __put_user(info->_sifields._timer._timer1,
352 &tinfo->_sifields._timer._timer1);
353 __put_user(info->_sifields._timer._timer2,
354 &tinfo->_sifields._timer._timer2);
355 break;
356 case QEMU_SI_POLL:
357 __put_user(info->_sifields._sigpoll._band,
358 &tinfo->_sifields._sigpoll._band);
359 __put_user(info->_sifields._sigpoll._fd,
360 &tinfo->_sifields._sigpoll._fd);
361 break;
362 case QEMU_SI_FAULT:
363 __put_user(info->_sifields._sigfault._addr,
364 &tinfo->_sifields._sigfault._addr);
365 break;
366 case QEMU_SI_CHLD:
367 __put_user(info->_sifields._sigchld._pid,
368 &tinfo->_sifields._sigchld._pid);
369 __put_user(info->_sifields._sigchld._uid,
370 &tinfo->_sifields._sigchld._uid);
371 __put_user(info->_sifields._sigchld._status,
372 &tinfo->_sifields._sigchld._status);
373 __put_user(info->_sifields._sigchld._utime,
374 &tinfo->_sifields._sigchld._utime);
375 __put_user(info->_sifields._sigchld._stime,
376 &tinfo->_sifields._sigchld._stime);
377 break;
378 case QEMU_SI_RT:
379 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
380 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
381 __put_user(info->_sifields._rt._sigval.sival_ptr,
382 &tinfo->_sifields._rt._sigval.sival_ptr);
383 break;
384 default:
385 g_assert_not_reached();
386 }
387 }
388
389 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
390 {
391 target_siginfo_t tgt_tmp;
392 host_to_target_siginfo_noswap(&tgt_tmp, info);
393 tswap_siginfo(tinfo, &tgt_tmp);
394 }
395
396 /* XXX: we support only POSIX RT signals. */
397 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
398 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
399 {
400 /* This conversion is used only for the rt_sigqueueinfo syscall,
401 * and so we know that the _rt fields are the valid ones.
402 */
403 abi_ulong sival_ptr;
404
405 __get_user(info->si_signo, &tinfo->si_signo);
406 __get_user(info->si_errno, &tinfo->si_errno);
407 __get_user(info->si_code, &tinfo->si_code);
408 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
409 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
410 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
411 info->si_value.sival_ptr = (void *)(long)sival_ptr;
412 }
413
414 static int fatal_signal (int sig)
415 {
416 switch (sig) {
417 case TARGET_SIGCHLD:
418 case TARGET_SIGURG:
419 case TARGET_SIGWINCH:
420 /* Ignored by default. */
421 return 0;
422 case TARGET_SIGCONT:
423 case TARGET_SIGSTOP:
424 case TARGET_SIGTSTP:
425 case TARGET_SIGTTIN:
426 case TARGET_SIGTTOU:
427 /* Job control signals. */
428 return 0;
429 default:
430 return 1;
431 }
432 }
433
434 /* Returns 1 if the given signal should dump core when not handled. */
435 static int core_dump_signal(int sig)
436 {
437 switch (sig) {
438 case TARGET_SIGABRT:
439 case TARGET_SIGFPE:
440 case TARGET_SIGILL:
441 case TARGET_SIGQUIT:
442 case TARGET_SIGSEGV:
443 case TARGET_SIGTRAP:
444 case TARGET_SIGBUS:
445 return (1);
446 default:
447 return (0);
448 }
449 }
450
451 void signal_init(void)
452 {
453 TaskState *ts = (TaskState *)thread_cpu->opaque;
454 struct sigaction act;
455 struct sigaction oact;
456 int i, j;
457 int host_sig;
458
459 /* generate signal conversion tables */
460 for(i = 1; i < _NSIG; i++) {
461 if (host_to_target_signal_table[i] == 0)
462 host_to_target_signal_table[i] = i;
463 }
464 for(i = 1; i < _NSIG; i++) {
465 j = host_to_target_signal_table[i];
466 target_to_host_signal_table[j] = i;
467 }
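/* Any host signal without an explicit entry above maps to the same
 * number on the target; this second loop builds the inverse table used
 * by target_to_host_signal(). */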
468
469 /* Set the signal mask from the host mask. */
470 sigprocmask(0, 0, &ts->signal_mask);
471
472 /* set all host signal handlers. ALL signals are blocked during
473 the handlers to serialize them. */
474 memset(sigact_table, 0, sizeof(sigact_table));
475
476 sigfillset(&act.sa_mask);
477 act.sa_flags = SA_SIGINFO;
478 act.sa_sigaction = host_signal_handler;
479 for(i = 1; i <= TARGET_NSIG; i++) {
480 host_sig = target_to_host_signal(i);
481 sigaction(host_sig, NULL, &oact);
482 if (oact.sa_sigaction == (void *)SIG_IGN) {
483 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
484 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
485 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
486 }
487 /* If there's already a handler installed then something has
488 gone horribly wrong, so don't even try to handle that case. */
489 /* Install some handlers for our own use. We need at least
490 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
491 trap all signals because it affects syscall interrupt
492 behavior. But do trap all default-fatal signals. */
493 if (fatal_signal (i))
494 sigaction(host_sig, &act, NULL);
495 }
496 }
497
498 /* Force a synchronously taken signal. The kernel force_sig() function
499 * also forces the signal to "not blocked, not ignored", but for QEMU
500 * that work is done in process_pending_signals().
501 */
502 void force_sig(int sig)
503 {
504 CPUState *cpu = thread_cpu;
505 CPUArchState *env = cpu->env_ptr;
506 target_siginfo_t info;
507
508 info.si_signo = sig;
509 info.si_errno = 0;
510 info.si_code = TARGET_SI_KERNEL;
511 info._sifields._kill._pid = 0;
512 info._sifields._kill._uid = 0;
513 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
514 }
515
516 /* Force a SIGSEGV if we couldn't write to memory trying to set
517 * up the signal frame. oldsig is the signal we were trying to handle
518 * at the point of failure.
519 */
520 #if !defined(TARGET_RISCV)
521 void force_sigsegv(int oldsig)
522 {
523 if (oldsig == SIGSEGV) {
524 /* Make sure we don't try to deliver the signal again; this will
525 * end up with handle_pending_signal() calling dump_core_and_abort().
526 */
527 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
528 }
529 force_sig(TARGET_SIGSEGV);
530 }
531
532 #endif
533
534 /* abort execution with signal */
535 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
536 {
537 CPUState *cpu = thread_cpu;
538 CPUArchState *env = cpu->env_ptr;
539 TaskState *ts = (TaskState *)cpu->opaque;
540 int host_sig, core_dumped = 0;
541 struct sigaction act;
542
543 host_sig = target_to_host_signal(target_sig);
544 trace_user_force_sig(env, target_sig, host_sig);
545 gdb_signalled(env, target_sig);
546
547 /* dump core if supported by target binary format */
548 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
549 stop_all_tasks();
550 core_dumped =
551 ((*ts->bprm->core_dump)(target_sig, env) == 0);
552 }
553 if (core_dumped) {
554 /* We already dumped the core of the target process; we don't want
555 * a coredump of QEMU itself. */
556 struct rlimit nodump;
557 getrlimit(RLIMIT_CORE, &nodump);
558 nodump.rlim_cur=0;
559 setrlimit(RLIMIT_CORE, &nodump);
560 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
561 target_sig, strsignal(host_sig), "core dumped" );
562 }
563
564 /* The proper exit code for dying from an uncaught signal is
565 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
566 * a negative value. To get the proper exit code we need to
567 * actually die from an uncaught signal. So we install the default
568 * signal handler, send ourselves the signal and wait for it to
569 * arrive. */
570 sigfillset(&act.sa_mask);
571 act.sa_handler = SIG_DFL;
572 act.sa_flags = 0;
573 sigaction(host_sig, &act, NULL);
574
575 /* For some reason raise(host_sig) doesn't send the signal when
576 * statically linked on x86-64. */
577 kill(getpid(), host_sig);
578
579 /* Make sure the signal isn't masked (just reuse the mask inside
580 of act) */
581 sigdelset(&act.sa_mask, host_sig);
582 sigsuspend(&act.sa_mask);
583
584 /* unreachable */
585 abort();
586 }
587
588 /* queue a signal so that it will be sent to the virtual CPU as soon
589 as possible */
590 int queue_signal(CPUArchState *env, int sig, int si_type,
591 target_siginfo_t *info)
592 {
593 CPUState *cpu = ENV_GET_CPU(env);
594 TaskState *ts = cpu->opaque;
595
596 trace_user_queue_signal(env, sig);
597
598 info->si_code = deposit32(info->si_code, 16, 16, si_type);
599
600 ts->sync_signal.info = *info;
601 ts->sync_signal.pending = sig;
602 /* signal that a new signal is pending */
603 atomic_set(&ts->signal_pending, 1);
604 return 1; /* indicates that the signal was queued */
605 }
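/* queue_signal() records synchronously generated signals in
 * ts->sync_signal; asynchronous host signals are instead recorded in
 * ts->sigtab[] by host_signal_handler() below. */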
606
607 #ifndef HAVE_SAFE_SYSCALL
608 static inline void rewind_if_in_safe_syscall(void *puc)
609 {
610 /* Default version: never rewind */
611 }
612 #endif
613
614 static void host_signal_handler(int host_signum, siginfo_t *info,
615 void *puc)
616 {
617 CPUArchState *env = thread_cpu->env_ptr;
618 CPUState *cpu = ENV_GET_CPU(env);
619 TaskState *ts = cpu->opaque;
620
621 int sig;
622 target_siginfo_t tinfo;
623 ucontext_t *uc = puc;
624 struct emulated_sigtable *k;
625
626 /* The CPU emulator uses some host signals to detect exceptions,
627 so we forward those signals to it first. */
628 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
629 && info->si_code > 0) {
630 if (cpu_signal_handler(host_signum, info, puc))
631 return;
632 }
633
634 /* get target signal number */
635 sig = host_to_target_signal(host_signum);
636 if (sig < 1 || sig > TARGET_NSIG)
637 return;
638 trace_user_host_signal(env, host_signum, sig);
639
640 rewind_if_in_safe_syscall(puc);
641
642 host_to_target_siginfo_noswap(&tinfo, info);
643 k = &ts->sigtab[sig - 1];
644 k->info = tinfo;
645 k->pending = sig;
646 ts->signal_pending = 1;
647
648 /* Block host signals until the target signal handler is entered. We
649 * can't block SIGSEGV or SIGBUS while we're executing guest
650 * code in case the guest code provokes one in the window between
651 * now and it getting out to the main loop. Signals will be
652 * unblocked again in process_pending_signals().
653 *
654 * WARNING: we cannot use sigfillset() here because the uc_sigmask
655 * field is a kernel sigset_t, which is much smaller than the
656 * libc sigset_t which sigfillset() operates on. Using sigfillset()
657 * would write 0xff bytes off the end of the structure and trash
658 * data on the struct.
659 * We can't use sizeof(uc->uc_sigmask) either, because the libc
660 * headers define the struct field with the wrong (too large) type.
661 */
662 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
663 sigdelset(&uc->uc_sigmask, SIGSEGV);
664 sigdelset(&uc->uc_sigmask, SIGBUS);
665
666 /* interrupt the virtual CPU as soon as possible */
667 cpu_exit(thread_cpu);
668 }
669
670 /* do_sigaltstack() returns target values and errnos. */
671 /* compare linux/kernel/signal.c:do_sigaltstack() */
672 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
673 {
674 int ret;
675 struct target_sigaltstack oss;
676
677 /* XXX: test errors */
678 if(uoss_addr)
679 {
680 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
681 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
682 __put_user(sas_ss_flags(sp), &oss.ss_flags);
683 }
684
685 if(uss_addr)
686 {
687 struct target_sigaltstack *uss;
688 struct target_sigaltstack ss;
689 size_t minstacksize = TARGET_MINSIGSTKSZ;
690
691 #if defined(TARGET_PPC64)
692 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
693 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
694 if (get_ppc64_abi(image) > 1) {
695 minstacksize = 4096;
696 }
697 #endif
698
699 ret = -TARGET_EFAULT;
700 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
701 goto out;
702 }
703 __get_user(ss.ss_sp, &uss->ss_sp);
704 __get_user(ss.ss_size, &uss->ss_size);
705 __get_user(ss.ss_flags, &uss->ss_flags);
706 unlock_user_struct(uss, uss_addr, 0);
707
708 ret = -TARGET_EPERM;
709 if (on_sig_stack(sp))
710 goto out;
711
712 ret = -TARGET_EINVAL;
713 if (ss.ss_flags != TARGET_SS_DISABLE
714 && ss.ss_flags != TARGET_SS_ONSTACK
715 && ss.ss_flags != 0)
716 goto out;
717
718 if (ss.ss_flags == TARGET_SS_DISABLE) {
719 ss.ss_size = 0;
720 ss.ss_sp = 0;
721 } else {
722 ret = -TARGET_ENOMEM;
723 if (ss.ss_size < minstacksize) {
724 goto out;
725 }
726 }
727
728 target_sigaltstack_used.ss_sp = ss.ss_sp;
729 target_sigaltstack_used.ss_size = ss.ss_size;
730 }
731
732 if (uoss_addr) {
733 ret = -TARGET_EFAULT;
734 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
735 goto out;
736 }
737
738 ret = 0;
739 out:
740 return ret;
741 }
742
743 /* do_sigaction() returns target values and host errnos */
744 int do_sigaction(int sig, const struct target_sigaction *act,
745 struct target_sigaction *oact)
746 {
747 struct target_sigaction *k;
748 struct sigaction act1;
749 int host_sig;
750 int ret = 0;
751
752 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
753 return -TARGET_EINVAL;
754 }
755
756 if (block_signals()) {
757 return -TARGET_ERESTARTSYS;
758 }
759
760 k = &sigact_table[sig - 1];
761 if (oact) {
762 __put_user(k->_sa_handler, &oact->_sa_handler);
763 __put_user(k->sa_flags, &oact->sa_flags);
764 #ifdef TARGET_ARCH_HAS_SA_RESTORER
765 __put_user(k->sa_restorer, &oact->sa_restorer);
766 #endif
767 /* Not swapped. */
768 oact->sa_mask = k->sa_mask;
769 }
770 if (act) {
771 /* FIXME: This is not threadsafe. */
772 __get_user(k->_sa_handler, &act->_sa_handler);
773 __get_user(k->sa_flags, &act->sa_flags);
774 #ifdef TARGET_ARCH_HAS_SA_RESTORER
775 __get_user(k->sa_restorer, &act->sa_restorer);
776 #endif
777 /* To be swapped in target_to_host_sigset. */
778 k->sa_mask = act->sa_mask;
779
780 /* we update the host linux signal state */
781 host_sig = target_to_host_signal(sig);
782 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
783 sigfillset(&act1.sa_mask);
784 act1.sa_flags = SA_SIGINFO;
785 if (k->sa_flags & TARGET_SA_RESTART)
786 act1.sa_flags |= SA_RESTART;
787 /* NOTE: it is important to update the host kernel signal
788 ignore state to avoid getting unexpectedly interrupted
789 syscalls */
790 if (k->_sa_handler == TARGET_SIG_IGN) {
791 act1.sa_sigaction = (void *)SIG_IGN;
792 } else if (k->_sa_handler == TARGET_SIG_DFL) {
793 if (fatal_signal (sig))
794 act1.sa_sigaction = host_signal_handler;
795 else
796 act1.sa_sigaction = (void *)SIG_DFL;
797 } else {
798 act1.sa_sigaction = host_signal_handler;
799 }
800 ret = sigaction(host_sig, &act1, NULL);
801 }
802 }
803 return ret;
804 }
805
806 #if defined(TARGET_I386)
807 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
808
809 struct target_fpreg {
810 uint16_t significand[4];
811 uint16_t exponent;
812 };
813
814 struct target_fpxreg {
815 uint16_t significand[4];
816 uint16_t exponent;
817 uint16_t padding[3];
818 };
819
820 struct target_xmmreg {
821 uint32_t element[4];
822 };
823
824 struct target_fpstate_32 {
825 /* Regular FPU environment */
826 uint32_t cw;
827 uint32_t sw;
828 uint32_t tag;
829 uint32_t ipoff;
830 uint32_t cssel;
831 uint32_t dataoff;
832 uint32_t datasel;
833 struct target_fpreg st[8];
834 uint16_t status;
835 uint16_t magic; /* 0xffff = regular FPU data only */
836
837 /* FXSR FPU environment */
838 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
839 uint32_t mxcsr;
840 uint32_t reserved;
841 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
842 struct target_xmmreg xmm[8];
843 uint32_t padding[56];
844 };
845
846 struct target_fpstate_64 {
847 /* FXSAVE format */
848 uint16_t cw;
849 uint16_t sw;
850 uint16_t twd;
851 uint16_t fop;
852 uint64_t rip;
853 uint64_t rdp;
854 uint32_t mxcsr;
855 uint32_t mxcsr_mask;
856 uint32_t st_space[32];
857 uint32_t xmm_space[64];
858 uint32_t reserved[24];
859 };
860
861 #ifndef TARGET_X86_64
862 # define target_fpstate target_fpstate_32
863 #else
864 # define target_fpstate target_fpstate_64
865 #endif
866
867 struct target_sigcontext_32 {
868 uint16_t gs, __gsh;
869 uint16_t fs, __fsh;
870 uint16_t es, __esh;
871 uint16_t ds, __dsh;
872 uint32_t edi;
873 uint32_t esi;
874 uint32_t ebp;
875 uint32_t esp;
876 uint32_t ebx;
877 uint32_t edx;
878 uint32_t ecx;
879 uint32_t eax;
880 uint32_t trapno;
881 uint32_t err;
882 uint32_t eip;
883 uint16_t cs, __csh;
884 uint32_t eflags;
885 uint32_t esp_at_signal;
886 uint16_t ss, __ssh;
887 uint32_t fpstate; /* pointer */
888 uint32_t oldmask;
889 uint32_t cr2;
890 };
891
892 struct target_sigcontext_64 {
893 uint64_t r8;
894 uint64_t r9;
895 uint64_t r10;
896 uint64_t r11;
897 uint64_t r12;
898 uint64_t r13;
899 uint64_t r14;
900 uint64_t r15;
901
902 uint64_t rdi;
903 uint64_t rsi;
904 uint64_t rbp;
905 uint64_t rbx;
906 uint64_t rdx;
907 uint64_t rax;
908 uint64_t rcx;
909 uint64_t rsp;
910 uint64_t rip;
911
912 uint64_t eflags;
913
914 uint16_t cs;
915 uint16_t gs;
916 uint16_t fs;
917 uint16_t ss;
918
919 uint64_t err;
920 uint64_t trapno;
921 uint64_t oldmask;
922 uint64_t cr2;
923
924 uint64_t fpstate; /* pointer */
925 uint64_t padding[8];
926 };
927
928 #ifndef TARGET_X86_64
929 # define target_sigcontext target_sigcontext_32
930 #else
931 # define target_sigcontext target_sigcontext_64
932 #endif
933
934 /* see Linux/include/uapi/asm-generic/ucontext.h */
935 struct target_ucontext {
936 abi_ulong tuc_flags;
937 abi_ulong tuc_link;
938 target_stack_t tuc_stack;
939 struct target_sigcontext tuc_mcontext;
940 target_sigset_t tuc_sigmask; /* mask last for extensibility */
941 };
942
943 #ifndef TARGET_X86_64
944 struct sigframe {
945 abi_ulong pretcode;
946 int sig;
947 struct target_sigcontext sc;
948 struct target_fpstate fpstate;
949 abi_ulong extramask[TARGET_NSIG_WORDS-1];
950 char retcode[8];
951 };
952
953 struct rt_sigframe {
954 abi_ulong pretcode;
955 int sig;
956 abi_ulong pinfo;
957 abi_ulong puc;
958 struct target_siginfo info;
959 struct target_ucontext uc;
960 struct target_fpstate fpstate;
961 char retcode[8];
962 };
963
964 #else
965
966 struct rt_sigframe {
967 abi_ulong pretcode;
968 struct target_ucontext uc;
969 struct target_siginfo info;
970 struct target_fpstate fpstate;
971 };
972
973 #endif
974
975 /*
976 * Set up a signal frame.
977 */
978
979 /* XXX: save x87 state */
980 static void setup_sigcontext(struct target_sigcontext *sc,
981 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
982 abi_ulong fpstate_addr)
983 {
984 CPUState *cs = CPU(x86_env_get_cpu(env));
985 #ifndef TARGET_X86_64
986 uint16_t magic;
987
988 /* already locked in setup_frame() */
989 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
990 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
991 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
992 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
993 __put_user(env->regs[R_EDI], &sc->edi);
994 __put_user(env->regs[R_ESI], &sc->esi);
995 __put_user(env->regs[R_EBP], &sc->ebp);
996 __put_user(env->regs[R_ESP], &sc->esp);
997 __put_user(env->regs[R_EBX], &sc->ebx);
998 __put_user(env->regs[R_EDX], &sc->edx);
999 __put_user(env->regs[R_ECX], &sc->ecx);
1000 __put_user(env->regs[R_EAX], &sc->eax);
1001 __put_user(cs->exception_index, &sc->trapno);
1002 __put_user(env->error_code, &sc->err);
1003 __put_user(env->eip, &sc->eip);
1004 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1005 __put_user(env->eflags, &sc->eflags);
1006 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1007 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1008
1009 cpu_x86_fsave(env, fpstate_addr, 1);
1010 fpstate->status = fpstate->sw;
1011 magic = 0xffff;
1012 __put_user(magic, &fpstate->magic);
1013 __put_user(fpstate_addr, &sc->fpstate);
1014
1015 /* non-iBCS2 extensions.. */
1016 __put_user(mask, &sc->oldmask);
1017 __put_user(env->cr[2], &sc->cr2);
1018 #else
1019 __put_user(env->regs[R_EDI], &sc->rdi);
1020 __put_user(env->regs[R_ESI], &sc->rsi);
1021 __put_user(env->regs[R_EBP], &sc->rbp);
1022 __put_user(env->regs[R_ESP], &sc->rsp);
1023 __put_user(env->regs[R_EBX], &sc->rbx);
1024 __put_user(env->regs[R_EDX], &sc->rdx);
1025 __put_user(env->regs[R_ECX], &sc->rcx);
1026 __put_user(env->regs[R_EAX], &sc->rax);
1027
1028 __put_user(env->regs[8], &sc->r8);
1029 __put_user(env->regs[9], &sc->r9);
1030 __put_user(env->regs[10], &sc->r10);
1031 __put_user(env->regs[11], &sc->r11);
1032 __put_user(env->regs[12], &sc->r12);
1033 __put_user(env->regs[13], &sc->r13);
1034 __put_user(env->regs[14], &sc->r14);
1035 __put_user(env->regs[15], &sc->r15);
1036
1037 __put_user(cs->exception_index, &sc->trapno);
1038 __put_user(env->error_code, &sc->err);
1039 __put_user(env->eip, &sc->rip);
1040
1041 __put_user(env->eflags, &sc->eflags);
1042 __put_user(env->segs[R_CS].selector, &sc->cs);
1043 __put_user((uint16_t)0, &sc->gs);
1044 __put_user((uint16_t)0, &sc->fs);
1045 __put_user(env->segs[R_SS].selector, &sc->ss);
1046
1047 __put_user(mask, &sc->oldmask);
1048 __put_user(env->cr[2], &sc->cr2);
1049
1050 /* fpstate_addr must be 16 byte aligned for fxsave */
1051 assert(!(fpstate_addr & 0xf));
1052
1053 cpu_x86_fxsave(env, fpstate_addr);
1054 __put_user(fpstate_addr, &sc->fpstate);
1055 #endif
1056 }
1057
1058 /*
1059 * Determine which stack to use..
1060 */
1061
1062 static inline abi_ulong
1063 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1064 {
1065 unsigned long esp;
1066
1067 /* Default to using normal stack */
1068 esp = env->regs[R_ESP];
1069 #ifdef TARGET_X86_64
1070 esp -= 128; /* this is the redzone */
1071 #endif
1072
1073 /* This is the X/Open sanctioned signal stack switching. */
1074 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1075 if (sas_ss_flags(esp) == 0) {
1076 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1077 }
1078 } else {
1079 #ifndef TARGET_X86_64
1080 /* This is the legacy signal stack switching. */
1081 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1082 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1083 ka->sa_restorer) {
1084 esp = (unsigned long) ka->sa_restorer;
1085 }
1086 #endif
1087 }
1088
1089 #ifndef TARGET_X86_64
1090 return (esp - frame_size) & -8ul;
1091 #else
1092 return ((esp - frame_size) & (~15ul)) - 8;
1093 #endif
1094 }
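/* Note on the alignment above: the 32-bit frame only needs 8-byte
 * alignment, while the 64-bit path rounds down to 16 bytes and then
 * subtracts 8 so that %rsp+8 (i.e. the stack pointer just past the
 * pretcode return-address slot) is 16-byte aligned on handler entry,
 * as the x86-64 psABI expects. */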
1095
1096 #ifndef TARGET_X86_64
1097 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1098 static void setup_frame(int sig, struct target_sigaction *ka,
1099 target_sigset_t *set, CPUX86State *env)
1100 {
1101 abi_ulong frame_addr;
1102 struct sigframe *frame;
1103 int i;
1104
1105 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1106 trace_user_setup_frame(env, frame_addr);
1107
1108 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1109 goto give_sigsegv;
1110
1111 __put_user(sig, &frame->sig);
1112
1113 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1114 frame_addr + offsetof(struct sigframe, fpstate));
1115
1116 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1117 __put_user(set->sig[i], &frame->extramask[i - 1]);
1118 }
1119
1120 /* Set up to return from userspace. If provided, use a stub
1121 already in userspace. */
1122 if (ka->sa_flags & TARGET_SA_RESTORER) {
1123 __put_user(ka->sa_restorer, &frame->pretcode);
1124 } else {
1125 uint16_t val16;
1126 abi_ulong retcode_addr;
1127 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1128 __put_user(retcode_addr, &frame->pretcode);
1129 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1130 val16 = 0xb858;
1131 __put_user(val16, (uint16_t *)(frame->retcode+0));
1132 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1133 val16 = 0x80cd;
1134 __put_user(val16, (uint16_t *)(frame->retcode+6));
1135 }
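/* The stores above write the byte sequence 58 b8 <nr32> cd 80, i.e.
 * popl %eax; movl $TARGET_NR_sigreturn,%eax; int $0x80 (all values
 * stored little-endian). */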
1136
1137 /* Set up registers for signal handler */
1138 env->regs[R_ESP] = frame_addr;
1139 env->eip = ka->_sa_handler;
1140
1141 cpu_x86_load_seg(env, R_DS, __USER_DS);
1142 cpu_x86_load_seg(env, R_ES, __USER_DS);
1143 cpu_x86_load_seg(env, R_SS, __USER_DS);
1144 cpu_x86_load_seg(env, R_CS, __USER_CS);
1145 env->eflags &= ~TF_MASK;
1146
1147 unlock_user_struct(frame, frame_addr, 1);
1148
1149 return;
1150
1151 give_sigsegv:
1152 force_sigsegv(sig);
1153 }
1154 #endif
1155
1156 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1157 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1158 target_siginfo_t *info,
1159 target_sigset_t *set, CPUX86State *env)
1160 {
1161 abi_ulong frame_addr;
1162 #ifndef TARGET_X86_64
1163 abi_ulong addr;
1164 #endif
1165 struct rt_sigframe *frame;
1166 int i;
1167
1168 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1169 trace_user_setup_rt_frame(env, frame_addr);
1170
1171 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1172 goto give_sigsegv;
1173
1174 /* These fields are only in rt_sigframe on 32 bit */
1175 #ifndef TARGET_X86_64
1176 __put_user(sig, &frame->sig);
1177 addr = frame_addr + offsetof(struct rt_sigframe, info);
1178 __put_user(addr, &frame->pinfo);
1179 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1180 __put_user(addr, &frame->puc);
1181 #endif
1182 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1183 tswap_siginfo(&frame->info, info);
1184 }
1185
1186 /* Create the ucontext. */
1187 __put_user(0, &frame->uc.tuc_flags);
1188 __put_user(0, &frame->uc.tuc_link);
1189 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1190 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1191 &frame->uc.tuc_stack.ss_flags);
1192 __put_user(target_sigaltstack_used.ss_size,
1193 &frame->uc.tuc_stack.ss_size);
1194 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1195 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1196
1197 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1198 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1199 }
1200
1201 /* Set up to return from userspace. If provided, use a stub
1202 already in userspace. */
1203 #ifndef TARGET_X86_64
1204 if (ka->sa_flags & TARGET_SA_RESTORER) {
1205 __put_user(ka->sa_restorer, &frame->pretcode);
1206 } else {
1207 uint16_t val16;
1208 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1209 __put_user(addr, &frame->pretcode);
1210 /* This is movl $,%eax ; int $0x80 */
1211 __put_user(0xb8, (char *)(frame->retcode+0));
1212 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1213 val16 = 0x80cd;
1214 __put_user(val16, (uint16_t *)(frame->retcode+5));
1215 }
1216 #else
1217 /* XXX: Would be slightly better to return -EFAULT here if test fails
1218 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1219 __put_user(ka->sa_restorer, &frame->pretcode);
1220 #endif
1221
1222 /* Set up registers for signal handler */
1223 env->regs[R_ESP] = frame_addr;
1224 env->eip = ka->_sa_handler;
1225
1226 #ifndef TARGET_X86_64
1227 env->regs[R_EAX] = sig;
1228 env->regs[R_EDX] = (unsigned long)&frame->info;
1229 env->regs[R_ECX] = (unsigned long)&frame->uc;
1230 #else
1231 env->regs[R_EAX] = 0;
1232 env->regs[R_EDI] = sig;
1233 env->regs[R_ESI] = (unsigned long)&frame->info;
1234 env->regs[R_EDX] = (unsigned long)&frame->uc;
1235 #endif
1236
1237 cpu_x86_load_seg(env, R_DS, __USER_DS);
1238 cpu_x86_load_seg(env, R_ES, __USER_DS);
1239 cpu_x86_load_seg(env, R_CS, __USER_CS);
1240 cpu_x86_load_seg(env, R_SS, __USER_DS);
1241 env->eflags &= ~TF_MASK;
1242
1243 unlock_user_struct(frame, frame_addr, 1);
1244
1245 return;
1246
1247 give_sigsegv:
1248 force_sigsegv(sig);
1249 }
1250
1251 static int
1252 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1253 {
1254 unsigned int err = 0;
1255 abi_ulong fpstate_addr;
1256 unsigned int tmpflags;
1257
1258 #ifndef TARGET_X86_64
1259 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1260 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1261 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1262 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1263
1264 env->regs[R_EDI] = tswapl(sc->edi);
1265 env->regs[R_ESI] = tswapl(sc->esi);
1266 env->regs[R_EBP] = tswapl(sc->ebp);
1267 env->regs[R_ESP] = tswapl(sc->esp);
1268 env->regs[R_EBX] = tswapl(sc->ebx);
1269 env->regs[R_EDX] = tswapl(sc->edx);
1270 env->regs[R_ECX] = tswapl(sc->ecx);
1271 env->regs[R_EAX] = tswapl(sc->eax);
1272
1273 env->eip = tswapl(sc->eip);
1274 #else
1275 env->regs[8] = tswapl(sc->r8);
1276 env->regs[9] = tswapl(sc->r9);
1277 env->regs[10] = tswapl(sc->r10);
1278 env->regs[11] = tswapl(sc->r11);
1279 env->regs[12] = tswapl(sc->r12);
1280 env->regs[13] = tswapl(sc->r13);
1281 env->regs[14] = tswapl(sc->r14);
1282 env->regs[15] = tswapl(sc->r15);
1283
1284 env->regs[R_EDI] = tswapl(sc->rdi);
1285 env->regs[R_ESI] = tswapl(sc->rsi);
1286 env->regs[R_EBP] = tswapl(sc->rbp);
1287 env->regs[R_EBX] = tswapl(sc->rbx);
1288 env->regs[R_EDX] = tswapl(sc->rdx);
1289 env->regs[R_EAX] = tswapl(sc->rax);
1290 env->regs[R_ECX] = tswapl(sc->rcx);
1291 env->regs[R_ESP] = tswapl(sc->rsp);
1292
1293 env->eip = tswapl(sc->rip);
1294 #endif
1295
1296 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1297 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1298
1299 tmpflags = tswapl(sc->eflags);
1300 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
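/* 0x40DD5 = CF|PF|AF|ZF|SF|TF|DF|OF|AC: only these flag bits are taken
 * from the saved context; the remaining eflags bits are preserved. */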
1301 // regs->orig_eax = -1; /* disable syscall checks */
1302
1303 fpstate_addr = tswapl(sc->fpstate);
1304 if (fpstate_addr != 0) {
1305 if (!access_ok(VERIFY_READ, fpstate_addr,
1306 sizeof(struct target_fpstate)))
1307 goto badframe;
1308 #ifndef TARGET_X86_64
1309 cpu_x86_frstor(env, fpstate_addr, 1);
1310 #else
1311 cpu_x86_fxrstor(env, fpstate_addr);
1312 #endif
1313 }
1314
1315 return err;
1316 badframe:
1317 return 1;
1318 }
1319
1320 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1321 #ifndef TARGET_X86_64
1322 long do_sigreturn(CPUX86State *env)
1323 {
1324 struct sigframe *frame;
1325 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1326 target_sigset_t target_set;
1327 sigset_t set;
1328 int i;
1329
1330 trace_user_do_sigreturn(env, frame_addr);
1331 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1332 goto badframe;
1333 /* set blocked signals */
1334 __get_user(target_set.sig[0], &frame->sc.oldmask);
1335 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1336 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1337 }
1338
1339 target_to_host_sigset_internal(&set, &target_set);
1340 set_sigmask(&set);
1341
1342 /* restore registers */
1343 if (restore_sigcontext(env, &frame->sc))
1344 goto badframe;
1345 unlock_user_struct(frame, frame_addr, 0);
1346 return -TARGET_QEMU_ESIGRETURN;
1347
1348 badframe:
1349 unlock_user_struct(frame, frame_addr, 0);
1350 force_sig(TARGET_SIGSEGV);
1351 return -TARGET_QEMU_ESIGRETURN;
1352 }
1353 #endif
1354
1355 long do_rt_sigreturn(CPUX86State *env)
1356 {
1357 abi_ulong frame_addr;
1358 struct rt_sigframe *frame;
1359 sigset_t set;
1360
1361 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1362 trace_user_do_rt_sigreturn(env, frame_addr);
1363 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1364 goto badframe;
1365 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1366 set_sigmask(&set);
1367
1368 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1369 goto badframe;
1370 }
1371
1372 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1373 get_sp_from_cpustate(env)) == -EFAULT) {
1374 goto badframe;
1375 }
1376
1377 unlock_user_struct(frame, frame_addr, 0);
1378 return -TARGET_QEMU_ESIGRETURN;
1379
1380 badframe:
1381 unlock_user_struct(frame, frame_addr, 0);
1382 force_sig(TARGET_SIGSEGV);
1383 return -TARGET_QEMU_ESIGRETURN;
1384 }
1385
1386 #elif defined(TARGET_SPARC)
1387
1388 #define __SUNOS_MAXWIN 31
1389
1390 /* This is what SunOS does, so shall I. */
1391 struct target_sigcontext {
1392 abi_ulong sigc_onstack; /* state to restore */
1393
1394 abi_ulong sigc_mask; /* sigmask to restore */
1395 abi_ulong sigc_sp; /* stack pointer */
1396 abi_ulong sigc_pc; /* program counter */
1397 abi_ulong sigc_npc; /* next program counter */
1398 abi_ulong sigc_psr; /* for condition codes etc */
1399 abi_ulong sigc_g1; /* User uses these two registers */
1400 abi_ulong sigc_o0; /* within the trampoline code. */
1401
1402 /* Now comes information regarding the user's window set
1403 * at the time of the signal.
1404 */
1405 abi_ulong sigc_oswins; /* outstanding windows */
1406
1407 /* stack ptrs for each regwin buf */
1408 char *sigc_spbuf[__SUNOS_MAXWIN];
1409
1410 /* Windows to restore after signal */
1411 struct {
1412 abi_ulong locals[8];
1413 abi_ulong ins[8];
1414 } sigc_wbuf[__SUNOS_MAXWIN];
1415 };
1416 /* A Sparc stack frame */
1417 struct sparc_stackf {
1418 abi_ulong locals[8];
1419 abi_ulong ins[8];
1420 /* It's simpler to treat fp and callers_pc as elements of ins[]
1421 * since we never need to access them ourselves.
1422 */
1423 char *structptr;
1424 abi_ulong xargs[6];
1425 abi_ulong xxargs[1];
1426 };
1427
1428 typedef struct {
1429 struct {
1430 abi_ulong psr;
1431 abi_ulong pc;
1432 abi_ulong npc;
1433 abi_ulong y;
1434 abi_ulong u_regs[16]; /* globals and ins */
1435 } si_regs;
1436 int si_mask;
1437 } __siginfo_t;
1438
1439 typedef struct {
1440 abi_ulong si_float_regs[32];
1441 unsigned long si_fsr;
1442 unsigned long si_fpqdepth;
1443 struct {
1444 unsigned long *insn_addr;
1445 unsigned long insn;
1446 } si_fpqueue [16];
1447 } qemu_siginfo_fpu_t;
1448
1449
1450 struct target_signal_frame {
1451 struct sparc_stackf ss;
1452 __siginfo_t info;
1453 abi_ulong fpu_save;
1454 abi_ulong insns[2] __attribute__ ((aligned (8)));
1455 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
1456 abi_ulong extra_size; /* Should be 0 */
1457 qemu_siginfo_fpu_t fpu_state;
1458 };
1459 struct target_rt_signal_frame {
1460 struct sparc_stackf ss;
1461 siginfo_t info;
1462 abi_ulong regs[20];
1463 sigset_t mask;
1464 abi_ulong fpu_save;
1465 unsigned int insns[2];
1466 stack_t stack;
1467 unsigned int extra_size; /* Should be 0 */
1468 qemu_siginfo_fpu_t fpu_state;
1469 };
1470
1471 #define UREG_O0 16
1472 #define UREG_O6 22
1473 #define UREG_I0 0
1474 #define UREG_I1 1
1475 #define UREG_I2 2
1476 #define UREG_I3 3
1477 #define UREG_I4 4
1478 #define UREG_I5 5
1479 #define UREG_I6 6
1480 #define UREG_I7 7
1481 #define UREG_L0 8
1482 #define UREG_FP UREG_I6
1483 #define UREG_SP UREG_O6
1484
1485 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
1486 CPUSPARCState *env,
1487 unsigned long framesize)
1488 {
1489 abi_ulong sp;
1490
1491 sp = env->regwptr[UREG_FP];
1492
1493 /* This is the X/Open sanctioned signal stack switching. */
1494 if (sa->sa_flags & TARGET_SA_ONSTACK) {
1495 if (!on_sig_stack(sp)
1496 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
1497 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1498 }
1499 }
1500 return sp - framesize;
1501 }
1502
1503 static int
1504 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
1505 {
1506 int err = 0, i;
1507
1508 __put_user(env->psr, &si->si_regs.psr);
1509 __put_user(env->pc, &si->si_regs.pc);
1510 __put_user(env->npc, &si->si_regs.npc);
1511 __put_user(env->y, &si->si_regs.y);
1512 for (i=0; i < 8; i++) {
1513 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
1514 }
1515 for (i=0; i < 8; i++) {
1516 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
1517 }
1518 __put_user(mask, &si->si_mask);
1519 return err;
1520 }
1521
1522 #if 0
1523 static int
1524 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1525 CPUSPARCState *env, unsigned long mask)
1526 {
1527 int err = 0;
1528
1529 __put_user(mask, &sc->sigc_mask);
1530 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
1531 __put_user(env->pc, &sc->sigc_pc);
1532 __put_user(env->npc, &sc->sigc_npc);
1533 __put_user(env->psr, &sc->sigc_psr);
1534 __put_user(env->gregs[1], &sc->sigc_g1);
1535 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
1536
1537 return err;
1538 }
1539 #endif
1540 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
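/* Signal frame size rounded up to the next multiple of 8 bytes. */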
1541
1542 static void setup_frame(int sig, struct target_sigaction *ka,
1543 target_sigset_t *set, CPUSPARCState *env)
1544 {
1545 abi_ulong sf_addr;
1546 struct target_signal_frame *sf;
1547 int sigframe_size, err, i;
1548
1549 /* 1. Make sure everything is clean */
1550 //synchronize_user_stack();
1551
1552 sigframe_size = NF_ALIGNEDSZ;
1553 sf_addr = get_sigframe(ka, env, sigframe_size);
1554 trace_user_setup_frame(env, sf_addr);
1555
1556 sf = lock_user(VERIFY_WRITE, sf_addr,
1557 sizeof(struct target_signal_frame), 0);
1558 if (!sf) {
1559 goto sigsegv;
1560 }
1561 #if 0
1562 if (invalid_frame_pointer(sf, sigframe_size))
1563 goto sigill_and_return;
1564 #endif
1565 /* 2. Save the current process state */
1566 err = setup___siginfo(&sf->info, env, set->sig[0]);
1567 __put_user(0, &sf->extra_size);
1568
1569 //save_fpu_state(regs, &sf->fpu_state);
1570 //__put_user(&sf->fpu_state, &sf->fpu_save);
1571
1572 __put_user(set->sig[0], &sf->info.si_mask);
1573 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
1574 __put_user(set->sig[i + 1], &sf->extramask[i]);
1575 }
1576
1577 for (i = 0; i < 8; i++) {
1578 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
1579 }
1580 for (i = 0; i < 8; i++) {
1581 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
1582 }
1583 if (err)
1584 goto sigsegv;
1585
1586 /* 3. signal handler back-trampoline and parameters */
1587 env->regwptr[UREG_FP] = sf_addr;
1588 env->regwptr[UREG_I0] = sig;
1589 env->regwptr[UREG_I1] = sf_addr +
1590 offsetof(struct target_signal_frame, info);
1591 env->regwptr[UREG_I2] = sf_addr +
1592 offsetof(struct target_signal_frame, info);
1593
1594 /* 4. signal handler */
1595 env->pc = ka->_sa_handler;
1596 env->npc = (env->pc + 4);
1597 /* 5. return to kernel instructions */
1598 if (ka->ka_restorer) {
1599 env->regwptr[UREG_I7] = ka->ka_restorer;
1600 } else {
1601 uint32_t val32;
1602
1603 env->regwptr[UREG_I7] = sf_addr +
1604 offsetof(struct target_signal_frame, insns) - 2 * 4;
1605
1606 /* mov __NR_sigreturn, %g1 */
1607 val32 = 0x821020d8;
1608 __put_user(val32, &sf->insns[0]);
1609
1610 /* t 0x10 */
1611 val32 = 0x91d02010;
1612 __put_user(val32, &sf->insns[1]);
1613 if (err)
1614 goto sigsegv;
1615
1616 /* Flush instruction space. */
1617 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
1618 // tb_flush(env);
1619 }
1620 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
1621 return;
1622 #if 0
1623 sigill_and_return:
1624 force_sig(TARGET_SIGILL);
1625 #endif
1626 sigsegv:
1627 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
1628 force_sigsegv(sig);
1629 }
1630
1631 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1632 target_siginfo_t *info,
1633 target_sigset_t *set, CPUSPARCState *env)
1634 {
1635 fprintf(stderr, "setup_rt_frame: not implemented\n");
1636 }
1637
1638 long do_sigreturn(CPUSPARCState *env)
1639 {
1640 abi_ulong sf_addr;
1641 struct target_signal_frame *sf;
1642 uint32_t up_psr, pc, npc;
1643 target_sigset_t set;
1644 sigset_t host_set;
1645 int err=0, i;
1646
1647 sf_addr = env->regwptr[UREG_FP];
1648 trace_user_do_sigreturn(env, sf_addr);
1649 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
1650 goto segv_and_exit;
1651 }
1652
1653 /* 1. Make sure we are not getting garbage from the user */
1654
1655 if (sf_addr & 3)
1656 goto segv_and_exit;
1657
1658 __get_user(pc, &sf->info.si_regs.pc);
1659 __get_user(npc, &sf->info.si_regs.npc);
1660
1661 if ((pc | npc) & 3) {
1662 goto segv_and_exit;
1663 }
1664
1665 /* 2. Restore the state */
1666 __get_user(up_psr, &sf->info.si_regs.psr);
1667
1668 /* User can only change condition codes and FPU enabling in %psr. */
1669 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
1670 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
1671
1672 env->pc = pc;
1673 env->npc = npc;
1674 __get_user(env->y, &sf->info.si_regs.y);
1675 for (i=0; i < 8; i++) {
1676 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
1677 }
1678 for (i=0; i < 8; i++) {
1679 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
1680 }
1681
1682 /* FIXME: implement FPU save/restore:
1683 * __get_user(fpu_save, &sf->fpu_save);
1684 * if (fpu_save)
1685 * err |= restore_fpu_state(env, fpu_save);
1686 */
1687
1688 /* This is pretty much atomic; no amount of locking would prevent
1689 * the races which exist anyway.
1690 */
1691 __get_user(set.sig[0], &sf->info.si_mask);
1692 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1693 __get_user(set.sig[i], &sf->extramask[i - 1]);
1694 }
1695
1696 target_to_host_sigset_internal(&host_set, &set);
1697 set_sigmask(&host_set);
1698
1699 if (err) {
1700 goto segv_and_exit;
1701 }
1702 unlock_user_struct(sf, sf_addr, 0);
1703 return -TARGET_QEMU_ESIGRETURN;
1704
1705 segv_and_exit:
1706 unlock_user_struct(sf, sf_addr, 0);
1707 force_sig(TARGET_SIGSEGV);
1708 return -TARGET_QEMU_ESIGRETURN;
1709 }
1710
1711 long do_rt_sigreturn(CPUSPARCState *env)
1712 {
1713 trace_user_do_rt_sigreturn(env, 0);
1714 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
1715 return -TARGET_ENOSYS;
1716 }
1717
1718 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1719 #define SPARC_MC_TSTATE 0
1720 #define SPARC_MC_PC 1
1721 #define SPARC_MC_NPC 2
1722 #define SPARC_MC_Y 3
1723 #define SPARC_MC_G1 4
1724 #define SPARC_MC_G2 5
1725 #define SPARC_MC_G3 6
1726 #define SPARC_MC_G4 7
1727 #define SPARC_MC_G5 8
1728 #define SPARC_MC_G6 9
1729 #define SPARC_MC_G7 10
1730 #define SPARC_MC_O0 11
1731 #define SPARC_MC_O1 12
1732 #define SPARC_MC_O2 13
1733 #define SPARC_MC_O3 14
1734 #define SPARC_MC_O4 15
1735 #define SPARC_MC_O5 16
1736 #define SPARC_MC_O6 17
1737 #define SPARC_MC_O7 18
1738 #define SPARC_MC_NGREG 19
1739
1740 typedef abi_ulong target_mc_greg_t;
1741 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
1742
1743 struct target_mc_fq {
1744 abi_ulong *mcfq_addr;
1745 uint32_t mcfq_insn;
1746 };
1747
1748 struct target_mc_fpu {
1749 union {
1750 uint32_t sregs[32];
1751 uint64_t dregs[32];
1752 //uint128_t qregs[16];
1753 } mcfpu_fregs;
1754 abi_ulong mcfpu_fsr;
1755 abi_ulong mcfpu_fprs;
1756 abi_ulong mcfpu_gsr;
1757 struct target_mc_fq *mcfpu_fq;
1758 unsigned char mcfpu_qcnt;
1759 unsigned char mcfpu_qentsz;
1760 unsigned char mcfpu_enab;
1761 };
1762 typedef struct target_mc_fpu target_mc_fpu_t;
1763
1764 typedef struct {
1765 target_mc_gregset_t mc_gregs;
1766 target_mc_greg_t mc_fp;
1767 target_mc_greg_t mc_i7;
1768 target_mc_fpu_t mc_fpregs;
1769 } target_mcontext_t;
1770
1771 struct target_ucontext {
1772 struct target_ucontext *tuc_link;
1773 abi_ulong tuc_flags;
1774 target_sigset_t tuc_sigmask;
1775 target_mcontext_t tuc_mcontext;
1776 };
1777
1778 /* A V9 register window */
1779 struct target_reg_window {
1780 abi_ulong locals[8];
1781 abi_ulong ins[8];
1782 };
1783
1784 #define TARGET_STACK_BIAS 2047
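/* SPARC V9 ABI stack bias: %sp and %fp point 2047 bytes below the real
 * save area, so register-window contents are accessed at
 * %sp + TARGET_STACK_BIAS (see the w_addr calculations below). */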
1785
1786 /* {set, get}context() needed for 64-bit SparcLinux userland. */
1787 void sparc64_set_context(CPUSPARCState *env)
1788 {
1789 abi_ulong ucp_addr;
1790 struct target_ucontext *ucp;
1791 target_mc_gregset_t *grp;
1792 abi_ulong pc, npc, tstate;
1793 abi_ulong fp, i7, w_addr;
1794 unsigned int i;
1795
1796 ucp_addr = env->regwptr[UREG_I0];
1797 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
1798 goto do_sigsegv;
1799 }
1800 grp = &ucp->tuc_mcontext.mc_gregs;
1801 __get_user(pc, &((*grp)[SPARC_MC_PC]));
1802 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
1803 if ((pc | npc) & 3) {
1804 goto do_sigsegv;
1805 }
1806 if (env->regwptr[UREG_I1]) {
1807 target_sigset_t target_set;
1808 sigset_t set;
1809
1810 if (TARGET_NSIG_WORDS == 1) {
1811 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
1812 } else {
1813 abi_ulong *src, *dst;
1814 src = ucp->tuc_sigmask.sig;
1815 dst = target_set.sig;
1816 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
1817 __get_user(*dst, src);
1818 }
1819 }
1820 target_to_host_sigset_internal(&set, &target_set);
1821 set_sigmask(&set);
1822 }
1823 env->pc = pc;
1824 env->npc = npc;
1825 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
1826 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
1827 env->asi = (tstate >> 24) & 0xff;
1828 cpu_put_ccr(env, tstate >> 32);
1829 cpu_put_cwp64(env, tstate & 0x1f);
1830 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
1831 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
1832 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
1833 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
1834 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
1835 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
1836 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
1837 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
1838 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
1839 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
1840 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
1841 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
1842 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
1843 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
1844 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
1845
1846 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
1847 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
1848
1849 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
1850 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
1851 abi_ulong) != 0) {
1852 goto do_sigsegv;
1853 }
1854 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
1855 abi_ulong) != 0) {
1856 goto do_sigsegv;
1857 }
1858 /* FIXME this does not match how the kernel handles the FPU in
1859 * its sparc64_set_context implementation. In particular the FPU
1860 * is only restored if fenab is non-zero in:
1861 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
1862 */
1863 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
1864 {
1865 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
1866 for (i = 0; i < 64; i++, src++) {
1867 if (i & 1) {
1868 __get_user(env->fpr[i/2].l.lower, src);
1869 } else {
1870 __get_user(env->fpr[i/2].l.upper, src);
1871 }
1872 }
1873 }
1874 __get_user(env->fsr,
1875 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
1876 __get_user(env->gsr,
1877 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
1878 unlock_user_struct(ucp, ucp_addr, 0);
1879 return;
1880 do_sigsegv:
1881 unlock_user_struct(ucp, ucp_addr, 0);
1882 force_sig(TARGET_SIGSEGV);
1883 }
1884
1885 void sparc64_get_context(CPUSPARCState *env)
1886 {
1887 abi_ulong ucp_addr;
1888 struct target_ucontext *ucp;
1889 target_mc_gregset_t *grp;
1890 target_mcontext_t *mcp;
1891 abi_ulong fp, i7, w_addr;
1892 int err;
1893 unsigned int i;
1894 target_sigset_t target_set;
1895 sigset_t set;
1896
1897 ucp_addr = env->regwptr[UREG_I0];
1898 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
1899 goto do_sigsegv;
1900 }
1901
1902 mcp = &ucp->tuc_mcontext;
1903 grp = &mcp->mc_gregs;
1904
1905 /* Skip over the trap instruction, first. */
1906 env->pc = env->npc;
1907 env->npc += 4;
1908
1909 /* If we're only reading the signal mask then do_sigprocmask()
1910 * is guaranteed not to fail, which is important because we don't
1911 * have any way to signal a failure or restart this operation since
1912 * this is not a normal syscall.
1913 */
1914 err = do_sigprocmask(0, NULL, &set);
1915 assert(err == 0);
1916 host_to_target_sigset_internal(&target_set, &set);
1917 if (TARGET_NSIG_WORDS == 1) {
1918 __put_user(target_set.sig[0],
1919 (abi_ulong *)&ucp->tuc_sigmask);
1920 } else {
1921 abi_ulong *src, *dst;
1922 src = target_set.sig;
1923 dst = ucp->tuc_sigmask.sig;
1924 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
1925 __put_user(*src, dst);
1926 }
1927 if (err)
1928 goto do_sigsegv;
1929 }
1930
1931 /* XXX: tstate must be saved properly */
1932 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
1933 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
1934 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
1935 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
1936 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
1937 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
1938 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
1939 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
1940 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
1941 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
1942 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
1943 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
1944 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
1945 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
1946 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
1947 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
1948 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
1949 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
1950 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
1951
1952 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
1953 fp = i7 = 0;
1954 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
1955 abi_ulong) != 0) {
1956 goto do_sigsegv;
1957 }
1958 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
1959 abi_ulong) != 0) {
1960 goto do_sigsegv;
1961 }
1962 __put_user(fp, &(mcp->mc_fp));
1963 __put_user(i7, &(mcp->mc_i7));
1964
1965 {
1966 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
1967 for (i = 0; i < 64; i++, dst++) {
1968 if (i & 1) {
1969 __put_user(env->fpr[i/2].l.lower, dst);
1970 } else {
1971 __put_user(env->fpr[i/2].l.upper, dst);
1972 }
1973 }
1974 }
1975 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
1976 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
1977 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
1978
1979 if (err)
1980 goto do_sigsegv;
1981 unlock_user_struct(ucp, ucp_addr, 1);
1982 return;
1983 do_sigsegv:
1984 unlock_user_struct(ucp, ucp_addr, 1);
1985 force_sig(TARGET_SIGSEGV);
1986 }
1987 #endif
1988 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
1989
1990 # if defined(TARGET_ABI_MIPSO32)
1991 struct target_sigcontext {
1992 uint32_t sc_regmask; /* Unused */
1993 uint32_t sc_status;
1994 uint64_t sc_pc;
1995 uint64_t sc_regs[32];
1996 uint64_t sc_fpregs[32];
1997 uint32_t sc_ownedfp; /* Unused */
1998 uint32_t sc_fpc_csr;
1999 uint32_t sc_fpc_eir; /* Unused */
2000 uint32_t sc_used_math;
2001 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2002 uint32_t pad0;
2003 uint64_t sc_mdhi;
2004 uint64_t sc_mdlo;
2005 target_ulong sc_hi1; /* Was sc_cause */
2006 target_ulong sc_lo1; /* Was sc_badvaddr */
2007 target_ulong sc_hi2; /* Was sc_sigset[4] */
2008 target_ulong sc_lo2;
2009 target_ulong sc_hi3;
2010 target_ulong sc_lo3;
2011 };
2012 # else /* N32 || N64 */
2013 struct target_sigcontext {
2014 uint64_t sc_regs[32];
2015 uint64_t sc_fpregs[32];
2016 uint64_t sc_mdhi;
2017 uint64_t sc_hi1;
2018 uint64_t sc_hi2;
2019 uint64_t sc_hi3;
2020 uint64_t sc_mdlo;
2021 uint64_t sc_lo1;
2022 uint64_t sc_lo2;
2023 uint64_t sc_lo3;
2024 uint64_t sc_pc;
2025 uint32_t sc_fpc_csr;
2026 uint32_t sc_used_math;
2027 uint32_t sc_dsp;
2028 uint32_t sc_reserved;
2029 };
2030 # endif /* O32 */
2031
2032 struct sigframe {
2033 uint32_t sf_ass[4]; /* argument save space for o32 */
2034 uint32_t sf_code[2]; /* signal trampoline */
2035 struct target_sigcontext sf_sc;
2036 target_sigset_t sf_mask;
2037 };
2038
2039 struct target_ucontext {
2040 target_ulong tuc_flags;
2041 target_ulong tuc_link;
2042 target_stack_t tuc_stack;
2043 target_ulong pad0;
2044 struct target_sigcontext tuc_mcontext;
2045 target_sigset_t tuc_sigmask;
2046 };
2047
2048 struct target_rt_sigframe {
2049 uint32_t rs_ass[4]; /* argument save space for o32 */
2050 uint32_t rs_code[2]; /* signal trampoline */
2051 struct target_siginfo rs_info;
2052 struct target_ucontext rs_uc;
2053 };
2054
2055 /* Install trampoline to jump back from signal handler */
2056 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2057 {
2058 int err = 0;
2059
2060 /*
2061 * Set up the return code ...
2062 *
2063 * li v0, __NR__foo_sigreturn
2064 * syscall
2065 */
2066
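    /* 0x24020000 is the encoding of "addiu v0, zero, imm" (the li above),
       with the syscall number or'ed into the immediate field;
       0x0000000c is the "syscall" instruction. */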
2067 __put_user(0x24020000 + syscall, tramp + 0);
2068 __put_user(0x0000000c , tramp + 1);
2069 return err;
2070 }
2071
2072 static inline void setup_sigcontext(CPUMIPSState *regs,
2073 struct target_sigcontext *sc)
2074 {
2075 int i;
2076
2077 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2078 regs->hflags &= ~MIPS_HFLAG_BMASK;
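    /* exception_resume_pc() folds any pending branch/delay-slot state into
       the saved PC, which is why the branch hflags can be cleared here. */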
2079
2080 __put_user(0, &sc->sc_regs[0]);
2081 for (i = 1; i < 32; ++i) {
2082 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2083 }
2084
2085 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2086 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2087
2088 /* Rather than checking for dsp existence, always copy. The storage
2089 would just be garbage otherwise. */
2090 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2091 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2092 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2093 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2094 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2095 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2096 {
2097 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2098 __put_user(dsp, &sc->sc_dsp);
2099 }
2100
2101 __put_user(1, &sc->sc_used_math);
2102
2103 for (i = 0; i < 32; ++i) {
2104 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2105 }
2106 }
2107
2108 static inline void
2109 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2110 {
2111 int i;
2112
2113 __get_user(regs->CP0_EPC, &sc->sc_pc);
2114
2115 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2116 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2117
2118 for (i = 1; i < 32; ++i) {
2119 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2120 }
2121
2122 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2123 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2124 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2125 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2126 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2127 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2128 {
2129 uint32_t dsp;
2130 __get_user(dsp, &sc->sc_dsp);
2131 cpu_wrdsp(dsp, 0x3ff, regs);
2132 }
2133
2134 for (i = 0; i < 32; ++i) {
2135 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2136 }
2137 }
2138
2139 /*
2140 * Determine which stack to use.
2141 */
2142 static inline abi_ulong
2143 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2144 {
2145 unsigned long sp;
2146
2147 /* Default to using normal stack */
2148 sp = regs->active_tc.gpr[29];
2149
2150 /*
2151 * The FPU emulator may have its own trampoline active just
2152 * above the user stack, 16 bytes before the next lowest
2153 * 16-byte boundary. Try to avoid trashing it.
2154 */
2155 sp -= 32;
2156
2157 /* This is the X/Open sanctioned signal stack switching. */
2158 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2159 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2160 }
2161
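    /* Allocate the frame below sp and round the result down to an
       8-byte boundary. */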
2162 return (sp - frame_size) & ~7;
2163 }
2164
2165 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2166 {
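    /* On CPUs with MIPS16 or microMIPS, bit 0 of the PC selects the
       compressed ISA mode: mirror it into hflags and strip it from the PC. */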
2167 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2168 env->hflags &= ~MIPS_HFLAG_M16;
2169 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2170 env->active_tc.PC &= ~(target_ulong) 1;
2171 }
2172 }
2173
2174 # if defined(TARGET_ABI_MIPSO32)
2175 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2176 static void setup_frame(int sig, struct target_sigaction * ka,
2177 target_sigset_t *set, CPUMIPSState *regs)
2178 {
2179 struct sigframe *frame;
2180 abi_ulong frame_addr;
2181 int i;
2182
2183 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2184 trace_user_setup_frame(regs, frame_addr);
2185 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2186 goto give_sigsegv;
2187 }
2188
2189 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2190
2191 setup_sigcontext(regs, &frame->sf_sc);
2192
2193 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2194 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2195 }
2196
2197 /*
2198 * Arguments to signal handler:
2199 *
2200 * a0 = signal number
2201 * a1 = 0 (should be cause)
2202 * a2 = pointer to struct sigcontext
2203 *
2204 * $25 and PC point to the signal handler, $29 points to the
2205 * struct sigframe.
2206 */
2207 regs->active_tc.gpr[ 4] = sig;
2208 regs->active_tc.gpr[ 5] = 0;
2209 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2210 regs->active_tc.gpr[29] = frame_addr;
2211 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2212 /* The original kernel code sets CP0_EPC to the handler,
2213 * since it returns to userland using eret;
2214 * we cannot do that here, so we must set PC directly */
2215 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2216 mips_set_hflags_isa_mode_from_pc(regs);
2217 unlock_user_struct(frame, frame_addr, 1);
2218 return;
2219
2220 give_sigsegv:
2221 force_sigsegv(sig);
2222 }
2223
2224 long do_sigreturn(CPUMIPSState *regs)
2225 {
2226 struct sigframe *frame;
2227 abi_ulong frame_addr;
2228 sigset_t blocked;
2229 target_sigset_t target_set;
2230 int i;
2231
2232 frame_addr = regs->active_tc.gpr[29];
2233 trace_user_do_sigreturn(regs, frame_addr);
2234 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2235 goto badframe;
2236
2237 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2238 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2239 }
2240
2241 target_to_host_sigset_internal(&blocked, &target_set);
2242 set_sigmask(&blocked);
2243
2244 restore_sigcontext(regs, &frame->sf_sc);
2245
2246 #if 0
2247 /*
2248 * Don't let your children do this ...
2249 */
2250 __asm__ __volatile__(
2251 "move\t$29, %0\n\t"
2252 "j\tsyscall_exit"
2253 :/* no outputs */
2254 :"r" (&regs));
2255 /* Unreached */
2256 #endif
2257
2258 regs->active_tc.PC = regs->CP0_EPC;
2259 mips_set_hflags_isa_mode_from_pc(regs);
2260 /* I am not sure this is right, but it seems to work;
2261 * maybe a problem with nested signals? */
2262 regs->CP0_EPC = 0;
2263 return -TARGET_QEMU_ESIGRETURN;
2264
2265 badframe:
2266 force_sig(TARGET_SIGSEGV);
2267 return -TARGET_QEMU_ESIGRETURN;
2268 }
2269 # endif /* O32 */
2270
2271 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2272 target_siginfo_t *info,
2273 target_sigset_t *set, CPUMIPSState *env)
2274 {
2275 struct target_rt_sigframe *frame;
2276 abi_ulong frame_addr;
2277 int i;
2278
2279 frame_addr = get_sigframe(ka, env, sizeof(*frame));
2280 trace_user_setup_rt_frame(env, frame_addr);
2281 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2282 goto give_sigsegv;
2283 }
2284
2285 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
2286
2287 tswap_siginfo(&frame->rs_info, info);
2288
2289 __put_user(0, &frame->rs_uc.tuc_flags);
2290 __put_user(0, &frame->rs_uc.tuc_link);
2291 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
2292 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
2293 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
2294 &frame->rs_uc.tuc_stack.ss_flags);
2295
2296 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
2297
2298 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2299 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
2300 }
2301
2302 /*
2303 * Arguments to signal handler:
2304 *
2305 * a0 = signal number
2306 * a1 = pointer to siginfo_t
2307 * a2 = pointer to ucontext_t
2308 *
2309 * $25 and PC point to the signal handler, $29 points to the
2310 * struct sigframe.
2311 */
2312 env->active_tc.gpr[ 4] = sig;
2313 env->active_tc.gpr[ 5] = frame_addr
2314 + offsetof(struct target_rt_sigframe, rs_info);
2315 env->active_tc.gpr[ 6] = frame_addr
2316 + offsetof(struct target_rt_sigframe, rs_uc);
2317 env->active_tc.gpr[29] = frame_addr;
2318 env->active_tc.gpr[31] = frame_addr
2319 + offsetof(struct target_rt_sigframe, rs_code);
2320 /* The original kernel code sets CP0_EPC to the handler,
2321 * since it returns to userland using eret;
2322 * we cannot do that here, so we must set PC directly */
2323 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
2324 mips_set_hflags_isa_mode_from_pc(env);
2325 unlock_user_struct(frame, frame_addr, 1);
2326 return;
2327
2328 give_sigsegv:
2329 unlock_user_struct(frame, frame_addr, 1);
2330 force_sigsegv(sig);
2331 }
2332
2333 long do_rt_sigreturn(CPUMIPSState *env)
2334 {
2335 struct target_rt_sigframe *frame;
2336 abi_ulong frame_addr;
2337 sigset_t blocked;
2338
2339 frame_addr = env->active_tc.gpr[29];
2340 trace_user_do_rt_sigreturn(env, frame_addr);
2341 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2342 goto badframe;
2343 }
2344
2345 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
2346 set_sigmask(&blocked);
2347
2348 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
2349
2350 if (do_sigaltstack(frame_addr +
2351 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
2352 0, get_sp_from_cpustate(env)) == -EFAULT)
2353 goto badframe;
2354
2355 env->active_tc.PC = env->CP0_EPC;
2356 mips_set_hflags_isa_mode_from_pc(env);
2357 /* I am not sure this is right, but it seems to work;
2358 * maybe a problem with nested signals? */
2359 env->CP0_EPC = 0;
2360 return -TARGET_QEMU_ESIGRETURN;
2361
2362 badframe:
2363 force_sig(TARGET_SIGSEGV);
2364 return -TARGET_QEMU_ESIGRETURN;
2365 }
2366
2367 #elif defined(TARGET_MICROBLAZE)
2368
2369 struct target_sigcontext {
2370 struct target_pt_regs regs; /* needs to be first */
2371 uint32_t oldmask;
2372 };
2373
2374 struct target_stack_t {
2375 abi_ulong ss_sp;
2376 int ss_flags;
2377 unsigned int ss_size;
2378 };
2379
2380 struct target_ucontext {
2381 abi_ulong tuc_flags;
2382 abi_ulong tuc_link;
2383 struct target_stack_t tuc_stack;
2384 struct target_sigcontext tuc_mcontext;
2385 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
2386 };
2387
2388 /* Signal frames. */
2389 struct target_signal_frame {
2390 struct target_ucontext uc;
2391 uint32_t extramask[TARGET_NSIG_WORDS - 1];
2392 uint32_t tramp[2];
2393 };
2394
2395 struct rt_signal_frame {
2396 siginfo_t info;
2397 ucontext_t uc;
2398 uint32_t tramp[2];
2399 };
2400
2401 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
2402 {
2403 __put_user(env->regs[0], &sc->regs.r0);
2404 __put_user(env->regs[1], &sc->regs.r1);
2405 __put_user(env->regs[2], &sc->regs.r2);
2406 __put_user(env->regs[3], &sc->regs.r3);
2407 __put_user(env->regs[4], &sc->regs.r4);
2408 __put_user(env->regs[5], &sc->regs.r5);
2409 __put_user(env->regs[6], &sc->regs.r6);
2410 __put_user(env->regs[7], &sc->regs.r7);
2411 __put_user(env->regs[8], &sc->regs.r8);
2412 __put_user(env->regs[9], &sc->regs.r9);
2413 __put_user(env->regs[10], &sc->regs.r10);
2414 __put_user(env->regs[11], &sc->regs.r11);
2415 __put_user(env->regs[12], &sc->regs.r12);
2416 __put_user(env->regs[13], &sc->regs.r13);
2417 __put_user(env->regs[14], &sc->regs.r14);
2418 __put_user(env->regs[15], &sc->regs.r15);
2419 __put_user(env->regs[16], &sc->regs.r16);
2420 __put_user(env->regs[17], &sc->regs.r17);
2421 __put_user(env->regs[18], &sc->regs.r18);
2422 __put_user(env->regs[19], &sc->regs.r19);
2423 __put_user(env->regs[20], &sc->regs.r20);
2424 __put_user(env->regs[21], &sc->regs.r21);
2425 __put_user(env->regs[22], &sc->regs.r22);
2426 __put_user(env->regs[23], &sc->regs.r23);
2427 __put_user(env->regs[24], &sc->regs.r24);
2428 __put_user(env->regs[25], &sc->regs.r25);
2429 __put_user(env->regs[26], &sc->regs.r26);
2430 __put_user(env->regs[27], &sc->regs.r27);
2431 __put_user(env->regs[28], &sc->regs.r28);
2432 __put_user(env->regs[29], &sc->regs.r29);
2433 __put_user(env->regs[30], &sc->regs.r30);
2434 __put_user(env->regs[31], &sc->regs.r31);
2435 __put_user(env->sregs[SR_PC], &sc->regs.pc);
2436 }
2437
2438 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
2439 {
2440 __get_user(env->regs[0], &sc->regs.r0);
2441 __get_user(env->regs[1], &sc->regs.r1);
2442 __get_user(env->regs[2], &sc->regs.r2);
2443 __get_user(env->regs[3], &sc->regs.r3);
2444 __get_user(env->regs[4], &sc->regs.r4);
2445 __get_user(env->regs[5], &sc->regs.r5);
2446 __get_user(env->regs[6], &sc->regs.r6);
2447 __get_user(env->regs[7], &sc->regs.r7);
2448 __get_user(env->regs[8], &sc->regs.r8);
2449 __get_user(env->regs[9], &sc->regs.r9);
2450 __get_user(env->regs[10], &sc->regs.r10);
2451 __get_user(env->regs[11], &sc->regs.r11);
2452 __get_user(env->regs[12], &sc->regs.r12);
2453 __get_user(env->regs[13], &sc->regs.r13);
2454 __get_user(env->regs[14], &sc->regs.r14);
2455 __get_user(env->regs[15], &sc->regs.r15);
2456 __get_user(env->regs[16], &sc->regs.r16);
2457 __get_user(env->regs[17], &sc->regs.r17);
2458 __get_user(env->regs[18], &sc->regs.r18);
2459 __get_user(env->regs[19], &sc->regs.r19);
2460 __get_user(env->regs[20], &sc->regs.r20);
2461 __get_user(env->regs[21], &sc->regs.r21);
2462 __get_user(env->regs[22], &sc->regs.r22);
2463 __get_user(env->regs[23], &sc->regs.r23);
2464 __get_user(env->regs[24], &sc->regs.r24);
2465 __get_user(env->regs[25], &sc->regs.r25);
2466 __get_user(env->regs[26], &sc->regs.r26);
2467 __get_user(env->regs[27], &sc->regs.r27);
2468 __get_user(env->regs[28], &sc->regs.r28);
2469 __get_user(env->regs[29], &sc->regs.r29);
2470 __get_user(env->regs[30], &sc->regs.r30);
2471 __get_user(env->regs[31], &sc->regs.r31);
2472 __get_user(env->sregs[SR_PC], &sc->regs.pc);
2473 }
2474
2475 static abi_ulong get_sigframe(struct target_sigaction *ka,
2476 CPUMBState *env, int frame_size)
2477 {
2478 abi_ulong sp = env->regs[1];
2479
2480 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
2481 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2482 }
2483
2484 return ((sp - frame_size) & -8UL);
2485 }
2486
2487 static void setup_frame(int sig, struct target_sigaction *ka,
2488 target_sigset_t *set, CPUMBState *env)
2489 {
2490 struct target_signal_frame *frame;
2491 abi_ulong frame_addr;
2492 int i;
2493
2494 frame_addr = get_sigframe(ka, env, sizeof *frame);
2495 trace_user_setup_frame(env, frame_addr);
2496 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
2497 goto badframe;
2498
2499 /* Save the mask. */
2500 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
2501
2502 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2503 __put_user(set->sig[i], &frame->extramask[i - 1]);
2504 }
2505
2506 setup_sigcontext(&frame->uc.tuc_mcontext, env);
2507
2508 /* Set up to return from userspace. If provided, use a stub
2509 already in userspace. */
2510 /* The minus 8 cancels the +8 that the "rtsd r15, 8" return adds */
2511 if (ka->sa_flags & TARGET_SA_RESTORER) {
2512 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
2513 } else {
2514 uint32_t t;
2515 /* Note, these encodings are _big endian_! */
2516 /* addi r12, r0, __NR_sigreturn */
2517 t = 0x31800000UL | TARGET_NR_sigreturn;
2518 __put_user(t, frame->tramp + 0);
2519 /* brki r14, 0x8 */
2520 t = 0xb9cc0008UL;
2521 __put_user(t, frame->tramp + 1);
2522
2523 /* Return from the signal handler will jump to the tramp;
2524 the -8 offset is because the return is "rtsd r15, 8" */
2525 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
2526 - 8;
2527 }
2528
2529 /* Set up registers for signal handler */
2530 env->regs[1] = frame_addr;
2531 /* Signal handler args: */
2532 env->regs[5] = sig; /* Arg 0: signum */
2533 env->regs[6] = 0;
2534 /* arg 1: sigcontext */
2535 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
2536
2537 /* Offset of 4 to handle microblaze rtid r14, 0 */
2538 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
2539
2540 unlock_user_struct(frame, frame_addr, 1);
2541 return;
2542 badframe:
2543 force_sigsegv(sig);
2544 }
2545
2546 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2547 target_siginfo_t *info,
2548 target_sigset_t *set, CPUMBState *env)
2549 {
2550 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
2551 }
2552
2553 long do_sigreturn(CPUMBState *env)
2554 {
2555 struct target_signal_frame *frame;
2556 abi_ulong frame_addr;
2557 target_sigset_t target_set;
2558 sigset_t set;
2559 int i;
2560
2561 frame_addr = env->regs[R_SP];
2562 trace_user_do_sigreturn(env, frame_addr);
2563 /* Make sure the guest isn't playing games. */
2564 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
2565 goto badframe;
2566
2567 /* Restore blocked signals */
2568 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
2569 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2570 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
2571 }
2572 target_to_host_sigset_internal(&set, &target_set);
2573 set_sigmask(&set);
2574
2575 restore_sigcontext(&frame->uc.tuc_mcontext, env);
2576 /* We got here through a sigreturn syscall; our path back is via an
2577 rtb insn, so set up r14 for that. */
2578 env->regs[14] = env->sregs[SR_PC];
2579
2580 unlock_user_struct(frame, frame_addr, 0);
2581 return -TARGET_QEMU_ESIGRETURN;
2582 badframe:
2583 force_sig(TARGET_SIGSEGV);
2584 return -TARGET_QEMU_ESIGRETURN;
2585 }
2586
2587 long do_rt_sigreturn(CPUMBState *env)
2588 {
2589 trace_user_do_rt_sigreturn(env, 0);
2590 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
2591 return -TARGET_ENOSYS;
2592 }
2593
2594 #elif defined(TARGET_CRIS)
2595
2596 struct target_sigcontext {
2597 struct target_pt_regs regs; /* needs to be first */
2598 uint32_t oldmask;
2599 uint32_t usp; /* usp before stacking this gunk on it */
2600 };
2601
2602 /* Signal frames. */
2603 struct target_signal_frame {
2604 struct target_sigcontext sc;
2605 uint32_t extramask[TARGET_NSIG_WORDS - 1];
2606 uint16_t retcode[4]; /* Trampoline code. */
2607 };
2608
2609 struct rt_signal_frame {
2610 siginfo_t *pinfo;
2611 void *puc;
2612 siginfo_t info;
2613 ucontext_t uc;
2614 uint16_t retcode[4]; /* Trampoline code. */
2615 };
2616
2617 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
2618 {
2619 __put_user(env->regs[0], &sc->regs.r0);
2620 __put_user(env->regs[1], &sc->regs.r1);
2621 __put_user(env->regs[2], &sc->regs.r2);
2622 __put_user(env->regs[3], &sc->regs.r3);
2623 __put_user(env->regs[4], &sc->regs.r4);
2624 __put_user(env->regs[5], &sc->regs.r5);
2625 __put_user(env->regs[6], &sc->regs.r6);
2626 __put_user(env->regs[7], &sc->regs.r7);
2627 __put_user(env->regs[8], &sc->regs.r8);
2628 __put_user(env->regs[9], &sc->regs.r9);
2629 __put_user(env->regs[10], &sc->regs.r10);
2630 __put_user(env->regs[11], &sc->regs.r11);
2631 __put_user(env->regs[12], &sc->regs.r12);
2632 __put_user(env->regs[13], &sc->regs.r13);
2633 __put_user(env->regs[14], &sc->usp);
2634 __put_user(env->regs[15], &sc->regs.acr);
2635 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
2636 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
2637 __put_user(env->pc, &sc->regs.erp);
2638 }
2639
2640 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
2641 {
2642 __get_user(env->regs[0], &sc->regs.r0);
2643 __get_user(env->regs[1], &sc->regs.r1);
2644 __get_user(env->regs[2], &sc->regs.r2);
2645 __get_user(env->regs[3], &sc->regs.r3);
2646 __get_user(env->regs[4], &sc->regs.r4);
2647 __get_user(env->regs[5], &sc->regs.r5);
2648 __get_user(env->regs[6], &sc->regs.r6);
2649 __get_user(env->regs[7], &sc->regs.r7);
2650 __get_user(env->regs[8], &sc->regs.r8);
2651 __get_user(env->regs[9], &sc->regs.r9);
2652 __get_user(env->regs[10], &sc->regs.r10);
2653 __get_user(env->regs[11], &sc->regs.r11);
2654 __get_user(env->regs[12], &sc->regs.r12);
2655 __get_user(env->regs[13], &sc->regs.r13);
2656 __get_user(env->regs[14], &sc->usp);
2657 __get_user(env->regs[15], &sc->regs.acr);
2658 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
2659 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
2660 __get_user(env->pc, &sc->regs.erp);
2661 }
2662
2663 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
2664 {
2665 abi_ulong sp;
2666 /* Align the stack downwards to a 4-byte boundary. */
2667 sp = (env->regs[R_SP] & ~3);
2668 return sp - framesize;
2669 }
2670
2671 static void setup_frame(int sig, struct target_sigaction *ka,
2672 target_sigset_t *set, CPUCRISState *env)
2673 {
2674 struct target_signal_frame *frame;
2675 abi_ulong frame_addr;
2676 int i;
2677
2678 frame_addr = get_sigframe(env, sizeof *frame);
2679 trace_user_setup_frame(env, frame_addr);
2680 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
2681 goto badframe;
2682
2683 /*
2684 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
2685 * use this trampoline anymore but it sets it up for GDB.
2686 * In QEMU, using the trampoline simplifies things a bit so we use it.
2687 *
2688 * This is movu.w __NR_sigreturn, r9; break 13;
2689 */
2690 __put_user(0x9c5f, frame->retcode+0);
2691 __put_user(TARGET_NR_sigreturn,
2692 frame->retcode + 1);
2693 __put_user(0xe93d, frame->retcode + 2);
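    /* As the comment above says: 0x9c5f is the "movu.w imm, r9" opcode,
       followed by the 16-bit syscall number, and 0xe93d encodes "break 13". */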
2694
2695 /* Save the mask. */
2696 __put_user(set->sig[0], &frame->sc.oldmask);
2697
2698 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2699 __put_user(set->sig[i], &frame->extramask[i - 1]);
2700 }
2701
2702 setup_sigcontext(&frame->sc, env);
2703
2704 /* Move the stack and setup the arguments for the handler. */
2705 env->regs[R_SP] = frame_addr;
2706 env->regs[10] = sig;
2707 env->pc = (unsigned long) ka->_sa_handler;
2708 /* Link SRP so the guest returns through the trampoline. */
2709 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
2710
2711 unlock_user_struct(frame, frame_addr, 1);
2712 return;
2713 badframe:
2714 force_sigsegv(sig);
2715 }
2716
2717 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2718 target_siginfo_t *info,
2719 target_sigset_t *set, CPUCRISState *env)
2720 {
2721 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
2722 }
2723
2724 long do_sigreturn(CPUCRISState *env)
2725 {
2726 struct target_signal_frame *frame;
2727 abi_ulong frame_addr;
2728 target_sigset_t target_set;
2729 sigset_t set;
2730 int i;
2731
2732 frame_addr = env->regs[R_SP];
2733 trace_user_do_sigreturn(env, frame_addr);
2734 /* Make sure the guest isn't playing games. */
2735 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
2736 goto badframe;
2737 }
2738
2739 /* Restore blocked signals */
2740 __get_user(target_set.sig[0], &frame->sc.oldmask);
2741 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2742 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
2743 }
2744 target_to_host_sigset_internal(&set, &target_set);
2745 set_sigmask(&set);
2746
2747 restore_sigcontext(&frame->sc, env);
2748 unlock_user_struct(frame, frame_addr, 0);
2749 return -TARGET_QEMU_ESIGRETURN;
2750 badframe:
2751 force_sig(TARGET_SIGSEGV);
2752 return -TARGET_QEMU_ESIGRETURN;
2753 }
2754
2755 long do_rt_sigreturn(CPUCRISState *env)
2756 {
2757 trace_user_do_rt_sigreturn(env, 0);
2758 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
2759 return -TARGET_ENOSYS;
2760 }
2761
2762 #elif defined(TARGET_NIOS2)
2763
2764 #define MCONTEXT_VERSION 2
2765
2766 struct target_sigcontext {
2767 int version;
2768 unsigned long gregs[32];
2769 };
2770
2771 struct target_ucontext {
2772 abi_ulong tuc_flags;
2773 abi_ulong tuc_link;
2774 target_stack_t tuc_stack;
2775 struct target_sigcontext tuc_mcontext;
2776 target_sigset_t tuc_sigmask; /* mask last for extensibility */
2777 };
2778
2779 struct target_rt_sigframe {
2780 struct target_siginfo info;
2781 struct target_ucontext uc;
2782 };
2783
2784 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
2785 {
2786 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
2787 #ifdef CONFIG_STACK_GROWSUP
2788 return target_sigaltstack_used.ss_sp;
2789 #else
2790 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2791 #endif
2792 }
2793 return sp;
2794 }
2795
2796 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
2797 {
2798 unsigned long *gregs = uc->tuc_mcontext.gregs;
2799
2800 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
2801 __put_user(env->regs[1], &gregs[0]);
2802 __put_user(env->regs[2], &gregs[1]);
2803 __put_user(env->regs[3], &gregs[2]);
2804 __put_user(env->regs[4], &gregs[3]);
2805 __put_user(env->regs[5], &gregs[4]);
2806 __put_user(env->regs[6], &gregs[5]);
2807 __put_user(env->regs[7], &gregs[6]);
2808 __put_user(env->regs[8], &gregs[7]);
2809 __put_user(env->regs[9], &gregs[8]);
2810 __put_user(env->regs[10], &gregs[9]);
2811 __put_user(env->regs[11], &gregs[10]);
2812 __put_user(env->regs[12], &gregs[11]);
2813 __put_user(env->regs[13], &gregs[12]);
2814 __put_user(env->regs[14], &gregs[13]);
2815 __put_user(env->regs[15], &gregs[14]);
2816 __put_user(env->regs[16], &gregs[15]);
2817 __put_user(env->regs[17], &gregs[16]);
2818 __put_user(env->regs[18], &gregs[17]);
2819 __put_user(env->regs[19], &gregs[18]);
2820 __put_user(env->regs[20], &gregs[19]);
2821 __put_user(env->regs[21], &gregs[20]);
2822 __put_user(env->regs[22], &gregs[21]);
2823 __put_user(env->regs[23], &gregs[22]);
2824 __put_user(env->regs[R_RA], &gregs[23]);
2825 __put_user(env->regs[R_FP], &gregs[24]);
2826 __put_user(env->regs[R_GP], &gregs[25]);
2827 __put_user(env->regs[R_EA], &gregs[27]);
2828 __put_user(env->regs[R_SP], &gregs[28]);
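    /* Register slots: r1..r23 go in gregs[0..22], then ra, fp and gp in
       23..25; slot 26 is left untouched here, and ea/sp land in 27/28. */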
2829
2830 return 0;
2831 }
2832
2833 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
2834 int *pr2)
2835 {
2836 int temp;
2837 abi_ulong off, frame_addr = env->regs[R_SP];
2838 unsigned long *gregs = uc->tuc_mcontext.gregs;
2839 int err;
2840
2841 /* Always make any pending restarted system calls return -EINTR */
2842 /* current->restart_block.fn = do_no_restart_syscall; */
2843
2844 __get_user(temp, &uc->tuc_mcontext.version);
2845 if (temp != MCONTEXT_VERSION) {
2846 return 1;
2847 }
2848
2849 /* restore passed registers */
2850 __get_user(env->regs[1], &gregs[0]);
2851 __get_user(env->regs[2], &gregs[1]);
2852 __get_user(env->regs[3], &gregs[2]);
2853 __get_user(env->regs[4], &gregs[3]);
2854 __get_user(env->regs[5], &gregs[4]);
2855 __get_user(env->regs[6], &gregs[5]);
2856 __get_user(env->regs[7], &gregs[6]);
2857 __get_user(env->regs[8], &gregs[7]);
2858 __get_user(env->regs[9], &gregs[8]);
2859 __get_user(env->regs[10], &gregs[9]);
2860 __get_user(env->regs[11], &gregs[10]);
2861 __get_user(env->regs[12], &gregs[11]);
2862 __get_user(env->regs[13], &gregs[12]);
2863 __get_user(env->regs[14], &gregs[13]);
2864 __get_user(env->regs[15], &gregs[14]);
2865 __get_user(env->regs[16], &gregs[15]);
2866 __get_user(env->regs[17], &gregs[16]);
2867 __get_user(env->regs[18], &gregs[17]);
2868 __get_user(env->regs[19], &gregs[18]);
2869 __get_user(env->regs[20], &gregs[19]);
2870 __get_user(env->regs[21], &gregs[20]);
2871 __get_user(env->regs[22], &gregs[21]);
2872 __get_user(env->regs[23], &gregs[22]);
2873 /* gregs[23] is handled below */
2874 /* Verify: should this be settable? */
2875 __get_user(env->regs[R_FP], &gregs[24]);
2876 /* Verify: should this be settable? */
2877 __get_user(env->regs[R_GP], &gregs[25]);
2878 /* Not really necessary: no user-settable bits */
2879 __get_user(temp, &gregs[26]);
2880 __get_user(env->regs[R_EA], &gregs[27]);
2881
2882 __get_user(env->regs[R_RA], &gregs[23]);
2883 __get_user(env->regs[R_SP], &gregs[28]);
2884
2885 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
2886 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
2887 if (err == -EFAULT) {
2888 return 1;
2889 }
2890
2891 *pr2 = env->regs[2];
2892 return 0;
2893 }
2894
2895 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
2896 size_t frame_size)
2897 {
2898 unsigned long usp;
2899
2900 /* Default to using normal stack. */
2901 usp = env->regs[R_SP];
2902
2903 /* This is the X/Open sanctioned signal stack switching. */
2904 usp = sigsp(usp, ka);
2905
2906 /* Verify: is it 32- or 64-bit aligned? */
2907 return (void *)((usp - frame_size) & -8UL);
2908 }
2909
2910 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2911 target_siginfo_t *info,
2912 target_sigset_t *set,
2913 CPUNios2State *env)
2914 {
2915 struct target_rt_sigframe *frame;
2916 int i, err = 0;
2917
2918 frame = get_sigframe(ka, env, sizeof(*frame));
2919
2920 if (ka->sa_flags & SA_SIGINFO) {
2921 tswap_siginfo(&frame->info, info);
2922 }
2923
2924 /* Create the ucontext. */
2925 __put_user(0, &frame->uc.tuc_flags);
2926 __put_user(0, &frame->uc.tuc_link);
2927 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
2928 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
2929 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
2930 err |= rt_setup_ucontext(&frame->uc, env);
2931 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2932 __put_user((abi_ulong)set->sig[i],
2933 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
2934 }
2935
2936 if (err) {
2937 goto give_sigsegv;
2938 }
2939
2940 /* Set up to return from userspace; jump to fixed address sigreturn
2941 trampoline on kuser page. */
2942 env->regs[R_RA] = (unsigned long) (0x1044);
2943
2944 /* Set up registers for signal handler */
2945 env->regs[R_SP] = (unsigned long) frame;
2946 env->regs[4] = (unsigned long) sig;
2947 env->regs[5] = (unsigned long) &frame->info;
2948 env->regs[6] = (unsigned long) &frame->uc;
2949 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
2950 return;
2951
2952 give_sigsegv:
2953 if (sig == TARGET_SIGSEGV) {
2954 ka->_sa_handler = TARGET_SIG_DFL;
2955 }
2956 force_sigsegv(sig);
2957 return;
2958 }
2959
2960 long do_sigreturn(CPUNios2State *env)
2961 {
2962 trace_user_do_sigreturn(env, 0);
2963 fprintf(stderr, "do_sigreturn: not implemented\n");
2964 return -TARGET_ENOSYS;
2965 }
2966
2967 long do_rt_sigreturn(CPUNios2State *env)
2968 {
2969 /* Verify: can we follow the stack back? */
2970 abi_ulong frame_addr = env->regs[R_SP];
2971 struct target_rt_sigframe *frame;
2972 sigset_t set;
2973 int rval;
2974
2975 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2976 goto badframe;
2977 }
2978
2979 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
2980 do_sigprocmask(SIG_SETMASK, &set, NULL);
2981
2982 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
2983 goto badframe;
2984 }
2985
2986 unlock_user_struct(frame, frame_addr, 0);
2987 return rval;
2988
2989 badframe:
2990 unlock_user_struct(frame, frame_addr, 0);
2991 force_sig(TARGET_SIGSEGV);
2992 return 0;
2993 }
2994 /* TARGET_NIOS2 */
2995
2996 #elif defined(TARGET_OPENRISC)
2997
2998 struct target_sigcontext {
2999 struct target_pt_regs regs;
3000 abi_ulong oldmask;
3001 abi_ulong usp;
3002 };
3003
3004 struct target_ucontext {
3005 abi_ulong tuc_flags;
3006 abi_ulong tuc_link;
3007 target_stack_t tuc_stack;
3008 struct target_sigcontext tuc_mcontext;
3009 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3010 };
3011
3012 struct target_rt_sigframe {
3013 abi_ulong pinfo;
3014 uint64_t puc;
3015 struct target_siginfo info;
3016 struct target_sigcontext sc;
3017 struct target_ucontext uc;
3018 unsigned char retcode[16]; /* trampoline code */
3019 };
3020
3021 /* This is the asm-generic/ucontext.h version */
3022 #if 0
3023 static int restore_sigcontext(CPUOpenRISCState *regs,
3024 struct target_sigcontext *sc)
3025 {
3026 unsigned int err = 0;
3027 unsigned long old_usp;
3028
3029 /* Always make any pending restarted system call return -EINTR */
3030 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3031
3032 /* restore the regs from &sc->regs (same as sc, since regs is first)
3033 * (sc is already checked for VERIFY_READ since the sigframe was
3034 * checked in sys_sigreturn previously)
3035 */
3036
3037 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3038 goto badframe;
3039 }
3040
3041 /* make sure the U-flag is set so user-mode cannot fool us */
3042
3043 regs->sr &= ~SR_SM;
3044
3045 /* restore the old USP as it was before we stacked the sc etc.
3046 * (we cannot just pop the sigcontext since we aligned the sp and
3047 * stuff after pushing it)
3048 */
3049
3050 __get_user(old_usp, &sc->usp);
3051 phx_signal("old_usp 0x%lx", old_usp);
3052
3053 __PHX__ REALLY /* ??? */
3054 wrusp(old_usp);
3055 regs->gpr[1] = old_usp;
3056
3057 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3058 * after this completes, but we don't use that mechanism. Maybe we can
3059 * use it now?
3060 */
3061
3062 return err;
3063
3064 badframe:
3065 return 1;
3066 }
3067 #endif
3068
3069 /* Set up a signal frame. */
3070
3071 static void setup_sigcontext(struct target_sigcontext *sc,
3072 CPUOpenRISCState *regs,
3073 unsigned long mask)
3074 {
3075 unsigned long usp = cpu_get_gpr(regs, 1);
3076
3077 /* Copy the regs; they are first in sc, so we can use sc directly */
3078
3079 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3080
3081 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3082 the signal handler. The frametype will be restored to its previous
3083 value in restore_sigcontext. */
3084 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3085
3086 /* then some other stuff */
3087 __put_user(mask, &sc->oldmask);
3088 __put_user(usp, &sc->usp);
3089 }
3090
3091 static inline unsigned long align_sigframe(unsigned long sp)
3092 {
3093 return sp & ~3UL;
3094 }
3095
3096 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3097 CPUOpenRISCState *regs,
3098 size_t frame_size)
3099 {
3100 unsigned long sp = cpu_get_gpr(regs, 1);
3101 int onsigstack = on_sig_stack(sp);
3102
3103 /* redzone */
3104 /* This is the X/Open sanctioned signal stack switching. */
3105 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3106 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3107 }
3108
3109 sp = align_sigframe(sp - frame_size);
3110
3111 /*
3112 * If we are on the alternate signal stack and would overflow it, don't.
3113 * Return an always-bogus address instead so we will die with SIGSEGV.
3114 */
3115
3116 if (onsigstack && !likely(on_sig_stack(sp))) {
3117 return -1L;
3118 }
3119
3120 return sp;
3121 }
3122
3123 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3124 target_siginfo_t *info,
3125 target_sigset_t *set, CPUOpenRISCState *env)
3126 {
3127 int err = 0;
3128 abi_ulong frame_addr;
3129 unsigned long return_ip;
3130 struct target_rt_sigframe *frame;
3131 abi_ulong info_addr, uc_addr;
3132
3133 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3134 trace_user_setup_rt_frame(env, frame_addr);
3135 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3136 goto give_sigsegv;
3137 }
3138
3139 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
3140 __put_user(info_addr, &frame->pinfo);
3141 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
3142 __put_user(uc_addr, &frame->puc);
3143
3144 if (ka->sa_flags & SA_SIGINFO) {
3145 tswap_siginfo(&frame->info, info);
3146 }
3147
3148 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
3149 __put_user(0, &frame->uc.tuc_flags);
3150 __put_user(0, &frame->uc.tuc_link);
3151 __put_user(target_sigaltstack_used.ss_sp,
3152 &frame->uc.tuc_stack.ss_sp);
3153 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
3154 &frame->uc.tuc_stack.ss_flags);
3155 __put_user(target_sigaltstack_used.ss_size,
3156 &frame->uc.tuc_stack.ss_size);
3157 setup_sigcontext(&frame->sc, env, set->sig[0]);
3158
3159 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
3160
3161 /* trampoline - the desired return ip is the retcode itself */
3162 return_ip = (unsigned long)&frame->retcode;
3163 /* This is: l.ori r11,r0,__NR_rt_sigreturn; l.sys 1 */
3164 __put_user(0xa960, (short *)(frame->retcode + 0));
3165 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
3166 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
3167 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
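    /* 0xa960 plus the syscall number in the low half-word forms
       "l.ori r11,r0,NR"; 0x20000001 is "l.sys 1" and 0x15000000 is "l.nop". */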
3168
3169 if (err) {
3170 goto give_sigsegv;
3171 }
3172
3173 /* TODO what is the current->exec_domain stuff and invmap ? */
3174
3175 /* Set up registers for signal handler */
3176 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
3177 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
3178 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
3179 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
3180 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
3181
3182 /* actually move the usp to reflect the stacked frame */
3183 cpu_set_gpr(env, 1, (unsigned long)frame);
3184
3185 return;
3186
3187 give_sigsegv:
3188 unlock_user_struct(frame, frame_addr, 1);
3189 force_sigsegv(sig);
3190 }
3191
3192 long do_sigreturn(CPUOpenRISCState *env)
3193 {
3194 trace_user_do_sigreturn(env, 0);
3195 fprintf(stderr, "do_sigreturn: not implemented\n");
3196 return -TARGET_ENOSYS;
3197 }
3198
3199 long do_rt_sigreturn(CPUOpenRISCState *env)
3200 {
3201 trace_user_do_rt_sigreturn(env, 0);
3202 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
3203 return -TARGET_ENOSYS;
3204 }
3205 /* TARGET_OPENRISC */
3206
3207 #elif defined(TARGET_S390X)
3208
3209 #define __NUM_GPRS 16
3210 #define __NUM_FPRS 16
3211 #define __NUM_ACRS 16
3212
3213 #define S390_SYSCALL_SIZE 2
3214 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
3215
3216 #define _SIGCONTEXT_NSIG 64
3217 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
3218 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
3219 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
3220 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
3221 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
3222
3223 typedef struct {
3224 target_psw_t psw;
3225 target_ulong gprs[__NUM_GPRS];
3226 unsigned int acrs[__NUM_ACRS];
3227 } target_s390_regs_common;
3228
3229 typedef struct {
3230 unsigned int fpc;
3231 double fprs[__NUM_FPRS];
3232 } target_s390_fp_regs;
3233
3234 typedef struct {
3235 target_s390_regs_common regs;
3236 target_s390_fp_regs fpregs;
3237 } target_sigregs;
3238
3239 struct target_sigcontext {
3240 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
3241 target_sigregs *sregs;
3242 };
3243
3244 typedef struct {
3245 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
3246 struct target_sigcontext sc;
3247 target_sigregs sregs;
3248 int signo;
3249 uint8_t retcode[S390_SYSCALL_SIZE];
3250 } sigframe;
3251
3252 struct target_ucontext {
3253 target_ulong tuc_flags;
3254 struct target_ucontext *tuc_link;
3255 target_stack_t tuc_stack;
3256 target_sigregs tuc_mcontext;
3257 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3258 };
3259
3260 typedef struct {
3261 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
3262 uint8_t retcode[S390_SYSCALL_SIZE];
3263 struct target_siginfo info;
3264 struct target_ucontext uc;
3265 } rt_sigframe;
3266
3267 static inline abi_ulong
3268 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
3269 {
3270 abi_ulong sp;
3271
3272 /* Default to using normal stack */
3273 sp = env->regs[15];
3274
3275 /* This is the X/Open sanctioned signal stack switching. */
3276 if (ka->sa_flags & TARGET_SA_ONSTACK) {
3277 if (!sas_ss_flags(sp)) {
3278 sp = target_sigaltstack_used.ss_sp +
3279 target_sigaltstack_used.ss_size;
3280 }
3281 }
3282
3283 /* This is the legacy signal stack switching. */
3284 else if (/* FIXME !user_mode(regs) */ 0 &&
3285 !(ka->sa_flags & TARGET_SA_RESTORER) &&
3286 ka->sa_restorer) {
3287 sp = (abi_ulong) ka->sa_restorer;
3288 }
3289
3290 return (sp - frame_size) & -8ul;
3291 }
3292
3293 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
3294 {
3295 int i;
3296 //save_access_regs(current->thread.acrs); FIXME
3297
3298 /* Copy a 'clean' PSW mask to the user to avoid leaking
3299 information about whether PER is currently on. */
3300 __put_user(env->psw.mask, &sregs->regs.psw.mask);
3301 __put_user(env->psw.addr, &sregs->regs.psw.addr);
3302 for (i = 0; i < 16; i++) {
3303 __put_user(env->regs[i], &sregs->regs.gprs[i]);
3304 }
3305 for (i = 0; i < 16; i++) {
3306 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
3307 }
3308 /*
3309 * We have to store the fp registers to current->thread.fp_regs
3310 * to merge them with the emulated registers.
3311 */
3312 //save_fp_regs(&current->thread.fp_regs); FIXME
3313 for (i = 0; i < 16; i++) {
3314 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
3315 }
3316 }
3317
3318 static void setup_frame(int sig, struct target_sigaction *ka,
3319 target_sigset_t *set, CPUS390XState *env)
3320 {
3321 sigframe *frame;
3322 abi_ulong frame_addr;
3323
3324 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3325 trace_user_setup_frame(env, frame_addr);
3326 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3327 goto give_sigsegv;
3328 }
3329
3330 __put_user(set->sig[0], &frame->sc.oldmask[0]);
3331
3332 save_sigregs(env, &frame->sregs);
3333
3334 __put_user((abi_ulong)(unsigned long)&frame->sregs,
3335 (abi_ulong *)&frame->sc.sregs);
3336
3337 /* Set up to return from userspace. If provided, use a stub
3338 already in userspace. */
3339 if (ka->sa_flags & TARGET_SA_RESTORER) {
3340 env->regs[14] = (unsigned long)
3341 ka->sa_restorer | PSW_ADDR_AMODE;
3342 } else {
3343 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
3344 | PSW_ADDR_AMODE;
3345 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
3346 (uint16_t *)(frame->retcode));
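        /* S390_SYSCALL_OPCODE (0x0a00) or'ed with the syscall number is the
           two-byte "svc" instruction, so retcode is a one-instruction
           sigreturn trampoline. */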
3347 }
3348
3349 /* Set up backchain. */
3350 __put_user(env->regs[15], (abi_ulong *) frame);
3351
3352 /* Set up registers for signal handler */
3353 env->regs[15] = frame_addr;
3354 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
3355
3356 env->regs[2] = sig; //map_signal(sig);
3357 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
3358
3359 /* We forgot to include these in the sigcontext.
3360 To avoid breaking binary compatibility, they are passed as args. */
3361 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
3362 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
3363
3364 /* Place signal number on stack to allow backtrace from handler. */
3365 __put_user(env->regs[2], &frame->signo);
3366 unlock_user_struct(frame, frame_addr, 1);
3367 return;
3368
3369 give_sigsegv:
3370 force_sigsegv(sig);
3371 }
3372
3373 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3374 target_siginfo_t *info,
3375 target_sigset_t *set, CPUS390XState *env)
3376 {
3377 int i;
3378 rt_sigframe *frame;
3379 abi_ulong frame_addr;
3380
3381 frame_addr = get_sigframe(ka, env, sizeof *frame);
3382 trace_user_setup_rt_frame(env, frame_addr);
3383 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3384 goto give_sigsegv;
3385 }
3386
3387 tswap_siginfo(&frame->info, info);
3388
3389 /* Create the ucontext. */
3390 __put_user(0, &frame->uc.tuc_flags);
3391 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
3392 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
3393 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3394 &frame->uc.tuc_stack.ss_flags);
3395 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
3396 save_sigregs(env, &frame->uc.tuc_mcontext);
3397 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
3398 __put_user((abi_ulong)set->sig[i],
3399 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
3400 }
3401
3402 /* Set up to return from userspace. If provided, use a stub
3403 already in userspace. */
3404 if (ka->sa_flags & TARGET_SA_RESTORER) {
3405 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
3406 } else {
3407 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
3408 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
3409 (uint16_t *)(frame->retcode));
3410 }
3411
3412 /* Set up backchain. */
3413 __put_user(env->regs[15], (abi_ulong *) frame);
3414
3415 /* Set up registers for signal handler */
3416 env->regs[15] = frame_addr;
3417 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
3418
3419 env->regs[2] = sig; //map_signal(sig);
3420 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
3421 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
3422 return;
3423
3424 give_sigsegv:
3425 force_sigsegv(sig);
3426 }
3427
3428 static int
3429 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
3430 {
3431 int err = 0;
3432 int i;
3433
3434 for (i = 0; i < 16; i++) {
3435 __get_user(env->regs[i], &sc->regs.gprs[i]);
3436 }
3437
3438 __get_user(env->psw.mask, &sc->regs.psw.mask);
3439 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
3440 (unsigned long long)env->psw.addr);
3441 __get_user(env->psw.addr, &sc->regs.psw.addr);
3442 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
3443
3444 for (i = 0; i < 16; i++) {
3445 __get_user(env->aregs[i], &sc->regs.acrs[i]);
3446 }
3447 for (i = 0; i < 16; i++) {
3448 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
3449 }
3450
3451 return err;
3452 }
3453
3454 long do_sigreturn(CPUS390XState *env)
3455 {
3456 sigframe *frame;
3457 abi_ulong frame_addr = env->regs[15];
3458 target_sigset_t target_set;
3459 sigset_t set;
3460
3461 trace_user_do_sigreturn(env, frame_addr);
3462 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3463 goto badframe;
3464 }
3465 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
3466
3467 target_to_host_sigset_internal(&set, &target_set);
3468 set_sigmask(&set); /* ~_BLOCKABLE? */
3469
3470 if (restore_sigregs(env, &frame->sregs)) {
3471 goto badframe;
3472 }
3473
3474 unlock_user_struct(frame, frame_addr, 0);
3475 return -TARGET_QEMU_ESIGRETURN;
3476
3477 badframe:
3478 force_sig(TARGET_SIGSEGV);
3479 return -TARGET_QEMU_ESIGRETURN;
3480 }
3481
3482 long do_rt_sigreturn(CPUS390XState *env)
3483 {
3484 rt_sigframe *frame;
3485 abi_ulong frame_addr = env->regs[15];
3486 sigset_t set;
3487
3488 trace_user_do_rt_sigreturn(env, frame_addr);
3489 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3490 goto badframe;
3491 }
3492 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3493
3494 set_sigmask(&set); /* ~_BLOCKABLE? */
3495
3496 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
3497 goto badframe;
3498 }
3499
3500 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
3501 get_sp_from_cpustate(env)) == -EFAULT) {
3502 goto badframe;
3503 }
3504 unlock_user_struct(frame, frame_addr, 0);
3505 return -TARGET_QEMU_ESIGRETURN;
3506
3507 badframe:
3508 unlock_user_struct(frame, frame_addr, 0);
3509 force_sig(TARGET_SIGSEGV);
3510 return -TARGET_QEMU_ESIGRETURN;
3511 }
3512
3513 #elif defined(TARGET_PPC)
3514
3515 /* Size of dummy stack frame allocated when calling signal handler.
3516 See arch/powerpc/include/asm/ptrace.h. */
3517 #if defined(TARGET_PPC64)
3518 #define SIGNAL_FRAMESIZE 128
3519 #else
3520 #define SIGNAL_FRAMESIZE 64
3521 #endif
3522
3523 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
3524 on 64-bit PPC, sigcontext and mcontext are one and the same. */
3525 struct target_mcontext {
3526 target_ulong mc_gregs[48];
3527 /* Includes fpscr. */
3528 uint64_t mc_fregs[33];
3529 #if defined(TARGET_PPC64)
3530 /* Pointer to the vector regs */
3531 target_ulong v_regs;
3532 #else
3533 target_ulong mc_pad[2];
3534 #endif
3535 /* We need to handle Altivec and SPE at the same time, which no
3536 kernel needs to do. Fortunately, the kernel defines this bit to
3537 be Altivec-register-large all the time, rather than trying to
3538 twiddle it based on the specific platform. */
3539 union {
3540 /* SPE vector registers. One extra for SPEFSCR. */
3541 uint32_t spe[33];
3542 /* Altivec vector registers. The packing of VSCR and VRSAVE
3543 varies depending on whether we're PPC64 or not: PPC64 splits
3544 them apart; PPC32 stuffs them together.
3545 We also need to account for the VSX registers on PPC64
3546 */
3547 #if defined(TARGET_PPC64)
3548 #define QEMU_NVRREG (34 + 16)
3549 /* On ppc64, this mcontext structure is naturally *unaligned*,
3550 * or rather it is aligned on an 8-byte boundary but not on
3551 * a 16-byte one. This pad fixes it up. This is also why the
3552 * vector regs are referenced by the v_regs pointer above, so
3553 * any amount of padding can be added here
3554 */
3555 target_ulong pad;
3556 #else
3557 /* On ppc32, we are already aligned to 16 bytes */
3558 #define QEMU_NVRREG 33
3559 #endif
3560 /* We cannot use ppc_avr_t here as we do *not* want the implied
3561 * 16-byte alignment that would result from it. This would have
3562 * the effect of making the whole struct target_mcontext aligned
3563 * which breaks the layout of struct target_ucontext on ppc64.
3564 */
3565 uint64_t altivec[QEMU_NVRREG][2];
3566 #undef QEMU_NVRREG
3567 } mc_vregs;
3568 };
3569
3570 /* See arch/powerpc/include/asm/sigcontext.h. */
3571 struct target_sigcontext {
3572 target_ulong _unused[4];
3573 int32_t signal;
3574 #if defined(TARGET_PPC64)
3575 int32_t pad0;
3576 #endif
3577 target_ulong handler;
3578 target_ulong oldmask;
3579 target_ulong regs; /* struct pt_regs __user * */
3580 #if defined(TARGET_PPC64)
3581 struct target_mcontext mcontext;
3582 #endif
3583 };
3584
3585 /* Indices for target_mcontext.mc_gregs, below.
3586 See arch/powerpc/include/asm/ptrace.h for details. */
3587 enum {
3588 TARGET_PT_R0 = 0,
3589 TARGET_PT_R1 = 1,
3590 TARGET_PT_R2 = 2,
3591 TARGET_PT_R3 = 3,
3592 TARGET_PT_R4 = 4,
3593 TARGET_PT_R5 = 5,
3594 TARGET_PT_R6 = 6,
3595 TARGET_PT_R7 = 7,
3596 TARGET_PT_R8 = 8,
3597 TARGET_PT_R9 = 9,
3598 TARGET_PT_R10 = 10,
3599 TARGET_PT_R11 = 11,
3600 TARGET_PT_R12 = 12,
3601 TARGET_PT_R13 = 13,
3602 TARGET_PT_R14 = 14,
3603 TARGET_PT_R15 = 15,
3604 TARGET_PT_R16 = 16,
3605 TARGET_PT_R17 = 17,
3606 TARGET_PT_R18 = 18,
3607 TARGET_PT_R19 = 19,
3608 TARGET_PT_R20 = 20,
3609 TARGET_PT_R21 = 21,
3610 TARGET_PT_R22 = 22,
3611 TARGET_PT_R23 = 23,
3612 TARGET_PT_R24 = 24,
3613 TARGET_PT_R25 = 25,
3614 TARGET_PT_R26 = 26,
3615 TARGET_PT_R27 = 27,
3616 TARGET_PT_R28 = 28,
3617 TARGET_PT_R29 = 29,
3618 TARGET_PT_R30 = 30,
3619 TARGET_PT_R31 = 31,
3620 TARGET_PT_NIP = 32,
3621 TARGET_PT_MSR = 33,
3622 TARGET_PT_ORIG_R3 = 34,
3623 TARGET_PT_CTR = 35,
3624 TARGET_PT_LNK = 36,
3625 TARGET_PT_XER = 37,
3626 TARGET_PT_CCR = 38,
3627 /* Yes, there are two registers with #39. One is 64-bit only. */
3628 TARGET_PT_MQ = 39,
3629 TARGET_PT_SOFTE = 39,
3630 TARGET_PT_TRAP = 40,
3631 TARGET_PT_DAR = 41,
3632 TARGET_PT_DSISR = 42,
3633 TARGET_PT_RESULT = 43,
3634 TARGET_PT_REGS_COUNT = 44
3635 };
3636
3637
3638 struct target_ucontext {
3639 target_ulong tuc_flags;
3640 target_ulong tuc_link; /* ucontext_t __user * */
3641 struct target_sigaltstack tuc_stack;
3642 #if !defined(TARGET_PPC64)
3643 int32_t tuc_pad[7];
3644 target_ulong tuc_regs; /* struct mcontext __user *
3645 points to uc_mcontext field */
3646 #endif
3647 target_sigset_t tuc_sigmask;
3648 #if defined(TARGET_PPC64)
3649 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
3650 struct target_sigcontext tuc_sigcontext;
3651 #else
3652 int32_t tuc_maskext[30];
3653 int32_t tuc_pad2[3];
3654 struct target_mcontext tuc_mcontext;
3655 #endif
3656 };
3657
3658 /* See arch/powerpc/kernel/signal_32.c. */
3659 struct target_sigframe {
3660 struct target_sigcontext sctx;
3661 struct target_mcontext mctx;
3662 int32_t abigap[56];
3663 };
3664
3665 #if defined(TARGET_PPC64)
3666
3667 #define TARGET_TRAMP_SIZE 6
3668
3669 struct target_rt_sigframe {
3670 /* sys_rt_sigreturn requires the ucontext be the first field */
3671 struct target_ucontext uc;
3672 target_ulong _unused[2];
3673 uint32_t trampoline[TARGET_TRAMP_SIZE];
3674 target_ulong pinfo; /* struct siginfo __user * */
3675 target_ulong puc; /* void __user * */
3676 struct target_siginfo info;
3677 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
3678 char abigap[288];
3679 } __attribute__((aligned(16)));
3680
3681 #else
3682
3683 struct target_rt_sigframe {
3684 struct target_siginfo info;
3685 struct target_ucontext uc;
3686 int32_t abigap[56];
3687 };
3688
3689 #endif
3690
3691 #if defined(TARGET_PPC64)
3692
3693 struct target_func_ptr {
3694 target_ulong entry;
3695 target_ulong toc;
3696 };
3697
3698 #endif
3699
3700 /* We use the mc_pad field for the signal return trampoline. */
3701 #define tramp mc_pad
3702
3703 /* See arch/powerpc/kernel/signal.c. */
3704 static target_ulong get_sigframe(struct target_sigaction *ka,
3705 CPUPPCState *env,
3706 int frame_size)
3707 {
3708 target_ulong oldsp;
3709
3710 oldsp = env->gpr[1];
3711
3712 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
3713 (sas_ss_flags(oldsp) == 0)) {
3714 oldsp = (target_sigaltstack_used.ss_sp
3715 + target_sigaltstack_used.ss_size);
3716 }
3717
3718 return (oldsp - frame_size) & ~0xFUL;
3719 }
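/* A purely illustrative walk-through of the computation above (made-up
 * numbers): with oldsp = 0x40001234 and frame_size = 0x500 the frame would
 * be placed at (0x40001234 - 0x500) & ~0xFUL = 0x40000d30, i.e. just below
 * the current stack pointer (or the top of the alternate signal stack) and
 * rounded down to a 16-byte boundary.
 */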
3720
3721 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
3722 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
3723 #define PPC_VEC_HI 0
3724 #define PPC_VEC_LO 1
3725 #else
3726 #define PPC_VEC_HI 1
3727 #define PPC_VEC_LO 0
3728 #endif
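/* The values follow directly from the #if above: when host and target share
 * an endianness PPC_VEC_HI/PPC_VEC_LO are 0/1, and when they differ they are
 * 1/0. They select which half of the host-side ppc_avr_t.u64[] pair lands in
 * which slot of the saved frame, the intent being that the guest sees its
 * vector registers laid out as a native kernel would have stored them, e.g.
 * in save_user_regs() below:
 *
 *     __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
 *     __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
 */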
3729
3730
3731 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
3732 {
3733 target_ulong msr = env->msr;
3734 int i;
3735 target_ulong ccr = 0;
3736
3737 /* In general, the kernel attempts to be intelligent about what it
3738 needs to save for Altivec/FP/SPE registers. We don't care that
3739 much, so we just go ahead and save everything. */
3740
3741 /* Save general registers. */
3742 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
3743 __put_user(env->gpr[i], &frame->mc_gregs[i]);
3744 }
3745 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
3746 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
3747 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
3748 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
3749
3750 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
3751 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
3752 }
3753 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
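/* The loop above packs the eight 4-bit CR fields into one 32-bit word with
 * CR0 in the most significant nibble, matching the layout of the CR register
 * itself. Worked example (illustrative values): crf[0] = 0x8 and crf[7] = 0x2
 * with all other fields zero gives ccr = 0x80000002.
 */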
3754
3755 /* Save Altivec registers if necessary. */
3756 if (env->insns_flags & PPC_ALTIVEC) {
3757 uint32_t *vrsave;
3758 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
3759 ppc_avr_t *avr = &env->avr[i];
3760 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
3761
3762 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
3763 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
3764 }
3765 /* Set MSR_VR in the saved MSR value to indicate that
3766 frame->mc_vregs contains valid data. */
3767 msr |= MSR_VR;
3768 #if defined(TARGET_PPC64)
3769 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
3770 /* 64-bit needs to put a pointer to the vectors in the frame */
3771 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
3772 #else
3773 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
3774 #endif
3775 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
3776 }
3777
3778 /* Save VSX second halves */
3779 if (env->insns_flags2 & PPC2_VSX) {
3780 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
3781 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
3782 __put_user(env->vsr[i], &vsregs[i]);
3783 }
3784 }
3785
3786 /* Save floating point registers. */
3787 if (env->insns_flags & PPC_FLOAT) {
3788 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
3789 __put_user(env->fpr[i], &frame->mc_fregs[i]);
3790 }
3791 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
3792 }
3793
3794 /* Save SPE registers. The kernel only saves the high half. */
3795 if (env->insns_flags & PPC_SPE) {
3796 #if defined(TARGET_PPC64)
3797 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
3798 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
3799 }
3800 #else
3801 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
3802 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
3803 }
3804 #endif
3805 /* Set MSR_SPE in the saved MSR value to indicate that
3806 frame->mc_vregs contains valid data. */
3807 msr |= MSR_SPE;
3808 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
3809 }
3810
3811 /* Store MSR. */
3812 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
3813 }
3814
3815 static void encode_trampoline(int sigret, uint32_t *tramp)
3816 {
3817 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
3818 if (sigret) {
3819 __put_user(0x38000000 | sigret, &tramp[0]);
3820 __put_user(0x44000002, &tramp[1]);
3821 }
3822 }
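/* A sketch of what the two words emitted above decode to (sigret is whichever
 * TARGET_NR_*sigreturn the caller passes in):
 *
 *     0x38000000 | sigret     li  r0, sigret     # addi r0, 0, sigret
 *     0x44000002              sc                 # system call
 *
 * For instance, a sigret value of 119 would be encoded as 0x38000077.
 */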
3823
3824 static void restore_user_regs(CPUPPCState *env,
3825 struct target_mcontext *frame, int sig)
3826 {
3827 target_ulong save_r2 = 0;
3828 target_ulong msr;
3829 target_ulong ccr;
3830
3831 int i;
3832
3833 if (!sig) {
3834 save_r2 = env->gpr[2];
3835 }
3836
3837 /* Restore general registers. */
3838 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
3839 __get_user(env->gpr[i], &frame->mc_gregs[i]);
3840 }
3841 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
3842 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
3843 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
3844 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
3845 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
3846
3847 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
3848 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
3849 }
3850
3851 if (!sig) {
3852 env->gpr[2] = save_r2;
3853 }
3854 /* Restore MSR. */
3855 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
3856
3857 /* If doing signal return, restore the saved endianness (MSR_LE bit). */
3858 if (sig)
3859 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
3860
3861 /* Restore Altivec registers if necessary. */
3862 if (env->insns_flags & PPC_ALTIVEC) {
3863 ppc_avr_t *v_regs;
3864 uint32_t *vrsave;
3865 #if defined(TARGET_PPC64)
3866 uint64_t v_addr;
3867 /* 64-bit needs to recover the pointer to the vectors from the frame */
3868 __get_user(v_addr, &frame->v_regs);
3869 v_regs = g2h(v_addr);
3870 #else
3871 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
3872 #endif
3873 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
3874 ppc_avr_t *avr = &env->avr[i];
3875 ppc_avr_t *vreg = &v_regs[i];
3876
3877 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
3878 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
3879 }
3880 /* Recover VRSAVE, which is stored after the vector registers in the
3881 frame (slot 33 on ppc64, slot 32 on ppc32). */
3882 #if defined(TARGET_PPC64)
3883 vrsave = (uint32_t *)&v_regs[33];
3884 #else
3885 vrsave = (uint32_t *)&v_regs[32];
3886 #endif
3887 __get_user(env->spr[SPR_VRSAVE], vrsave);
3888 }
3889
3890 /* Restore VSX second halves */
3891 if (env->insns_flags2 & PPC2_VSX) {
3892 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
3893 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
3894 __get_user(env->vsr[i], &vsregs[i]);
3895 }
3896 }
3897
3898 /* Restore floating point registers. */
3899 if (env->insns_flags & PPC_FLOAT) {
3900 uint64_t fpscr;
3901 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
3902 __get_user(env->fpr[i], &frame->mc_fregs[i]);
3903 }
3904 __get_user(fpscr, &frame->mc_fregs[32]);
3905 env->fpscr = (uint32_t) fpscr;
3906 }
3907
3908 /* Restore SPE registers. The kernel only saves the high half. */
3909 if (env->insns_flags & PPC_SPE) {
3910 #if defined(TARGET_PPC64)
3911 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
3912 uint32_t hi;
3913
3914 __get_user(hi, &frame->mc_vregs.spe[i]);
3915 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
3916 }
3917 #else
3918 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
3919 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
3920 }
3921 #endif
3922 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
3923 }
3924 }
3925
3926 #if !defined(TARGET_PPC64)
3927 static void setup_frame(int sig, struct target_sigaction *ka,
3928 target_sigset_t *set, CPUPPCState *env)
3929 {
3930 struct target_sigframe *frame;
3931 struct target_sigcontext *sc;
3932 target_ulong frame_addr, newsp;
3933 int err = 0;
3934
3935 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3936 trace_user_setup_frame(env, frame_addr);
3937 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3938 goto sigsegv;
3939 sc = &frame->sctx;
3940
3941 __put_user(ka->_sa_handler, &sc->handler);
3942 __put_user(set->sig[0], &sc->oldmask);
3943 __put_user(set->sig[1], &sc->_unused[3]);
3944 __put_user(h2g(&frame->mctx), &sc->regs);
3945 __put_user(sig, &sc->signal);
3946
3947 /* Save user regs. */
3948 save_user_regs(env, &frame->mctx);
3949
3950 /* Construct the trampoline code on the stack. */
3951 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
3952
3953 /* The kernel checks for the presence of a VDSO here. We don't
3954 emulate a vdso, so use a sigreturn system call. */
3955 env->lr = (target_ulong) h2g(frame->mctx.tramp);
3956
3957 /* Turn off all fp exceptions. */
3958 env->fpscr = 0;
3959
3960 /* Create a stack frame for the caller of the handler. */
3961 newsp = frame_addr - SIGNAL_FRAMESIZE;
3962 err |= put_user(env->gpr[1], newsp, target_ulong);
3963
3964 if (err)
3965 goto sigsegv;
3966
3967 /* Set up registers for signal handler. */
3968 env->gpr[1] = newsp;
3969 env->gpr[3] = sig;
3970 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
3971
3972 env->nip = (target_ulong) ka->_sa_handler;
3973
3974 /* Signal handlers are entered in big-endian mode. */
3975 env->msr &= ~(1ull << MSR_LE);
3976
3977 unlock_user_struct(frame, frame_addr, 1);
3978 return;
3979
3980 sigsegv:
3981 unlock_user_struct(frame, frame_addr, 1);
3982 force_sigsegv(sig);
3983 }
3984 #endif /* !defined(TARGET_PPC64) */
3985
3986 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3987 target_siginfo_t *info,
3988 target_sigset_t *set, CPUPPCState *env)
3989 {
3990 struct target_rt_sigframe *rt_sf;
3991 uint32_t *trampptr = 0;
3992 struct target_mcontext *mctx = 0;
3993 target_ulong rt_sf_addr, newsp = 0;
3994 int i, err = 0;
3995 #if defined(TARGET_PPC64)
3996 struct target_sigcontext *sc = 0;
3997 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
3998 #endif
3999
4000 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4001 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4002 goto sigsegv;
4003
4004 tswap_siginfo(&rt_sf->info, info);
4005
4006 __put_user(0, &rt_sf->uc.tuc_flags);
4007 __put_user(0, &rt_sf->uc.tuc_link);
4008 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4009 &rt_sf->uc.tuc_stack.ss_sp);
4010 __put_user(sas_ss_flags(env->gpr[1]),
4011 &rt_sf->uc.tuc_stack.ss_flags);
4012 __put_user(target_sigaltstack_used.ss_size,
4013 &rt_sf->uc.tuc_stack.ss_size);
4014 #if !defined(TARGET_PPC64)
4015 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4016 &rt_sf->uc.tuc_regs);
4017 #endif
4018 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4019 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4020 }
4021
4022 #if defined(TARGET_PPC64)
4023 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4024 trampptr = &rt_sf->trampoline[0];
4025
4026 sc = &rt_sf->uc.tuc_sigcontext;
4027 __put_user(h2g(mctx), &sc->regs);
4028 __put_user(sig, &sc->signal);
4029 #else
4030 mctx = &rt_sf->uc.tuc_mcontext;
4031 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4032 #endif
4033
4034 save_user_regs(env, mctx);
4035 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4036
4037 /* The kernel checks for the presence of a VDSO here. We don't
4038 emulate a vdso, so use a sigreturn system call. */
4039 env->lr = (target_ulong) h2g(trampptr);
4040
4041 /* Turn off all fp exceptions. */
4042 env->fpscr = 0;
4043
4044 /* Create a stack frame for the caller of the handler. */
4045 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4046 err |= put_user(env->gpr[1], newsp, target_ulong);
4047
4048 if (err)
4049 goto sigsegv;
4050
4051 /* Set up registers for signal handler. */
4052 env->gpr[1] = newsp;
4053 env->gpr[3] = (target_ulong) sig;
4054 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4055 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4056 env->gpr[6] = (target_ulong) h2g(rt_sf);
4057
4058 #if defined(TARGET_PPC64)
4059 if (get_ppc64_abi(image) < 2) {
4060 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4061 struct target_func_ptr *handler =
4062 (struct target_func_ptr *)g2h(ka->_sa_handler);
4063 env->nip = tswapl(handler->entry);
4064 env->gpr[2] = tswapl(handler->toc);
4065 } else {
4066 /* ELFv2 PPC64 function pointers are entry points, but R12
4067 * must also be set */
4068 env->nip = tswapl((target_ulong) ka->_sa_handler);
4069 env->gpr[12] = env->nip;
4070 }
4071 #else
4072 env->nip = (target_ulong) ka->_sa_handler;
4073 #endif
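/* Sketch of the ELFv1/ELFv2 difference handled above (addresses invented for
 * illustration). Under ELFv1 the registered handler value is the guest
 * address of a function descriptor rather than of code:
 *
 *     ka->_sa_handler  ->  { .entry = 0x10002300,   // loaded into nip
 *                            .toc   = 0x10090000 }  // loaded into gpr[2]
 *
 * Under ELFv2 the value is the entry point itself and the callee derives its
 * TOC from r12, which is why r12 is set to nip in the ELFv2 branch above.
 */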
4074
4075 /* Signal handlers are entered in big-endian mode. */
4076 env->msr &= ~(1ull << MSR_LE);
4077
4078 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4079 return;
4080
4081 sigsegv:
4082 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4083 force_sigsegv(sig);
4084
4085 }
4086
4087 #if !defined(TARGET_PPC64)
4088 long do_sigreturn(CPUPPCState *env)
4089 {
4090 struct target_sigcontext *sc = NULL;
4091 struct target_mcontext *sr = NULL;
4092 target_ulong sr_addr = 0, sc_addr;
4093 sigset_t blocked;
4094 target_sigset_t set;
4095
4096 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4097 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4098 goto sigsegv;
4099
4100 #if defined(TARGET_PPC64)
4101 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4102 #else
4103 __get_user(set.sig[0], &sc->oldmask);
4104 __get_user(set.sig[1], &sc->_unused[3]);
4105 #endif
4106 target_to_host_sigset_internal(&blocked, &set);
4107 set_sigmask(&blocked);
4108
4109 __get_user(sr_addr, &sc->regs);
4110 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4111 goto sigsegv;
4112 restore_user_regs(env, sr, 1);
4113
4114 unlock_user_struct(sr, sr_addr, 1);
4115 unlock_user_struct(sc, sc_addr, 1);
4116 return -TARGET_QEMU_ESIGRETURN;
4117
4118 sigsegv:
4119 unlock_user_struct(sr, sr_addr, 1);
4120 unlock_user_struct(sc, sc_addr, 1);
4121 force_sig(TARGET_SIGSEGV);
4122 return -TARGET_QEMU_ESIGRETURN;
4123 }
4124 #endif /* !defined(TARGET_PPC64) */
4125
4126 /* See arch/powerpc/kernel/signal_32.c. */
4127 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4128 {
4129 struct target_mcontext *mcp;
4130 target_ulong mcp_addr;
4131 sigset_t blocked;
4132 target_sigset_t set;
4133
4134 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4135 sizeof (set)))
4136 return 1;
4137
4138 #if defined(TARGET_PPC64)
4139 mcp_addr = h2g(ucp) +
4140 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4141 #else
4142 __get_user(mcp_addr, &ucp->tuc_regs);
4143 #endif
4144
4145 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4146 return 1;
4147
4148 target_to_host_sigset_internal(&blocked, &set);
4149 set_sigmask(&blocked);
4150 restore_user_regs(env, mcp, sig);
4151
4152 unlock_user_struct(mcp, mcp_addr, 1);
4153 return 0;
4154 }
4155
4156 long do_rt_sigreturn(CPUPPCState *env)
4157 {
4158 struct target_rt_sigframe *rt_sf = NULL;
4159 target_ulong rt_sf_addr;
4160
4161 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4162 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4163 goto sigsegv;
4164
4165 if (do_setcontext(&rt_sf->uc, env, 1))
4166 goto sigsegv;
4167
4168 do_sigaltstack(rt_sf_addr
4169 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4170 0, env->gpr[1]);
4171
4172 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4173 return -TARGET_QEMU_ESIGRETURN;
4174
4175 sigsegv:
4176 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4177 force_sig(TARGET_SIGSEGV);
4178 return -TARGET_QEMU_ESIGRETURN;
4179 }
4180
4181 #elif defined(TARGET_M68K)
4182
4183 struct target_sigcontext {
4184 abi_ulong sc_mask;
4185 abi_ulong sc_usp;
4186 abi_ulong sc_d0;
4187 abi_ulong sc_d1;
4188 abi_ulong sc_a0;
4189 abi_ulong sc_a1;
4190 unsigned short sc_sr;
4191 abi_ulong sc_pc;
4192 };
4193
4194 struct target_sigframe
4195 {
4196 abi_ulong pretcode;
4197 int sig;
4198 int code;
4199 abi_ulong psc;
4200 char retcode[8];
4201 abi_ulong extramask[TARGET_NSIG_WORDS-1];
4202 struct target_sigcontext sc;
4203 };
4204
4205 typedef int target_greg_t;
4206 #define TARGET_NGREG 18
4207 typedef target_greg_t target_gregset_t[TARGET_NGREG];
4208
4209 typedef struct target_fpregset {
4210 int f_fpcntl[3];
4211 int f_fpregs[8*3];
4212 } target_fpregset_t;
4213
4214 struct target_mcontext {
4215 int version;
4216 target_gregset_t gregs;
4217 target_fpregset_t fpregs;
4218 };
4219
4220 #define TARGET_MCONTEXT_VERSION 2
4221
4222 struct target_ucontext {
4223 abi_ulong tuc_flags;
4224 abi_ulong tuc_link;
4225 target_stack_t tuc_stack;
4226 struct target_mcontext tuc_mcontext;
4227 abi_long tuc_filler[80];
4228 target_sigset_t tuc_sigmask;
4229 };
4230
4231 struct target_rt_sigframe
4232 {
4233 abi_ulong pretcode;
4234 int sig;
4235 abi_ulong pinfo;
4236 abi_ulong puc;
4237 char retcode[8];
4238 struct target_siginfo info;
4239 struct target_ucontext uc;
4240 };
4241
4242 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
4243 abi_ulong mask)
4244 {
4245 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
4246 __put_user(mask, &sc->sc_mask);
4247 __put_user(env->aregs[7], &sc->sc_usp);
4248 __put_user(env->dregs[0], &sc->sc_d0);
4249 __put_user(env->dregs[1], &sc->sc_d1);
4250 __put_user(env->aregs[0], &sc->sc_a0);
4251 __put_user(env->aregs[1], &sc->sc_a1);
4252 __put_user(sr, &sc->sc_sr);
4253 __put_user(env->pc, &sc->sc_pc);
4254 }
4255
4256 static void
4257 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
4258 {
4259 int temp;
4260
4261 __get_user(env->aregs[7], &sc->sc_usp);
4262 __get_user(env->dregs[0], &sc->sc_d0);
4263 __get_user(env->dregs[1], &sc->sc_d1);
4264 __get_user(env->aregs[0], &sc->sc_a0);
4265 __get_user(env->aregs[1], &sc->sc_a1);
4266 __get_user(env->pc, &sc->sc_pc);
4267 __get_user(temp, &sc->sc_sr);
4268 cpu_m68k_set_ccr(env, temp);
4269 }
4270
4271 /*
4272 * Determine which stack to use.
4273 */
4274 static inline abi_ulong
4275 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
4276 size_t frame_size)
4277 {
4278 unsigned long sp;
4279
4280 sp = regs->aregs[7];
4281
4282 /* This is the X/Open sanctioned signal stack switching. */
4283 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
4284 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4285 }
4286
4287 return ((sp - frame_size) & -8UL);
4288 }
4289
4290 static void setup_frame(int sig, struct target_sigaction *ka,
4291 target_sigset_t *set, CPUM68KState *env)
4292 {
4293 struct target_sigframe *frame;
4294 abi_ulong frame_addr;
4295 abi_ulong retcode_addr;
4296 abi_ulong sc_addr;
4297 int i;
4298
4299 frame_addr = get_sigframe(ka, env, sizeof *frame);
4300 trace_user_setup_frame(env, frame_addr);
4301 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4302 goto give_sigsegv;
4303 }
4304
4305 __put_user(sig, &frame->sig);
4306
4307 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
4308 __put_user(sc_addr, &frame->psc);
4309
4310 setup_sigcontext(&frame->sc, env, set->sig[0]);
4311
4312 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4313 __put_user(set->sig[i], &frame->extramask[i - 1]);
4314 }
4315
4316 /* Set up to return from userspace. */
4317
4318 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
4319 __put_user(retcode_addr, &frame->pretcode);
4320
4321 /* moveq #,d0; trap #0 */
4322
4323 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
4324 (uint32_t *)(frame->retcode));
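/* The 32-bit constant above decodes to two big-endian 16-bit m68k
 * instructions (NN is TARGET_NR_sigreturn, which must fit in the signed
 * 8-bit moveq immediate):
 *
 *     0x70NN    moveq #NN, %d0    # syscall number in d0
 *     0x4e40    trap  #0          # enter the kernel
 */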
4325
4326 /* Set up to return from userspace */
4327
4328 env->aregs[7] = frame_addr;
4329 env->pc = ka->_sa_handler;
4330
4331 unlock_user_struct(frame, frame_addr, 1);
4332 return;
4333
4334 give_sigsegv:
4335 force_sigsegv(sig);
4336 }
4337
4338 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
4339 CPUM68KState *env)
4340 {
4341 int i;
4342 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
4343
4344 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
4345 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
4346 /* fpiar is not emulated */
4347
4348 for (i = 0; i < 8; i++) {
4349 uint32_t high = env->fregs[i].d.high << 16;
4350 __put_user(high, &fpregs->f_fpregs[i * 3]);
4351 __put_user(env->fregs[i].d.low,
4352 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
4353 }
4354 }
4355
4356 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
4357 CPUM68KState *env)
4358 {
4359 target_greg_t *gregs = uc->tuc_mcontext.gregs;
4360 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
4361
4362 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4363 __put_user(env->dregs[0], &gregs[0]);
4364 __put_user(env->dregs[1], &gregs[1]);
4365 __put_user(env->dregs[2], &gregs[2]);
4366 __put_user(env->dregs[3], &gregs[3]);
4367 __put_user(env->dregs[4], &gregs[4]);
4368 __put_user(env->dregs[5], &gregs[5]);
4369 __put_user(env->dregs[6], &gregs[6]);
4370 __put_user(env->dregs[7], &gregs[7]);
4371 __put_user(env->aregs[0], &gregs[8]);
4372 __put_user(env->aregs[1], &gregs[9]);
4373 __put_user(env->aregs[2], &gregs[10]);
4374 __put_user(env->aregs[3], &gregs[11]);
4375 __put_user(env->aregs[4], &gregs[12]);
4376 __put_user(env->aregs[5], &gregs[13]);
4377 __put_user(env->aregs[6], &gregs[14]);
4378 __put_user(env->aregs[7], &gregs[15]);
4379 __put_user(env->pc, &gregs[16]);
4380 __put_user(sr, &gregs[17]);
4381
4382 target_rt_save_fpu_state(uc, env);
4383
4384 return 0;
4385 }
4386
4387 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
4388 struct target_ucontext *uc)
4389 {
4390 int i;
4391 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
4392 uint32_t fpcr;
4393
4394 __get_user(fpcr, &fpregs->f_fpcntl[0]);
4395 cpu_m68k_set_fpcr(env, fpcr);
4396 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
4397 /* fpiar is not emulated */
4398
4399 for (i = 0; i < 8; i++) {
4400 uint32_t high;
4401 __get_user(high, &fpregs->f_fpregs[i * 3]);
4402 env->fregs[i].d.high = high >> 16;
4403 __get_user(env->fregs[i].d.low,
4404 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
4405 }
4406 }
4407
4408 static inline int target_rt_restore_ucontext(CPUM68KState *env,
4409 struct target_ucontext *uc)
4410 {
4411 int temp;
4412 target_greg_t *gregs = uc->tuc_mcontext.gregs;
4413
4414 __get_user(temp, &uc->tuc_mcontext.version);
4415 if (temp != TARGET_MCONTEXT_VERSION)
4416 goto badframe;
4417
4418 /* restore passed registers */
4419 __get_user(env->dregs[0], &gregs[0]);
4420 __get_user(env->dregs[1], &gregs[1]);
4421 __get_user(env->dregs[2], &gregs[2]);
4422 __get_user(env->dregs[3], &gregs[3]);
4423 __get_user(env->dregs[4], &gregs[4]);
4424 __get_user(env->dregs[5], &gregs[5]);
4425 __get_user(env->dregs[6], &gregs[6]);
4426 __get_user(env->dregs[7], &gregs[7]);
4427 __get_user(env->aregs[0], &gregs[8]);
4428 __get_user(env->aregs[1], &gregs[9]);
4429 __get_user(env->aregs[2], &gregs[10]);
4430 __get_user(env->aregs[3], &gregs[11]);
4431 __get_user(env->aregs[4], &gregs[12]);
4432 __get_user(env->aregs[5], &gregs[13]);
4433 __get_user(env->aregs[6], &gregs[14]);
4434 __get_user(env->aregs[7], &gregs[15]);
4435 __get_user(env->pc, &gregs[16]);
4436 __get_user(temp, &gregs[17]);
4437 cpu_m68k_set_ccr(env, temp);
4438
4439 target_rt_restore_fpu_state(env, uc);
4440
4441 return 0;
4442
4443 badframe:
4444 return 1;
4445 }
4446
4447 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4448 target_siginfo_t *info,
4449 target_sigset_t *set, CPUM68KState *env)
4450 {
4451 struct target_rt_sigframe *frame;
4452 abi_ulong frame_addr;
4453 abi_ulong retcode_addr;
4454 abi_ulong info_addr;
4455 abi_ulong uc_addr;
4456 int err = 0;
4457 int i;
4458
4459 frame_addr = get_sigframe(ka, env, sizeof *frame);
4460 trace_user_setup_rt_frame(env, frame_addr);
4461 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4462 goto give_sigsegv;
4463 }
4464
4465 __put_user(sig, &frame->sig);
4466
4467 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4468 __put_user(info_addr, &frame->pinfo);
4469
4470 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4471 __put_user(uc_addr, &frame->puc);
4472
4473 tswap_siginfo(&frame->info, info);
4474
4475 /* Create the ucontext */
4476
4477 __put_user(0, &frame->uc.tuc_flags);
4478 __put_user(0, &frame->uc.tuc_link);
4479 __put_user(target_sigaltstack_used.ss_sp,
4480 &frame->uc.tuc_stack.ss_sp);
4481 __put_user(sas_ss_flags(env->aregs[7]),
4482 &frame->uc.tuc_stack.ss_flags);
4483 __put_user(target_sigaltstack_used.ss_size,
4484 &frame->uc.tuc_stack.ss_size);
4485 err |= target_rt_setup_ucontext(&frame->uc, env);
4486
4487 if (err)
4488 goto give_sigsegv;
4489
4490 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4491 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
4492 }
4493
4494 /* Set up to return from userspace. */
4495
4496 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4497 __put_user(retcode_addr, &frame->pretcode);
4498
4499 /* moveq #,d0; notb d0; trap #0 */
4500
4501 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
4502 (uint32_t *)(frame->retcode + 0));
4503 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
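/* The constants above decode to three m68k instructions. moveq only takes a
 * signed 8-bit immediate, so the complement of the syscall number is loaded
 * and then inverted (NR is TARGET_NR_rt_sigreturn):
 *
 *     0x70MM    moveq #(NR ^ 0xff), %d0
 *     0x4600    not.b %d0                # d0 now holds NR
 *     0x4e40    trap  #0
 */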
4504
4505 if (err)
4506 goto give_sigsegv;
4507
4508 /* Set up to return from userspace */
4509
4510 env->aregs[7] = frame_addr;
4511 env->pc = ka->_sa_handler;
4512
4513 unlock_user_struct(frame, frame_addr, 1);
4514 return;
4515
4516 give_sigsegv:
4517 unlock_user_struct(frame, frame_addr, 1);
4518 force_sigsegv(sig);
4519 }
4520
4521 long do_sigreturn(CPUM68KState *env)
4522 {
4523 struct target_sigframe *frame;
4524 abi_ulong frame_addr = env->aregs[7] - 4;
4525 target_sigset_t target_set;
4526 sigset_t set;
4527 int i;
4528
4529 trace_user_do_sigreturn(env, frame_addr);
4530 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
4531 goto badframe;
4532
4533 /* set blocked signals */
4534
4535 __get_user(target_set.sig[0], &frame->sc.sc_mask);
4536
4537 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4538 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4539 }
4540
4541 target_to_host_sigset_internal(&set, &target_set);
4542 set_sigmask(&set);
4543
4544 /* restore registers */
4545
4546 restore_sigcontext(env, &frame->sc);
4547
4548 unlock_user_struct(frame, frame_addr, 0);
4549 return -TARGET_QEMU_ESIGRETURN;
4550
4551 badframe:
4552 force_sig(TARGET_SIGSEGV);
4553 return -TARGET_QEMU_ESIGRETURN;
4554 }
4555
4556 long do_rt_sigreturn(CPUM68KState *env)
4557 {
4558 struct target_rt_sigframe *frame;
4559 abi_ulong frame_addr = env->aregs[7] - 4;
4560 sigset_t set;
4561
4562 trace_user_do_rt_sigreturn(env, frame_addr);
4563 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
4564 goto badframe;
4565
4566 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4567 set_sigmask(&set);
4568
4569 /* restore registers */
4570
4571 if (target_rt_restore_ucontext(env, &frame->uc))
4572 goto badframe;
4573
4574 if (do_sigaltstack(frame_addr +
4575 offsetof(struct target_rt_sigframe, uc.tuc_stack),
4576 0, get_sp_from_cpustate(env)) == -EFAULT)
4577 goto badframe;
4578
4579 unlock_user_struct(frame, frame_addr, 0);
4580 return -TARGET_QEMU_ESIGRETURN;
4581
4582 badframe:
4583 unlock_user_struct(frame, frame_addr, 0);
4584 force_sig(TARGET_SIGSEGV);
4585 return -TARGET_QEMU_ESIGRETURN;
4586 }
4587
4588 #elif defined(TARGET_ALPHA)
4589
4590 struct target_sigcontext {
4591 abi_long sc_onstack;
4592 abi_long sc_mask;
4593 abi_long sc_pc;
4594 abi_long sc_ps;
4595 abi_long sc_regs[32];
4596 abi_long sc_ownedfp;
4597 abi_long sc_fpregs[32];
4598 abi_ulong sc_fpcr;
4599 abi_ulong sc_fp_control;
4600 abi_ulong sc_reserved1;
4601 abi_ulong sc_reserved2;
4602 abi_ulong sc_ssize;
4603 abi_ulong sc_sbase;
4604 abi_ulong sc_traparg_a0;
4605 abi_ulong sc_traparg_a1;
4606 abi_ulong sc_traparg_a2;
4607 abi_ulong sc_fp_trap_pc;
4608 abi_ulong sc_fp_trigger_sum;
4609 abi_ulong sc_fp_trigger_inst;
4610 };
4611
4612 struct target_ucontext {
4613 abi_ulong tuc_flags;
4614 abi_ulong tuc_link;
4615 abi_ulong tuc_osf_sigmask;
4616 target_stack_t tuc_stack;
4617 struct target_sigcontext tuc_mcontext;
4618 target_sigset_t tuc_sigmask;
4619 };
4620
4621 struct target_sigframe {
4622 struct target_sigcontext sc;
4623 unsigned int retcode[3];
4624 };
4625
4626 struct target_rt_sigframe {
4627 target_siginfo_t info;
4628 struct target_ucontext uc;
4629 unsigned int retcode[3];
4630 };
4631
4632 #define INSN_MOV_R30_R16 0x47fe0410
4633 #define INSN_LDI_R0 0x201f0000
4634 #define INSN_CALLSYS 0x00000083
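/* Assembled, the retcode sequences built from these encodings in
 * setup_frame()/setup_rt_frame() below read:
 *
 *     mov     $sp, $a0         # INSN_MOV_R30_R16: frame address as arg 0
 *     ldi     $v0, NR          # INSN_LDI_R0 + NR: syscall number in v0
 *     callsys                  # INSN_CALLSYS
 *
 * which matches do_sigreturn()/do_rt_sigreturn() below taking the frame
 * address from env->ir[IR_A0].
 */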
4635
4636 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
4637 abi_ulong frame_addr, target_sigset_t *set)
4638 {
4639 int i;
4640
4641 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
4642 __put_user(set->sig[0], &sc->sc_mask);
4643 __put_user(env->pc, &sc->sc_pc);
4644 __put_user(8, &sc->sc_ps);
4645
4646 for (i = 0; i < 31; ++i) {
4647 __put_user(env->ir[i], &sc->sc_regs[i]);
4648 }
4649 __put_user(0, &sc->sc_regs[31]);
4650
4651 for (i = 0; i < 31; ++i) {
4652 __put_user(env->fir[i], &sc->sc_fpregs[i]);
4653 }
4654 __put_user(0, &sc->sc_fpregs[31]);
4655 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
4656
4657 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
4658 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
4659 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
4660 }
4661
4662 static void restore_sigcontext(CPUAlphaState *env,
4663 struct target_sigcontext *sc)
4664 {
4665 uint64_t fpcr;
4666 int i;
4667
4668 __get_user(env->pc, &sc->sc_pc);
4669
4670 for (i = 0; i < 31; ++i) {
4671 __get_user(env->ir[i], &sc->sc_regs[i]);
4672 }
4673 for (i = 0; i < 31; ++i) {
4674 __get_user(env->fir[i], &sc->sc_fpregs[i]);
4675 }
4676
4677 __get_user(fpcr, &sc->sc_fpcr);
4678 cpu_alpha_store_fpcr(env, fpcr);
4679 }
4680
4681 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
4682 CPUAlphaState *env,
4683 unsigned long framesize)
4684 {
4685 abi_ulong sp = env->ir[IR_SP];
4686
4687 /* This is the X/Open sanctioned signal stack switching. */
4688 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
4689 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4690 }
4691 return (sp - framesize) & -32;
4692 }
4693
4694 static void setup_frame(int sig, struct target_sigaction *ka,
4695 target_sigset_t *set, CPUAlphaState *env)
4696 {
4697 abi_ulong frame_addr, r26;
4698 struct target_sigframe *frame;
4699 int err = 0;
4700
4701 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4702 trace_user_setup_frame(env, frame_addr);
4703 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4704 goto give_sigsegv;
4705 }
4706
4707 setup_sigcontext(&frame->sc, env, frame_addr, set);
4708
4709 if (ka->sa_restorer) {
4710 r26 = ka->sa_restorer;
4711 } else {
4712 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
4713 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
4714 &frame->retcode[1]);
4715 __put_user(INSN_CALLSYS, &frame->retcode[2]);
4716 /* imb() */
4717 r26 = frame_addr + offsetof(struct target_sigframe, retcode);
4718 }
4719
4720 unlock_user_struct(frame, frame_addr, 1);
4721
4722 if (err) {
4723 give_sigsegv:
4724 force_sigsegv(sig);
4725 return;
4726 }
4727
4728 env->ir[IR_RA] = r26;
4729 env->ir[IR_PV] = env->pc = ka->_sa_handler;
4730 env->ir[IR_A0] = sig;
4731 env->ir[IR_A1] = 0;
4732 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
4733 env->ir[IR_SP] = frame_addr;
4734 }
4735
4736 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4737 target_siginfo_t *info,
4738 target_sigset_t *set, CPUAlphaState *env)
4739 {
4740 abi_ulong frame_addr, r26;
4741 struct target_rt_sigframe *frame;
4742 int i, err = 0;
4743
4744 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4745 trace_user_setup_rt_frame(env, frame_addr);
4746 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4747 goto give_sigsegv;
4748 }
4749
4750 tswap_siginfo(&frame->info, info);
4751
4752 __put_user(0, &frame->uc.tuc_flags);
4753 __put_user(0, &frame->uc.tuc_link);
4754 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
4755 __put_user(target_sigaltstack_used.ss_sp,
4756 &frame->uc.tuc_stack.ss_sp);
4757 __put_user(sas_ss_flags(env->ir[IR_SP]),
4758 &frame->uc.tuc_stack.ss_flags);
4759 __put_user(target_sigaltstack_used.ss_size,
4760 &frame->uc.tuc_stack.ss_size);
4761 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
4762 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
4763 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
4764 }
4765
4766 if (ka->sa_restorer) {
4767 r26 = ka->sa_restorer;
4768 } else {
4769 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
4770 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
4771 &frame->retcode[1]);
4772 __put_user(INSN_CALLSYS, &frame->retcode[2]);
4773 /* imb(); */
4774 r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4775 }
4776
4777 if (err) {
4778 give_sigsegv:
4779 force_sigsegv(sig);
4780 return;
4781 }
4782
4783 env->ir[IR_RA] = r26;
4784 env->ir[IR_PV] = env->pc = ka->_sa_handler;
4785 env->ir[IR_A0] = sig;
4786 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
4787 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
4788 env->ir[IR_SP] = frame_addr;
4789 }
4790
4791 long do_sigreturn(CPUAlphaState *env)
4792 {
4793 struct target_sigcontext *sc;
4794 abi_ulong sc_addr = env->ir[IR_A0];
4795 target_sigset_t target_set;
4796 sigset_t set;
4797
4798 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
4799 goto badframe;
4800 }
4801
4802 target_sigemptyset(&target_set);
4803 __get_user(target_set.sig[0], &sc->sc_mask);
4804
4805 target_to_host_sigset_internal(&set, &target_set);
4806 set_sigmask(&set);
4807
4808 restore_sigcontext(env, sc);
4809 unlock_user_struct(sc, sc_addr, 0);
4810 return -TARGET_QEMU_ESIGRETURN;
4811
4812 badframe:
4813 force_sig(TARGET_SIGSEGV);
4814 return -TARGET_QEMU_ESIGRETURN;
4815 }
4816
4817 long do_rt_sigreturn(CPUAlphaState *env)
4818 {
4819 abi_ulong frame_addr = env->ir[IR_A0];
4820 struct target_rt_sigframe *frame;
4821 sigset_t set;
4822
4823 trace_user_do_rt_sigreturn(env, frame_addr);
4824 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4825 goto badframe;
4826 }
4827 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4828 set_sigmask(&set);
4829
4830 restore_sigcontext(env, &frame->uc.tuc_mcontext);
4831 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
4832 uc.tuc_stack),
4833 0, env->ir[IR_SP]) == -EFAULT) {
4834 goto badframe;
4835 }
4836
4837 unlock_user_struct(frame, frame_addr, 0);
4838 return -TARGET_QEMU_ESIGRETURN;
4839
4840
4841 badframe:
4842 unlock_user_struct(frame, frame_addr, 0);
4843 force_sig(TARGET_SIGSEGV);
4844 return -TARGET_QEMU_ESIGRETURN;
4845 }
4846
4847 #elif defined(TARGET_TILEGX)
4848
4849 struct target_sigcontext {
4850 union {
4851 /* General-purpose registers. */
4852 abi_ulong gregs[56];
4853 struct {
4854 abi_ulong __gregs[53];
4855 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
4856 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
4857 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
4858 };
4859 };
4860 abi_ulong pc; /* Program counter. */
4861 abi_ulong ics; /* In Interrupt Critical Section? */
4862 abi_ulong faultnum; /* Fault number. */
4863 abi_ulong pad[5];
4864 };
4865
4866 struct target_ucontext {
4867 abi_ulong tuc_flags;
4868 abi_ulong tuc_link;
4869 target_stack_t tuc_stack;
4870 struct target_sigcontext tuc_mcontext;
4871 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4872 };
4873
4874 struct target_rt_sigframe {
4875 unsigned char save_area[16]; /* caller save area */
4876 struct target_siginfo info;
4877 struct target_ucontext uc;
4878 abi_ulong retcode[2];
4879 };
4880
4881 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
4882 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
4883
4884
4885 static void setup_sigcontext(struct target_sigcontext *sc,
4886 CPUArchState *env, int signo)
4887 {
4888 int i;
4889
4890 for (i = 0; i < TILEGX_R_COUNT; ++i) {
4891 __put_user(env->regs[i], &sc->gregs[i]);
4892 }
4893
4894 __put_user(env->pc, &sc->pc);
4895 __put_user(0, &sc->ics);
4896 __put_user(signo, &sc->faultnum);
4897 }
4898
4899 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
4900 {
4901 int i;
4902
4903 for (i = 0; i < TILEGX_R_COUNT; ++i) {
4904 __get_user(env->regs[i], &sc->gregs[i]);
4905 }
4906
4907 __get_user(env->pc, &sc->pc);
4908 }
4909
4910 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
4911 size_t frame_size)
4912 {
4913 unsigned long sp = env->regs[TILEGX_R_SP];
4914
4915 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
4916 return -1UL;
4917 }
4918
4919 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
4920 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4921 }
4922
4923 sp -= frame_size;
4924 sp &= -16UL;
4925 return sp;
4926 }
4927
4928 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4929 target_siginfo_t *info,
4930 target_sigset_t *set, CPUArchState *env)
4931 {
4932 abi_ulong frame_addr;
4933 struct target_rt_sigframe *frame;
4934 unsigned long restorer;
4935
4936 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4937 trace_user_setup_rt_frame(env, frame_addr);
4938 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4939 goto give_sigsegv;
4940 }
4941
4942 /* Always write at least the signal number for the stack backtracer. */
4943 if (ka->sa_flags & TARGET_SA_SIGINFO) {
4944 /* At sigreturn time, restore the callee-save registers too. */
4945 tswap_siginfo(&frame->info, info);
4946 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
4947 } else {
4948 __put_user(info->si_signo, &frame->info.si_signo);
4949 }
4950
4951 /* Create the ucontext. */
4952 __put_user(0, &frame->uc.tuc_flags);
4953 __put_user(0, &frame->uc.tuc_link);
4954 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4955 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
4956 &frame->uc.tuc_stack.ss_flags);
4957 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4958 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
4959
4960 if (ka->sa_flags & TARGET_SA_RESTORER) {
4961 restorer = (unsigned long) ka->sa_restorer;
4962 } else {
4963 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
4964 __put_user(INSN_SWINT1, &frame->retcode[1]);
4965 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4966 }
4967 env->pc = (unsigned long) ka->_sa_handler;
4968 env->regs[TILEGX_R_SP] = (unsigned long) frame_addr;
4969 env->regs[TILEGX_R_LR] = restorer;
4970 env->regs[0] = (unsigned long) sig;
4971 env->regs[1] = (unsigned long) (frame_addr + offsetof(struct target_rt_sigframe, info));
4972 env->regs[2] = (unsigned long) (frame_addr + offsetof(struct target_rt_sigframe, uc));
4973 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
4974
4975 unlock_user_struct(frame, frame_addr, 1);
4976 return;
4977
4978 give_sigsegv:
4979 force_sigsegv(sig);
4980 }
4981
4982 long do_rt_sigreturn(CPUTLGState *env)
4983 {
4984 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
4985 struct target_rt_sigframe *frame;
4986 sigset_t set;
4987
4988 trace_user_do_rt_sigreturn(env, frame_addr);
4989 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4990 goto badframe;
4991 }
4992 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4993 set_sigmask(&set);
4994
4995 restore_sigcontext(env, &frame->uc.tuc_mcontext);
4996 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
4997 uc.tuc_stack),
4998 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
4999 goto badframe;
5000 }
5001
5002 unlock_user_struct(frame, frame_addr, 0);
5003 return -TARGET_QEMU_ESIGRETURN;
5004
5005
5006 badframe:
5007 unlock_user_struct(frame, frame_addr, 0);
5008 force_sig(TARGET_SIGSEGV);
5009 return -TARGET_QEMU_ESIGRETURN;
5010 }
5011
5012 #elif defined(TARGET_RISCV)
5013
5014 /* Signal handler invocation must be transparent for the code being
5015 interrupted. Complete CPU (hart) state is saved on entry and restored
5016 before returning from the handler. Process sigmask is also saved to block
5017 signals while the handler is running. The handler gets its own stack,
5018 which also doubles as storage for the CPU state and sigmask.
5019
5020 The code below is a QEMU re-implementation of arch/riscv/kernel/signal.c */
5021
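/* As a guest-side illustration of the transparency described above (a sketch
 * only, not part of QEMU; it uses nothing beyond standard POSIX calls): an
 * ordinary guest program such as the one below neither knows nor cares that
 * setup_rt_frame()/do_rt_sigreturn() further down are doing the work a native
 * kernel would do.
 */
#if 0
#include <signal.h>
#include <stddef.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
    /* Runs on the frame built by setup_rt_frame(); *ucontext maps onto
     * struct target_ucontext. Returning from here jumps to the tramp[]
     * stub, whose rt_sigreturn syscall ends up in do_rt_sigreturn(). */
    (void)sig;
    (void)info;
    (void)ucontext;
}

static void install(void)
{
    struct sigaction sa;

    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);
}
#endif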
5022 struct target_sigcontext {
5023 abi_long pc;
5024 abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
5025 uint64_t fpr[32];
5026 uint32_t fcsr;
5027 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
5028
5029 struct target_ucontext {
5030 unsigned long uc_flags;
5031 struct target_ucontext *uc_link;
5032 target_stack_t uc_stack;
5033 struct target_sigcontext uc_mcontext;
5034 target_sigset_t uc_sigmask;
5035 };
5036
5037 struct target_rt_sigframe {
5038 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
5039 struct target_siginfo info;
5040 struct target_ucontext uc;
5041 };
5042
5043 static abi_ulong get_sigframe(struct target_sigaction *ka,
5044 CPURISCVState *regs, size_t framesize)
5045 {
5046 abi_ulong sp = regs->gpr[xSP];
5047 int onsigstack = on_sig_stack(sp);
5048
5049 /* redzone */
5050 /* This is the X/Open sanctioned signal stack switching. */
5051 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
5052 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5053 }
5054
5055 sp -= framesize;
5056 sp &= ~3UL; /* align sp on 4-byte boundary */
5057
5058 /* If we are on the alternate signal stack and would overflow it, don't.
5059 Return an always-bogus address instead so we will die with SIGSEGV. */
5060 if (onsigstack && !likely(on_sig_stack(sp))) {
5061 return -1L;
5062 }
5063
5064 return sp;
5065 }
5066
5067 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
5068 {
5069 int i;
5070
5071 __put_user(env->pc, &sc->pc);
5072
5073 for (i = 1; i < 32; i++) {
5074 __put_user(env->gpr[i], &sc->gpr[i - 1]);
5075 }
5076 for (i = 0; i < 32; i++) {
5077 __put_user(env->fpr[i], &sc->fpr[i]);
5078 }
5079
5080 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
5081 __put_user(fcsr, &sc->fcsr);
5082 }
5083
5084 static void setup_ucontext(struct target_ucontext *uc,
5085 CPURISCVState *env, target_sigset_t *set)
5086 {
5087 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
5088 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
5089 abi_ulong ss_size = target_sigaltstack_used.ss_size;
5090
5091 __put_user(0, &(uc->uc_flags));
5092 __put_user(0, &(uc->uc_link));
5093
5094 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
5095 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
5096 __put_user(ss_size, &(uc->uc_stack.ss_size));
5097
5098 int i;
5099 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5100 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
5101 }
5102
5103 setup_sigcontext(&uc->uc_mcontext, env);
5104 }
5105
5106 static inline void install_sigtramp(uint32_t *tramp)
5107 {
5108 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
5109 __put_user(0x00000073, tramp + 1); /* ecall */
5110 }
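/* Encoding breakdown of the two words above (both are base RV32I/RV64I
 * encodings, so the same stub works for either XLEN):
 *
 *     0x08b00893   addi a7, zero, 139   # imm = 139 (0x08b), rd = x17 (a7)
 *     0x00000073   ecall
 *
 * i.e. exactly the "li a7, __NR_rt_sigreturn; ecall" pair noted in the
 * comments.
 */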
5111
5112 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5113 target_siginfo_t *info,
5114 target_sigset_t *set, CPURISCVState *env)
5115 {
5116 abi_ulong frame_addr;
5117 struct target_rt_sigframe *frame;
5118
5119 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5120 trace_user_setup_rt_frame(env, frame_addr);
5121
5122 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5123 goto badframe;
5124 }
5125
5126 setup_ucontext(&frame->uc, env, set);
5127 tswap_siginfo(&frame->info, info);
5128 install_sigtramp(frame->tramp);
5129
5130 env->pc = ka->_sa_handler;
5131 env->gpr[xSP] = frame_addr;
5132 env->gpr[xA0] = sig;
5133 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5134 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5135 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
5136
5137 return;
5138
5139 badframe:
5140 unlock_user_struct(frame, frame_addr, 1);
5141 if (sig == TARGET_SIGSEGV) {
5142 ka->_sa_handler = TARGET_SIG_DFL;
5143 }
5144 force_sig(TARGET_SIGSEGV);
5145 }
5146
5147 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
5148 {
5149 int i;
5150
5151 __get_user(env->pc, &sc->pc);
5152
5153 for (i = 1; i < 32; ++i) {
5154 __get_user(env->gpr[i], &sc->gpr[i - 1]);
5155 }
5156 for (i = 0; i < 32; ++i) {
5157 __get_user(env->fpr[i], &sc->fpr[i]);
5158 }
5159
5160 uint32_t fcsr;
5161 __get_user(fcsr, &sc->fcsr);
5162 csr_write_helper(env, fcsr, CSR_FCSR);
5163 }
5164
5165 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
5166 {
5167 sigset_t blocked;
5168 target_sigset_t target_set;
5169 int i;
5170
5171 target_sigemptyset(&target_set);
5172 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5173 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
5174 }
5175
5176 target_to_host_sigset_internal(&blocked, &target_set);
5177 set_sigmask(&blocked);
5178
5179 restore_sigcontext(env, &uc->uc_mcontext);
5180 }
5181
5182 long do_rt_sigreturn(CPURISCVState *env)
5183 {
5184 struct target_rt_sigframe *frame;
5185 abi_ulong frame_addr;
5186
5187 frame_addr = env->gpr[xSP];
5188 trace_user_do_sigreturn(env, frame_addr);
5189 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5190 goto badframe;
5191 }
5192
5193 restore_ucontext(env, &frame->uc);
5194
5195 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5196 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
5197 goto badframe;
5198 }
5199
5200 unlock_user_struct(frame, frame_addr, 0);
5201 return -TARGET_QEMU_ESIGRETURN;
5202
5203 badframe:
5204 unlock_user_struct(frame, frame_addr, 0);
5205 force_sig(TARGET_SIGSEGV);
5206 return 0;
5207 }
5208
5209 #elif defined(TARGET_HPPA)
5210
5211 struct target_sigcontext {
5212 abi_ulong sc_flags;
5213 abi_ulong sc_gr[32];
5214 uint64_t sc_fr[32];
5215 abi_ulong sc_iasq[2];
5216 abi_ulong sc_iaoq[2];
5217 abi_ulong sc_sar;
5218 };
5219
5220 struct target_ucontext {
5221 abi_uint tuc_flags;
5222 abi_ulong tuc_link;
5223 target_stack_t tuc_stack;
5224 abi_uint pad[1];
5225 struct target_sigcontext tuc_mcontext;
5226 target_sigset_t tuc_sigmask;
5227 };
5228
5229 struct target_rt_sigframe {
5230 abi_uint tramp[9];
5231 target_siginfo_t info;
5232 struct target_ucontext uc;
5233 /* hidden location of upper halves of pa2.0 64-bit gregs */
5234 };
5235
5236 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
5237 {
5238 int flags = 0;
5239 int i;
5240
5241 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
5242
5243 if (env->iaoq_f < TARGET_PAGE_SIZE) {
5244 /* In the gateway page, executing a syscall. */
5245 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
5246 __put_user(env->gr[31], &sc->sc_iaoq[0]);
5247 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
5248 } else {
5249 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
5250 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
5251 }
5252 __put_user(0, &sc->sc_iasq[0]);
5253 __put_user(0, &sc->sc_iasq[1]);
5254 __put_user(flags, &sc->sc_flags);
5255
5256 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
5257 for (i = 1; i < 32; ++i) {
5258 __put_user(env->gr[i], &sc->sc_gr[i]);
5259 }
5260
5261 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
5262 for (i = 1; i < 32; ++i) {
5263 __put_user(env->fr[i], &sc->sc_fr[i]);
5264 }
5265
5266 __put_user(env->cr[CR_SAR], &sc->sc_sar);
5267 }
5268
5269 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
5270 {
5271 target_ulong psw;
5272 int i;
5273
5274 __get_user(psw, &sc->sc_gr[0]);
5275 cpu_hppa_put_psw(env, psw);
5276
5277 for (i = 1; i < 32; ++i) {
5278 __get_user(env->gr[i], &sc->sc_gr[i]);
5279 }
5280 for (i = 0; i < 32; ++i) {
5281 __get_user(env->fr[i], &sc->sc_fr[i]);
5282 }
5283 cpu_hppa_loaded_fr0(env);
5284
5285 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
5286 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
5287 __get_user(env->cr[CR_SAR], &sc->sc_sar);
5288 }
5289
5290 /* No, this doesn't look right, but it's copied straight from the kernel. */
5291 #define PARISC_RT_SIGFRAME_SIZE32 \
5292 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
5293
5294 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5295 target_siginfo_t *info,
5296 target_sigset_t *set, CPUArchState *env)
5297 {
5298 abi_ulong frame_addr, sp, haddr;
5299 struct target_rt_sigframe *frame;
5300 int i;
5301
5302 sp = env->gr[30];
5303 if (ka->sa_flags & TARGET_SA_ONSTACK) {
5304 if (sas_ss_flags(sp) == 0) {
5305 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
5306 }
5307 }
5308 frame_addr = QEMU_ALIGN_UP(sp, 64);
5309 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
5310
5311 trace_user_setup_rt_frame(env, frame_addr);
5312
5313 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5314 goto give_sigsegv;
5315 }
5316
5317 tswap_siginfo(&frame->info, info);
5318 frame->uc.tuc_flags = 0;
5319 frame->uc.tuc_link = 0;
5320
5321 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5322 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
5323 &frame->uc.tuc_stack.ss_flags);
5324 __put_user(target_sigaltstack_used.ss_size,
5325 &frame->uc.tuc_stack.ss_size);
5326
5327 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5328 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5329 }
5330
5331 setup_sigcontext(&frame->uc.tuc_mcontext, env);
5332
5333 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
5334 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
5335 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
5336 __put_user(0x08000240, frame->tramp + 3); /* nop */
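/* Assembled, the stub written above reads (mnemonics as in the per-word
 * comments; the gateway-page detail is standard parisc convention rather than
 * anything specific to this file):
 *
 *     ldi   0, %r25
 *     ldi   __NR_rt_sigreturn, %r20     # syscall number in r20
 *     be,l  0x100(%sr2, %r0)            # branch into the syscall gateway
 *     nop                               # delay slot
 *
 * The handler's return pointer (env->gr[2] below) is aimed at this stub, so
 * returning from the handler lands in do_rt_sigreturn().
 */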
5337
5338 unlock_user_struct(frame, frame_addr, 1);
5339
5340 env->gr[2] = h2g(frame->tramp);
5341 env->gr[30] = sp;
5342 env->gr[26] = sig;
5343 env->gr[25] = h2g(&frame->info);
5344 env->gr[24] = h2g(&frame->uc);
5345
5346 haddr = ka->_sa_handler;
5347 if (haddr & 2) {
5348 /* Function descriptor. */
5349 target_ulong *fdesc, dest;
5350
5351 haddr &= -4;
5352 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
5353 goto give_sigsegv;
5354 }
5355 __get_user(dest, fdesc);
5356 __get_user(env->gr[19], fdesc + 1);
5357 unlock_user_struct(fdesc, haddr, 1);
5358 haddr = dest;
5359 }
5360 env->iaoq_f = haddr;
5361 env->iaoq_b = haddr + 4;
5362 return;
5363
5364 give_sigsegv:
5365 force_sigsegv(sig);
5366 }
5367
5368 long do_rt_sigreturn(CPUArchState *env)
5369 {
5370 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
5371 struct target_rt_sigframe *frame;
5372 sigset_t set;
5373
5374 trace_user_do_rt_sigreturn(env, frame_addr);
5375 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5376 goto badframe;
5377 }
5378 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5379 set_sigmask(&set);
5380
5381 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5382 unlock_user_struct(frame, frame_addr, 0);
5383
5384 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5385 uc.tuc_stack),
5386 0, env->gr[30]) == -EFAULT) {
5387 goto badframe;
5388 }
5389
5390 unlock_user_struct(frame, frame_addr, 0);
5391 return -TARGET_QEMU_ESIGRETURN;
5392
5393 badframe:
5394 force_sig(TARGET_SIGSEGV);
5395 return -TARGET_QEMU_ESIGRETURN;
5396 }
5397
5398 #elif defined(TARGET_XTENSA)
5399
5400 struct target_sigcontext {
5401 abi_ulong sc_pc;
5402 abi_ulong sc_ps;
5403 abi_ulong sc_lbeg;
5404 abi_ulong sc_lend;
5405 abi_ulong sc_lcount;
5406 abi_ulong sc_sar;
5407 abi_ulong sc_acclo;
5408 abi_ulong sc_acchi;
5409 abi_ulong sc_a[16];
5410 abi_ulong sc_xtregs;
5411 };
5412
5413 struct target_ucontext {
5414 abi_ulong tuc_flags;
5415 abi_ulong tuc_link;
5416 target_stack_t tuc_stack;
5417 struct target_sigcontext tuc_mcontext;
5418 target_sigset_t tuc_sigmask;
5419 };
5420
5421 struct target_rt_sigframe {
5422 target_siginfo_t info;
5423 struct target_ucontext uc;
5424 /* TODO: xtregs */
5425 uint8_t retcode[6];
5426 abi_ulong window[4];
5427 };
5428
5429 static abi_ulong get_sigframe(struct target_sigaction *sa,
5430 CPUXtensaState *env,
5431 unsigned long framesize)
5432 {
5433 abi_ulong sp = env->regs[1];
5434
5435 /* This is the X/Open sanctioned signal stack switching. */
5436 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5437 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5438 }
5439 return (sp - framesize) & -16;
5440 }
5441
5442 static int flush_window_regs(CPUXtensaState *env)
5443 {
5444 uint32_t wb = env->sregs[WINDOW_BASE];
5445 uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1);
5446 unsigned d = ctz32(ws) + 1;
5447 unsigned i;
5448 int ret = 0;
5449
5450 for (i = d; i < env->config->nareg / 4; i += d) {
5451 uint32_t ssp, osp;
5452 unsigned j;
5453
5454 ws >>= d;
5455 xtensa_rotate_window(env, d);
5456
5457 if (ws & 0x1) {
5458 ssp = env->regs[5];
5459 d = 1;
5460 } else if (ws & 0x2) {
5461 ssp = env->regs[9];
5462 ret |= get_user_ual(osp, env->regs[1] - 12);
5463 osp -= 32;
5464 d = 2;
5465 } else if (ws & 0x4) {
5466 ssp = env->regs[13];
5467 ret |= get_user_ual(osp, env->regs[1] - 12);
5468 osp -= 48;
5469 d = 3;
5470 } else {
5471 g_assert_not_reached();
5472 }
5473
5474 for (j = 0; j < 4; ++j) {
5475 ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4);
5476 }
5477 for (j = 4; j < d * 4; ++j) {
5478 ret |= put_user_ual(env->regs[j], osp - 16 + j * 4);
5479 }
5480 }
5481 xtensa_rotate_window(env, d);
5482 g_assert(env->sregs[WINDOW_BASE] == wb);
5483 return ret == 0;
5484 }
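/* Layout produced by the loop above (a restatement of the code, which follows
 * the hardware window-spill format): for each live window frame it rotates to,
 * a0..a3 are stored in the 16 bytes below ssp (taken from a5, a9 or a13
 * depending on the caller's call increment), and call8/call12 frames
 * additionally spill a4..a7 (and a8..a11) into the area reached through the
 * pointer loaded from a1 - 12.
 */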
5485
5486 static int setup_sigcontext(struct target_rt_sigframe *frame,
5487 CPUXtensaState *env)
5488 {
5489 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
5490 int i;
5491
5492 __put_user(env->pc, &sc->sc_pc);
5493 __put_user(env->sregs[PS], &sc->sc_ps);
5494 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
5495 __put_user(env->sregs[LEND], &sc->sc_lend);
5496 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
5497 if (!flush_window_regs(env)) {
5498 return 0;
5499 }
5500 for (i = 0; i < 16; ++i) {
5501 __put_user(env->regs[i], sc->sc_a + i);
5502 }
5503 __put_user(0, &sc->sc_xtregs);
5504 /* TODO: xtregs */
5505 return 1;
5506 }
5507
5508 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5509 target_siginfo_t *info,
5510 target_sigset_t *set, CPUXtensaState *env)
5511 {
5512 abi_ulong frame_addr;
5513 struct target_rt_sigframe *frame;
5514 uint32_t ra;
5515 int i;
5516
5517 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5518 trace_user_setup_rt_frame(env, frame_addr);
5519
5520 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5521 goto give_sigsegv;
5522 }
5523
5524 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5525 tswap_siginfo(&frame->info, info);
5526 }
5527
5528 __put_user(0, &frame->uc.tuc_flags);
5529 __put_user(0, &frame->uc.tuc_link);
5530 __put_user(target_sigaltstack_used.ss_sp,
5531 &frame->uc.tuc_stack.ss_sp);
5532 __put_user(sas_ss_flags(env->regs[1]),
5533 &frame->uc.tuc_stack.ss_flags);
5534 __put_user(target_sigaltstack_used.ss_size,
5535 &frame->uc.tuc_stack.ss_size);
5536 if (!setup_sigcontext(frame, env)) {
5537 unlock_user_struct(frame, frame_addr, 0);
5538 goto give_sigsegv;
5539 }
5540 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5541 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5542 }
5543
5544 if (ka->sa_flags & TARGET_SA_RESTORER) {
5545 ra = ka->sa_restorer;
5546 } else {
5547 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5548 #ifdef TARGET_WORDS_BIGENDIAN
5549 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
5550 __put_user(0x22, &frame->retcode[0]);
5551 __put_user(0x0a, &frame->retcode[1]);
5552 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
5553 /* Generate instruction: SYSCALL */
5554 __put_user(0x00, &frame->retcode[3]);
5555 __put_user(0x05, &frame->retcode[4]);
5556 __put_user(0x00, &frame->retcode[5]);
5557 #else
5558 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
5559 __put_user(0x22, &frame->retcode[0]);
5560 __put_user(0xa0, &frame->retcode[1]);
5561 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
5562 /* Generate instruction: SYSCALL */
5563 __put_user(0x00, &frame->retcode[3]);
5564 __put_user(0x50, &frame->retcode[4]);
5565 __put_user(0x00, &frame->retcode[5]);
5566 #endif
5567 }
5568 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
5569 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
5570 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
5571 }
5572 memset(env->regs, 0, sizeof(env->regs));
5573 env->pc = ka->_sa_handler;
5574 env->regs[1] = frame_addr;
5575 env->sregs[WINDOW_BASE] = 0;
5576 env->sregs[WINDOW_START] = 1;
5577
5578 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
5579 env->regs[6] = sig;
5580 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
5581 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5582 unlock_user_struct(frame, frame_addr, 1);
5583 return;
5584
5585 give_sigsegv:
5586 force_sigsegv(sig);
5587 return;
5588 }
5589
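/* Restore pc, the loop registers, the CALLINC field of PS and a0..a15
 * from the sigcontext, collapsing the register file to a single window
 * at WINDOW_BASE 0.
 */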
5590 static void restore_sigcontext(CPUXtensaState *env,
5591 struct target_rt_sigframe *frame)
5592 {
5593 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
5594 uint32_t ps;
5595 int i;
5596
5597 __get_user(env->pc, &sc->sc_pc);
5598 __get_user(ps, &sc->sc_ps);
5599 __get_user(env->sregs[LBEG], &sc->sc_lbeg);
5600 __get_user(env->sregs[LEND], &sc->sc_lend);
5601 __get_user(env->sregs[LCOUNT], &sc->sc_lcount);
5602
5603 env->sregs[WINDOW_BASE] = 0;
5604 env->sregs[WINDOW_START] = 1;
5605 env->sregs[PS] = deposit32(env->sregs[PS],
5606 PS_CALLINC_SHIFT,
5607 PS_CALLINC_LEN,
5608 extract32(ps, PS_CALLINC_SHIFT,
5609 PS_CALLINC_LEN));
5610 for (i = 0; i < 16; ++i) {
5611 __get_user(env->regs[i], sc->sc_a + i);
5612 }
5613 /* TODO: xtregs */
5614 }
5615
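/* Handle the rt_sigreturn syscall issued by the sigreturn trampoline:
 * a1 still points at the signal frame, from which the signal mask,
 * CPU state and alternate stack settings are restored.
 */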
5616 long do_rt_sigreturn(CPUXtensaState *env)
5617 {
5618 abi_ulong frame_addr = env->regs[1];
5619 struct target_rt_sigframe *frame;
5620 sigset_t set;
5621
5622 trace_user_do_rt_sigreturn(env, frame_addr);
5623 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5624 goto badframe;
5625 }
5626 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5627 set_sigmask(&set);
5628
5629 restore_sigcontext(env, frame);
5630
5631 if (do_sigaltstack(frame_addr +
5632 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5633 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
5634 goto badframe;
5635 }
5636 unlock_user_struct(frame, frame_addr, 0);
5637 return -TARGET_QEMU_ESIGRETURN;
5638
5639 badframe:
5640 unlock_user_struct(frame, frame_addr, 0);
5641 force_sig(TARGET_SIGSEGV);
5642 return -TARGET_QEMU_ESIGRETURN;
5643 }
5644 #endif
5645
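/* Deliver one guest signal: let gdb intercept it first, then either
 * perform the default action (stop, ignore, or dump core and abort),
 * ignore it, or block the signals in sa_mask and build the signal
 * frame so the guest handler runs next.
 */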
5646 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
5647 struct emulated_sigtable *k)
5648 {
5649 CPUState *cpu = ENV_GET_CPU(cpu_env);
5650 abi_ulong handler;
5651 sigset_t set;
5652 target_sigset_t target_old_set;
5653 struct target_sigaction *sa;
5654 TaskState *ts = cpu->opaque;
5655
5656 trace_user_handle_signal(cpu_env, sig);
5657 /* dequeue signal */
5658 k->pending = 0;
5659
5660 sig = gdb_handlesig(cpu, sig);
5661 if (!sig) {
5662 sa = NULL;
5663 handler = TARGET_SIG_IGN;
5664 } else {
5665 sa = &sigact_table[sig - 1];
5666 handler = sa->_sa_handler;
5667 }
5668
5669 if (do_strace) {
5670 print_taken_signal(sig, &k->info);
5671 }
5672
5673 if (handler == TARGET_SIG_DFL) {
5674 /* default handler: ignore some signals; the others are job control or fatal */
5675 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5676 kill(getpid(), SIGSTOP);
5677 } else if (sig != TARGET_SIGCHLD &&
5678 sig != TARGET_SIGURG &&
5679 sig != TARGET_SIGWINCH &&
5680 sig != TARGET_SIGCONT) {
5681 dump_core_and_abort(sig);
5682 }
5683 } else if (handler == TARGET_SIG_IGN) {
5684 /* ignore sig */
5685 } else if (handler == TARGET_SIG_ERR) {
5686 dump_core_and_abort(sig);
5687 } else {
5688 /* compute the blocked signals during the handler execution */
5689 sigset_t *blocked_set;
5690
5691 target_to_host_sigset(&set, &sa->sa_mask);
5692 /* SA_NODEFER indicates that the current signal should not be
5693 blocked during the handler */
5694 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5695 sigaddset(&set, target_to_host_signal(sig));
5696
5697 /* save the previous blocked signal state to restore it at the
5698 end of the signal execution (see do_sigreturn) */
5699 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5700
5701 /* block signals in the handler */
5702 blocked_set = ts->in_sigsuspend ?
5703 &ts->sigsuspend_mask : &ts->signal_mask;
5704 sigorset(&ts->signal_mask, blocked_set, &set);
5705 ts->in_sigsuspend = 0;
5706
5707 /* if the CPU is in VM86 mode, save the vm86 state and restore the 32 bit register values */
5708 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5709 {
5710 CPUX86State *env = cpu_env;
5711 if (env->eflags & VM_MASK)
5712 save_v86_state(env);
5713 }
5714 #endif
5715 /* prepare the stack frame of the virtual CPU */
5716 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5717 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
5718 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
5719 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
5720 || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
5721 /* These targets only use rt signal frames. */
5722 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5723 #else
5724 if (sa->sa_flags & TARGET_SA_SIGINFO)
5725 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5726 else
5727 setup_frame(sig, sa, &target_old_set, cpu_env);
5728 #endif
5729 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5730 sa->_sa_handler = TARGET_SIG_DFL;
5731 }
5732 }
5733 }
5734
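/* Called from the cpu loop whenever signal_pending is set. With all host
 * signals blocked, deliver the synchronous signal (if any) and then the
 * ordinary pending signals, rescanning after each delivery because it may
 * raise a new synchronous signal. Finally restore the guest signal mask,
 * always leaving SIGSEGV and SIGBUS unblocked for the host.
 */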
5735 void process_pending_signals(CPUArchState *cpu_env)
5736 {
5737 CPUState *cpu = ENV_GET_CPU(cpu_env);
5738 int sig;
5739 TaskState *ts = cpu->opaque;
5740 sigset_t set;
5741 sigset_t *blocked_set;
5742
5743 while (atomic_read(&ts->signal_pending)) {
5744 /* FIXME: This is not threadsafe. */
5745 sigfillset(&set);
5746 sigprocmask(SIG_SETMASK, &set, 0);
5747
5748 restart_scan:
5749 sig = ts->sync_signal.pending;
5750 if (sig) {
5751 /* Synchronous signals are forced;
5752 * see force_sig_info() and its callers in Linux.
5753 * Note that not all of our queue_signal() calls in QEMU correspond
5754 * to force_sig_info() calls in Linux (some are send_sig_info()).
5755 * However, it seems like a kernel bug to me to allow the process
5756 * to block a synchronous signal, since it could then just end up
5757 * looping round and round indefinitely.
5758 */
5759 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
5760 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
5761 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
5762 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
5763 }
5764
5765 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
5766 }
5767
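/* Now deliver ordinary pending signals in ascending order, skipping
 * any that are blocked by the current (or sigsuspend) mask.
 */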
5768 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5769 blocked_set = ts->in_sigsuspend ?
5770 &ts->sigsuspend_mask : &ts->signal_mask;
5771
5772 if (ts->sigtab[sig - 1].pending &&
5773 (!sigismember(blocked_set,
5774 target_to_host_signal_table[sig]))) {
5775 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
5776 /* Restart scan from the beginning, as handle_pending_signal
5777 * might have resulted in a new synchronous signal (eg SIGSEGV).
5778 */
5779 goto restart_scan;
5780 }
5781 }
5782
5783 /* if no signal is pending, unblock signals and recheck (the act
5784 * of unblocking might cause us to take another host signal which
5785 * will set signal_pending again).
5786 */
5787 atomic_set(&ts->signal_pending, 0);
5788 ts->in_sigsuspend = 0;
5789 set = ts->signal_mask;
5790 sigdelset(&set, SIGSEGV);
5791 sigdelset(&set, SIGBUS);
5792 sigprocmask(SIG_SETMASK, &set, 0);
5793 }
5794 ts->in_sigsuspend = 0;
5795 }