1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28 #include "signal-common.h"
29
30 struct target_sigaltstack target_sigaltstack_used = {
31 .ss_sp = 0,
32 .ss_size = 0,
33 .ss_flags = TARGET_SS_DISABLE,
34 };
35
36 static struct target_sigaction sigact_table[TARGET_NSIG];
37
38 static void host_signal_handler(int host_signum, siginfo_t *info,
39 void *puc);
40
41 static uint8_t host_to_target_signal_table[_NSIG] = {
42 [SIGHUP] = TARGET_SIGHUP,
43 [SIGINT] = TARGET_SIGINT,
44 [SIGQUIT] = TARGET_SIGQUIT,
45 [SIGILL] = TARGET_SIGILL,
46 [SIGTRAP] = TARGET_SIGTRAP,
47 [SIGABRT] = TARGET_SIGABRT,
48 /* [SIGIOT] = TARGET_SIGIOT,*/
49 [SIGBUS] = TARGET_SIGBUS,
50 [SIGFPE] = TARGET_SIGFPE,
51 [SIGKILL] = TARGET_SIGKILL,
52 [SIGUSR1] = TARGET_SIGUSR1,
53 [SIGSEGV] = TARGET_SIGSEGV,
54 [SIGUSR2] = TARGET_SIGUSR2,
55 [SIGPIPE] = TARGET_SIGPIPE,
56 [SIGALRM] = TARGET_SIGALRM,
57 [SIGTERM] = TARGET_SIGTERM,
58 #ifdef SIGSTKFLT
59 [SIGSTKFLT] = TARGET_SIGSTKFLT,
60 #endif
61 [SIGCHLD] = TARGET_SIGCHLD,
62 [SIGCONT] = TARGET_SIGCONT,
63 [SIGSTOP] = TARGET_SIGSTOP,
64 [SIGTSTP] = TARGET_SIGTSTP,
65 [SIGTTIN] = TARGET_SIGTTIN,
66 [SIGTTOU] = TARGET_SIGTTOU,
67 [SIGURG] = TARGET_SIGURG,
68 [SIGXCPU] = TARGET_SIGXCPU,
69 [SIGXFSZ] = TARGET_SIGXFSZ,
70 [SIGVTALRM] = TARGET_SIGVTALRM,
71 [SIGPROF] = TARGET_SIGPROF,
72 [SIGWINCH] = TARGET_SIGWINCH,
73 [SIGIO] = TARGET_SIGIO,
74 [SIGPWR] = TARGET_SIGPWR,
75 [SIGSYS] = TARGET_SIGSYS,
76 /* next signals stay the same */
77 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
78 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
79 To fix this properly we need to do manual signal delivery multiplexed
80 over a single host signal. */
81 [__SIGRTMIN] = __SIGRTMAX,
82 [__SIGRTMAX] = __SIGRTMIN,
83 };
84 static uint8_t target_to_host_signal_table[_NSIG];
85
86 int host_to_target_signal(int sig)
87 {
88 if (sig < 0 || sig >= _NSIG)
89 return sig;
90 return host_to_target_signal_table[sig];
91 }
92
93 int target_to_host_signal(int sig)
94 {
95 if (sig < 0 || sig >= _NSIG)
96 return sig;
97 return target_to_host_signal_table[sig];
98 }
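/* Editor's note: host_to_target_signal() and target_to_host_signal()
 * are mutual inverses by construction (see signal_init() below), even
 * across the SIGRTMIN/SIGRTMAX swap above. A minimal sketch of a
 * hypothetical debug self-check, assuming it runs after signal_init():
 */
#if 0
static void check_signal_table_round_trip(void)
{
    int i;
    for (i = 1; i < _NSIG; i++) {
        /* e.g. __SIGRTMIN maps to __SIGRTMAX and back to __SIGRTMIN */
        assert(target_to_host_signal(host_to_target_signal(i)) == i);
    }
}
#endif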
99
100 static inline void target_sigaddset(target_sigset_t *set, int signum)
101 {
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105 }
106
107 static inline int target_sigismember(const target_sigset_t *set, int signum)
108 {
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112 }
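/* Editor's note: the guest sigset mirrors the kernel layout: signal N
 * lives at bit (N - 1) % TARGET_NSIG_BPW of word (N - 1) / TARGET_NSIG_BPW.
 * A minimal illustration, using hypothetical signal number 33:
 */
#if 0
static void sigset_layout_demo(void)
{
    target_sigset_t set = { { 0 } };

    target_sigaddset(&set, 33);
    assert(target_sigismember(&set, 33));
    /* with 32-bit words, signal 33 is bit 0 of set.sig[1] */
    assert(set.sig[(33 - 1) / TARGET_NSIG_BPW] ==
           (abi_ulong)1 << ((33 - 1) % TARGET_NSIG_BPW));
}
#endif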
113
114 void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
116 {
117 int i;
118 target_sigemptyset(d);
119 for (i = 1; i <= TARGET_NSIG; i++) {
120 if (sigismember(s, i)) {
121 target_sigaddset(d, host_to_target_signal(i));
122 }
123 }
124 }
125
126 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
127 {
128 target_sigset_t d1;
129 int i;
130
131 host_to_target_sigset_internal(&d1, s);
132         for (i = 0; i < TARGET_NSIG_WORDS; i++)
133 d->sig[i] = tswapal(d1.sig[i]);
134 }
135
136 void target_to_host_sigset_internal(sigset_t *d,
137 const target_sigset_t *s)
138 {
139 int i;
140 sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (target_sigismember(s, i)) {
143 sigaddset(d, target_to_host_signal(i));
144 }
145 }
146 }
147
148 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
149 {
150 target_sigset_t s1;
151 int i;
152
153         for (i = 0; i < TARGET_NSIG_WORDS; i++)
154 s1.sig[i] = tswapal(s->sig[i]);
155 target_to_host_sigset_internal(d, &s1);
156 }
157
158 void host_to_target_old_sigset(abi_ulong *old_sigset,
159 const sigset_t *sigset)
160 {
161 target_sigset_t d;
162 host_to_target_sigset(&d, sigset);
163 *old_sigset = d.sig[0];
164 }
165
166 void target_to_host_old_sigset(sigset_t *sigset,
167 const abi_ulong *old_sigset)
168 {
169 target_sigset_t d;
170 int i;
171
172 d.sig[0] = *old_sigset;
173         for (i = 1; i < TARGET_NSIG_WORDS; i++)
174 d.sig[i] = 0;
175 target_to_host_sigset(sigset, &d);
176 }
177
178 int block_signals(void)
179 {
180 TaskState *ts = (TaskState *)thread_cpu->opaque;
181 sigset_t set;
182
183 /* It's OK to block everything including SIGSEGV, because we won't
184 * run any further guest code before unblocking signals in
185 * process_pending_signals().
186 */
187 sigfillset(&set);
188 sigprocmask(SIG_SETMASK, &set, 0);
189
190 return atomic_xchg(&ts->signal_pending, 1);
191 }
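/* Editor's sketch: callers treat a non-zero return from block_signals()
 * as "a guest signal is already pending" and turn it into
 * -TARGET_ERESTARTSYS, exactly as do_sigprocmask() and do_sigaction()
 * below do. A hypothetical caller, not code from this file:
 */
#if 0
static abi_long guest_signal_state_update(void)
{
    if (block_signals()) {
        return -TARGET_ERESTARTSYS; /* deliver the signal, then retry */
    }
    /* ... safely mutate guest signal state here ... */
    return 0;
}
#endif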
192
193 /* Wrapper for sigprocmask function
194 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
195  * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
196 * a signal was already pending and the syscall must be restarted, or
197 * 0 on success.
198 * If set is NULL, this is guaranteed not to fail.
199 */
200 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
201 {
202 TaskState *ts = (TaskState *)thread_cpu->opaque;
203
204 if (oldset) {
205 *oldset = ts->signal_mask;
206 }
207
208 if (set) {
209 int i;
210
211 if (block_signals()) {
212 return -TARGET_ERESTARTSYS;
213 }
214
215 switch (how) {
216 case SIG_BLOCK:
217 sigorset(&ts->signal_mask, &ts->signal_mask, set);
218 break;
219 case SIG_UNBLOCK:
220 for (i = 1; i <= NSIG; ++i) {
221 if (sigismember(set, i)) {
222 sigdelset(&ts->signal_mask, i);
223 }
224 }
225 break;
226 case SIG_SETMASK:
227 ts->signal_mask = *set;
228 break;
229 default:
230 g_assert_not_reached();
231 }
232
233 /* Silently ignore attempts to change blocking status of KILL or STOP */
234 sigdelset(&ts->signal_mask, SIGKILL);
235 sigdelset(&ts->signal_mask, SIGSTOP);
236 }
237 return 0;
238 }
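/* Editor's note: the read-only query path (set == NULL) never takes the
 * block_signals() branch and so cannot fail; sparc64_get_context() below
 * relies on exactly this. Hypothetical usage:
 */
#if 0
static void query_guest_sigmask(void)
{
    sigset_t cur;
    int err = do_sigprocmask(0, NULL, &cur); /* read-only query */
    assert(err == 0); /* guaranteed by the contract above */
}
#endif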
239
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242 * caller is assumed to have called block_signals() already.
243 */
244 void set_sigmask(const sigset_t *set)
245 {
246 TaskState *ts = (TaskState *)thread_cpu->opaque;
247
248 ts->signal_mask = *set;
249 }
250 #endif
251
252 /* siginfo conversion */
253
254 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
255 const siginfo_t *info)
256 {
257 int sig = host_to_target_signal(info->si_signo);
258 int si_code = info->si_code;
259 int si_type;
260 tinfo->si_signo = sig;
261 tinfo->si_errno = 0;
262 tinfo->si_code = info->si_code;
263
264 /* This memset serves two purposes:
265 * (1) ensure we don't leak random junk to the guest later
266 * (2) placate false positives from gcc about fields
267 * being used uninitialized if it chooses to inline both this
268 * function and tswap_siginfo() into host_to_target_siginfo().
269 */
270 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
271
272 /* This is awkward, because we have to use a combination of
273 * the si_code and si_signo to figure out which of the union's
274 * members are valid. (Within the host kernel it is always possible
275 * to tell, but the kernel carefully avoids giving userspace the
276 * high 16 bits of si_code, so we don't have the information to
277 * do this the easy way...) We therefore make our best guess,
278 * bearing in mind that a guest can spoof most of the si_codes
279 * via rt_sigqueueinfo() if it likes.
280 *
281 * Once we have made our guess, we record it in the top 16 bits of
282 * the si_code, so that tswap_siginfo() later can use it.
283 * tswap_siginfo() will strip these top bits out before writing
284 * si_code to the guest (sign-extending the lower bits).
285 */
286
287 switch (si_code) {
288 case SI_USER:
289 case SI_TKILL:
290 case SI_KERNEL:
291         /* Sent via kill(), tkill() or tgkill(), or directly from the kernel.
292 * These are the only unspoofable si_code values.
293 */
294 tinfo->_sifields._kill._pid = info->si_pid;
295 tinfo->_sifields._kill._uid = info->si_uid;
296 si_type = QEMU_SI_KILL;
297 break;
298 default:
299         /* Everything else is spoofable. Make a best guess based on the signal */
300 switch (sig) {
301 case TARGET_SIGCHLD:
302 tinfo->_sifields._sigchld._pid = info->si_pid;
303 tinfo->_sifields._sigchld._uid = info->si_uid;
304 tinfo->_sifields._sigchld._status
305 = host_to_target_waitstatus(info->si_status);
306 tinfo->_sifields._sigchld._utime = info->si_utime;
307 tinfo->_sifields._sigchld._stime = info->si_stime;
308 si_type = QEMU_SI_CHLD;
309 break;
310 case TARGET_SIGIO:
311 tinfo->_sifields._sigpoll._band = info->si_band;
312 tinfo->_sifields._sigpoll._fd = info->si_fd;
313 si_type = QEMU_SI_POLL;
314 break;
315 default:
316 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
317 tinfo->_sifields._rt._pid = info->si_pid;
318 tinfo->_sifields._rt._uid = info->si_uid;
319 /* XXX: potential problem if 64 bit */
320 tinfo->_sifields._rt._sigval.sival_ptr
321 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
322 si_type = QEMU_SI_RT;
323 break;
324 }
325 break;
326 }
327
328 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
329 }
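/* Editor's sketch of the si_code packing used above: the QEMU_SI_* type
 * guess is stashed in the top 16 bits and stripped back out (with the
 * low half sign-extended) by tswap_siginfo(). A hypothetical round-trip
 * check, assuming 32-bit si_code values:
 */
#if 0
static void si_code_packing_demo(void)
{
    int packed = deposit32(TARGET_SI_KERNEL, 16, 16, QEMU_SI_KILL);

    assert(extract32(packed, 16, 16) == QEMU_SI_KILL);
    assert(sextract32(packed, 0, 16) == TARGET_SI_KERNEL);
}
#endif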
330
331 void tswap_siginfo(target_siginfo_t *tinfo,
332 const target_siginfo_t *info)
333 {
334 int si_type = extract32(info->si_code, 16, 16);
335 int si_code = sextract32(info->si_code, 0, 16);
336
337 __put_user(info->si_signo, &tinfo->si_signo);
338 __put_user(info->si_errno, &tinfo->si_errno);
339 __put_user(si_code, &tinfo->si_code);
340
341 /* We can use our internal marker of which fields in the structure
342 * are valid, rather than duplicating the guesswork of
343 * host_to_target_siginfo_noswap() here.
344 */
345 switch (si_type) {
346 case QEMU_SI_KILL:
347 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
348 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
349 break;
350 case QEMU_SI_TIMER:
351 __put_user(info->_sifields._timer._timer1,
352 &tinfo->_sifields._timer._timer1);
353 __put_user(info->_sifields._timer._timer2,
354 &tinfo->_sifields._timer._timer2);
355 break;
356 case QEMU_SI_POLL:
357 __put_user(info->_sifields._sigpoll._band,
358 &tinfo->_sifields._sigpoll._band);
359 __put_user(info->_sifields._sigpoll._fd,
360 &tinfo->_sifields._sigpoll._fd);
361 break;
362 case QEMU_SI_FAULT:
363 __put_user(info->_sifields._sigfault._addr,
364 &tinfo->_sifields._sigfault._addr);
365 break;
366 case QEMU_SI_CHLD:
367 __put_user(info->_sifields._sigchld._pid,
368 &tinfo->_sifields._sigchld._pid);
369 __put_user(info->_sifields._sigchld._uid,
370 &tinfo->_sifields._sigchld._uid);
371 __put_user(info->_sifields._sigchld._status,
372 &tinfo->_sifields._sigchld._status);
373 __put_user(info->_sifields._sigchld._utime,
374 &tinfo->_sifields._sigchld._utime);
375 __put_user(info->_sifields._sigchld._stime,
376 &tinfo->_sifields._sigchld._stime);
377 break;
378 case QEMU_SI_RT:
379 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
380 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
381 __put_user(info->_sifields._rt._sigval.sival_ptr,
382 &tinfo->_sifields._rt._sigval.sival_ptr);
383 break;
384 default:
385 g_assert_not_reached();
386 }
387 }
388
389 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
390 {
391 target_siginfo_t tgt_tmp;
392 host_to_target_siginfo_noswap(&tgt_tmp, info);
393 tswap_siginfo(tinfo, &tgt_tmp);
394 }
395
396 /* XXX: we assume that only POSIX RT signals are used. */
397 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
398 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
399 {
400 /* This conversion is used only for the rt_sigqueueinfo syscall,
401 * and so we know that the _rt fields are the valid ones.
402 */
403 abi_ulong sival_ptr;
404
405 __get_user(info->si_signo, &tinfo->si_signo);
406 __get_user(info->si_errno, &tinfo->si_errno);
407 __get_user(info->si_code, &tinfo->si_code);
408 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
409 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
410 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
411 info->si_value.sival_ptr = (void *)(long)sival_ptr;
412 }
413
414 static int fatal_signal(int sig)
415 {
416 switch (sig) {
417 case TARGET_SIGCHLD:
418 case TARGET_SIGURG:
419 case TARGET_SIGWINCH:
420 /* Ignored by default. */
421 return 0;
422 case TARGET_SIGCONT:
423 case TARGET_SIGSTOP:
424 case TARGET_SIGTSTP:
425 case TARGET_SIGTTIN:
426 case TARGET_SIGTTOU:
427 /* Job control signals. */
428 return 0;
429 default:
430 return 1;
431 }
432 }
433
434 /* returns 1 if the given signal should dump core if not handled */
435 static int core_dump_signal(int sig)
436 {
437 switch (sig) {
438 case TARGET_SIGABRT:
439 case TARGET_SIGFPE:
440 case TARGET_SIGILL:
441 case TARGET_SIGQUIT:
442 case TARGET_SIGSEGV:
443 case TARGET_SIGTRAP:
444 case TARGET_SIGBUS:
445         return 1;
446 default:
447         return 0;
448 }
449 }
450
451 void signal_init(void)
452 {
453 TaskState *ts = (TaskState *)thread_cpu->opaque;
454 struct sigaction act;
455 struct sigaction oact;
456 int i, j;
457 int host_sig;
458
459 /* generate signal conversion tables */
460     for (i = 1; i < _NSIG; i++) {
461 if (host_to_target_signal_table[i] == 0)
462 host_to_target_signal_table[i] = i;
463 }
464     for (i = 1; i < _NSIG; i++) {
465 j = host_to_target_signal_table[i];
466 target_to_host_signal_table[j] = i;
467 }
468
469 /* Set the signal mask from the host mask. */
470 sigprocmask(0, 0, &ts->signal_mask);
471
472 /* set all host signal handlers. ALL signals are blocked during
473 the handlers to serialize them. */
474 memset(sigact_table, 0, sizeof(sigact_table));
475
476 sigfillset(&act.sa_mask);
477 act.sa_flags = SA_SIGINFO;
478 act.sa_sigaction = host_signal_handler;
479     for (i = 1; i <= TARGET_NSIG; i++) {
480 host_sig = target_to_host_signal(i);
481 sigaction(host_sig, NULL, &oact);
482 if (oact.sa_sigaction == (void *)SIG_IGN) {
483 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
484 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
485 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
486 }
487 /* If there's already a handler installed then something has
488 gone horribly wrong, so don't even try to handle that case. */
489 /* Install some handlers for our own use. We need at least
490            SIGSEGV and SIGBUS, to detect exceptions. We cannot just
491 trap all signals because it affects syscall interrupt
492 behavior. But do trap all default-fatal signals. */
493         if (fatal_signal(i))
494 sigaction(host_sig, &act, NULL);
495 }
496 }
497
498 /* Force a synchronously taken signal. The kernel force_sig() function
499 * also forces the signal to "not blocked, not ignored", but for QEMU
500 * that work is done in process_pending_signals().
501 */
502 void force_sig(int sig)
503 {
504 CPUState *cpu = thread_cpu;
505 CPUArchState *env = cpu->env_ptr;
506 target_siginfo_t info;
507
508 info.si_signo = sig;
509 info.si_errno = 0;
510 info.si_code = TARGET_SI_KERNEL;
511 info._sifields._kill._pid = 0;
512 info._sifields._kill._uid = 0;
513 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
514 }
515
516 /* Force a SIGSEGV if we couldn't write to memory trying to set
517 * up the signal frame. oldsig is the signal we were trying to handle
518 * at the point of failure.
519 */
520 #if !defined(TARGET_RISCV)
521 void force_sigsegv(int oldsig)
522 {
523 if (oldsig == SIGSEGV) {
524 /* Make sure we don't try to deliver the signal again; this will
525 * end up with handle_pending_signal() calling dump_core_and_abort().
526 */
527 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
528 }
529 force_sig(TARGET_SIGSEGV);
530 }
531
532 #endif
533
534 /* abort execution with signal */
535 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
536 {
537 CPUState *cpu = thread_cpu;
538 CPUArchState *env = cpu->env_ptr;
539 TaskState *ts = (TaskState *)cpu->opaque;
540 int host_sig, core_dumped = 0;
541 struct sigaction act;
542
543 host_sig = target_to_host_signal(target_sig);
544 trace_user_force_sig(env, target_sig, host_sig);
545 gdb_signalled(env, target_sig);
546
547 /* dump core if supported by target binary format */
548 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
549 stop_all_tasks();
550 core_dumped =
551 ((*ts->bprm->core_dump)(target_sig, env) == 0);
552 }
553 if (core_dumped) {
554         /* we already dumped the core of the target process; we don't want
555          * a coredump of qemu itself */
556 struct rlimit nodump;
557 getrlimit(RLIMIT_CORE, &nodump);
558         nodump.rlim_cur = 0;
559 setrlimit(RLIMIT_CORE, &nodump);
560 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
561                        target_sig, strsignal(host_sig), "core dumped");
562 }
563
564 /* The proper exit code for dying from an uncaught signal is
565 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
566 * a negative value. To get the proper exit code we need to
567 * actually die from an uncaught signal. Here the default signal
568      * handler is installed, we send ourselves the signal and wait for
569 * it to arrive. */
570 sigfillset(&act.sa_mask);
571 act.sa_handler = SIG_DFL;
572 act.sa_flags = 0;
573 sigaction(host_sig, &act, NULL);
574
575 /* For some reason raise(host_sig) doesn't send the signal when
576 * statically linked on x86-64. */
577 kill(getpid(), host_sig);
578
579 /* Make sure the signal isn't masked (just reuse the mask inside
580 of act) */
581 sigdelset(&act.sa_mask, host_sig);
582 sigsuspend(&act.sa_mask);
583
584 /* unreachable */
585 abort();
586 }
587
588 /* queue a signal so that it will be sent to the virtual CPU as soon
589 as possible */
590 int queue_signal(CPUArchState *env, int sig, int si_type,
591 target_siginfo_t *info)
592 {
593 CPUState *cpu = ENV_GET_CPU(env);
594 TaskState *ts = cpu->opaque;
595
596 trace_user_queue_signal(env, sig);
597
598 info->si_code = deposit32(info->si_code, 16, 16, si_type);
599
600 ts->sync_signal.info = *info;
601 ts->sync_signal.pending = sig;
602 /* signal that a new signal is pending */
603 atomic_set(&ts->signal_pending, 1);
604 return 1; /* indicates that the signal was queued */
605 }
606
607 #ifndef HAVE_SAFE_SYSCALL
608 static inline void rewind_if_in_safe_syscall(void *puc)
609 {
610 /* Default version: never rewind */
611 }
612 #endif
613
614 static void host_signal_handler(int host_signum, siginfo_t *info,
615 void *puc)
616 {
617 CPUArchState *env = thread_cpu->env_ptr;
618 CPUState *cpu = ENV_GET_CPU(env);
619 TaskState *ts = cpu->opaque;
620
621 int sig;
622 target_siginfo_t tinfo;
623 ucontext_t *uc = puc;
624 struct emulated_sigtable *k;
625
626     /* the CPU emulator uses some host signals to detect exceptions;
627        we forward those signals to it */
628 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
629 && info->si_code > 0) {
630 if (cpu_signal_handler(host_signum, info, puc))
631 return;
632 }
633
634 /* get target signal number */
635 sig = host_to_target_signal(host_signum);
636 if (sig < 1 || sig > TARGET_NSIG)
637 return;
638 trace_user_host_signal(env, host_signum, sig);
639
640 rewind_if_in_safe_syscall(puc);
641
642 host_to_target_siginfo_noswap(&tinfo, info);
643 k = &ts->sigtab[sig - 1];
644 k->info = tinfo;
645 k->pending = sig;
646 ts->signal_pending = 1;
647
648     /* Block host signals until the target signal handler is entered. We
649 * can't block SIGSEGV or SIGBUS while we're executing guest
650 * code in case the guest code provokes one in the window between
651 * now and it getting out to the main loop. Signals will be
652 * unblocked again in process_pending_signals().
653 *
654 * WARNING: we cannot use sigfillset() here because the uc_sigmask
655 * field is a kernel sigset_t, which is much smaller than the
656 * libc sigset_t which sigfillset() operates on. Using sigfillset()
657 * would write 0xff bytes off the end of the structure and trash
658 * data on the struct.
659 * We can't use sizeof(uc->uc_sigmask) either, because the libc
660 * headers define the struct field with the wrong (too large) type.
661 */
662 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
663 sigdelset(&uc->uc_sigmask, SIGSEGV);
664 sigdelset(&uc->uc_sigmask, SIGBUS);
665
666 /* interrupt the virtual CPU as soon as possible */
667 cpu_exit(thread_cpu);
668 }
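/* Editor's note on SIGSET_T_SIZE above: glibc declares uc_sigmask as a
 * 128-byte sigset_t, but the frame the kernel actually puts on the
 * signal stack only holds the kernel's _NSIG / 8 bytes (8 on x86-64),
 * which is why an explicit SIGSET_T_SIZE is used instead of sizeof(). */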
669
670 /* do_sigaltstack() returns target values and errnos. */
671 /* compare linux/kernel/signal.c:do_sigaltstack() */
672 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
673 {
674 int ret;
675 struct target_sigaltstack oss;
676
677 /* XXX: test errors */
678     if (uoss_addr)
679 {
680 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
681 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
682 __put_user(sas_ss_flags(sp), &oss.ss_flags);
683 }
684
685     if (uss_addr)
686 {
687 struct target_sigaltstack *uss;
688 struct target_sigaltstack ss;
689 size_t minstacksize = TARGET_MINSIGSTKSZ;
690
691 #if defined(TARGET_PPC64)
692 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
693 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
694 if (get_ppc64_abi(image) > 1) {
695 minstacksize = 4096;
696 }
697 #endif
698
699 ret = -TARGET_EFAULT;
700 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
701 goto out;
702 }
703 __get_user(ss.ss_sp, &uss->ss_sp);
704 __get_user(ss.ss_size, &uss->ss_size);
705 __get_user(ss.ss_flags, &uss->ss_flags);
706 unlock_user_struct(uss, uss_addr, 0);
707
708 ret = -TARGET_EPERM;
709 if (on_sig_stack(sp))
710 goto out;
711
712 ret = -TARGET_EINVAL;
713 if (ss.ss_flags != TARGET_SS_DISABLE
714 && ss.ss_flags != TARGET_SS_ONSTACK
715 && ss.ss_flags != 0)
716 goto out;
717
718 if (ss.ss_flags == TARGET_SS_DISABLE) {
719 ss.ss_size = 0;
720 ss.ss_sp = 0;
721 } else {
722 ret = -TARGET_ENOMEM;
723 if (ss.ss_size < minstacksize) {
724 goto out;
725 }
726 }
727
728 target_sigaltstack_used.ss_sp = ss.ss_sp;
729 target_sigaltstack_used.ss_size = ss.ss_size;
730 }
731
732 if (uoss_addr) {
733 ret = -TARGET_EFAULT;
734 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
735 goto out;
736 }
737
738 ret = 0;
739 out:
740 return ret;
741 }
742
743 /* do_sigaction() returns target values and host errnos */
744 int do_sigaction(int sig, const struct target_sigaction *act,
745 struct target_sigaction *oact)
746 {
747 struct target_sigaction *k;
748 struct sigaction act1;
749 int host_sig;
750 int ret = 0;
751
752 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
753 return -TARGET_EINVAL;
754 }
755
756 if (block_signals()) {
757 return -TARGET_ERESTARTSYS;
758 }
759
760 k = &sigact_table[sig - 1];
761 if (oact) {
762 __put_user(k->_sa_handler, &oact->_sa_handler);
763 __put_user(k->sa_flags, &oact->sa_flags);
764 #ifdef TARGET_ARCH_HAS_SA_RESTORER
765 __put_user(k->sa_restorer, &oact->sa_restorer);
766 #endif
767 /* Not swapped. */
768 oact->sa_mask = k->sa_mask;
769 }
770 if (act) {
771 /* FIXME: This is not threadsafe. */
772 __get_user(k->_sa_handler, &act->_sa_handler);
773 __get_user(k->sa_flags, &act->sa_flags);
774 #ifdef TARGET_ARCH_HAS_SA_RESTORER
775 __get_user(k->sa_restorer, &act->sa_restorer);
776 #endif
777 /* To be swapped in target_to_host_sigset. */
778 k->sa_mask = act->sa_mask;
779
780         /* we update the host Linux signal state */
781 host_sig = target_to_host_signal(sig);
782 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
783 sigfillset(&act1.sa_mask);
784 act1.sa_flags = SA_SIGINFO;
785 if (k->sa_flags & TARGET_SA_RESTART)
786 act1.sa_flags |= SA_RESTART;
787 /* NOTE: it is important to update the host kernel signal
788                ignore state to avoid getting unexpectedly interrupted
789 syscalls */
790 if (k->_sa_handler == TARGET_SIG_IGN) {
791 act1.sa_sigaction = (void *)SIG_IGN;
792 } else if (k->_sa_handler == TARGET_SIG_DFL) {
793                 if (fatal_signal(sig))
794 act1.sa_sigaction = host_signal_handler;
795 else
796 act1.sa_sigaction = (void *)SIG_DFL;
797 } else {
798 act1.sa_sigaction = host_signal_handler;
799 }
800 ret = sigaction(host_sig, &act1, NULL);
801 }
802 }
803 return ret;
804 }
805
806 #if defined(TARGET_I386)
807 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
808
809 struct target_fpreg {
810 uint16_t significand[4];
811 uint16_t exponent;
812 };
813
814 struct target_fpxreg {
815 uint16_t significand[4];
816 uint16_t exponent;
817 uint16_t padding[3];
818 };
819
820 struct target_xmmreg {
821 uint32_t element[4];
822 };
823
824 struct target_fpstate_32 {
825 /* Regular FPU environment */
826 uint32_t cw;
827 uint32_t sw;
828 uint32_t tag;
829 uint32_t ipoff;
830 uint32_t cssel;
831 uint32_t dataoff;
832 uint32_t datasel;
833 struct target_fpreg st[8];
834 uint16_t status;
835 uint16_t magic; /* 0xffff = regular FPU data only */
836
837 /* FXSR FPU environment */
838 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
839 uint32_t mxcsr;
840 uint32_t reserved;
841 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
842 struct target_xmmreg xmm[8];
843 uint32_t padding[56];
844 };
845
846 struct target_fpstate_64 {
847 /* FXSAVE format */
848 uint16_t cw;
849 uint16_t sw;
850 uint16_t twd;
851 uint16_t fop;
852 uint64_t rip;
853 uint64_t rdp;
854 uint32_t mxcsr;
855 uint32_t mxcsr_mask;
856 uint32_t st_space[32];
857 uint32_t xmm_space[64];
858 uint32_t reserved[24];
859 };
860
861 #ifndef TARGET_X86_64
862 # define target_fpstate target_fpstate_32
863 #else
864 # define target_fpstate target_fpstate_64
865 #endif
866
867 struct target_sigcontext_32 {
868 uint16_t gs, __gsh;
869 uint16_t fs, __fsh;
870 uint16_t es, __esh;
871 uint16_t ds, __dsh;
872 uint32_t edi;
873 uint32_t esi;
874 uint32_t ebp;
875 uint32_t esp;
876 uint32_t ebx;
877 uint32_t edx;
878 uint32_t ecx;
879 uint32_t eax;
880 uint32_t trapno;
881 uint32_t err;
882 uint32_t eip;
883 uint16_t cs, __csh;
884 uint32_t eflags;
885 uint32_t esp_at_signal;
886 uint16_t ss, __ssh;
887 uint32_t fpstate; /* pointer */
888 uint32_t oldmask;
889 uint32_t cr2;
890 };
891
892 struct target_sigcontext_64 {
893 uint64_t r8;
894 uint64_t r9;
895 uint64_t r10;
896 uint64_t r11;
897 uint64_t r12;
898 uint64_t r13;
899 uint64_t r14;
900 uint64_t r15;
901
902 uint64_t rdi;
903 uint64_t rsi;
904 uint64_t rbp;
905 uint64_t rbx;
906 uint64_t rdx;
907 uint64_t rax;
908 uint64_t rcx;
909 uint64_t rsp;
910 uint64_t rip;
911
912 uint64_t eflags;
913
914 uint16_t cs;
915 uint16_t gs;
916 uint16_t fs;
917 uint16_t ss;
918
919 uint64_t err;
920 uint64_t trapno;
921 uint64_t oldmask;
922 uint64_t cr2;
923
924 uint64_t fpstate; /* pointer */
925 uint64_t padding[8];
926 };
927
928 #ifndef TARGET_X86_64
929 # define target_sigcontext target_sigcontext_32
930 #else
931 # define target_sigcontext target_sigcontext_64
932 #endif
933
934 /* see Linux/include/uapi/asm-generic/ucontext.h */
935 struct target_ucontext {
936 abi_ulong tuc_flags;
937 abi_ulong tuc_link;
938 target_stack_t tuc_stack;
939 struct target_sigcontext tuc_mcontext;
940 target_sigset_t tuc_sigmask; /* mask last for extensibility */
941 };
942
943 #ifndef TARGET_X86_64
944 struct sigframe {
945 abi_ulong pretcode;
946 int sig;
947 struct target_sigcontext sc;
948 struct target_fpstate fpstate;
949 abi_ulong extramask[TARGET_NSIG_WORDS-1];
950 char retcode[8];
951 };
952
953 struct rt_sigframe {
954 abi_ulong pretcode;
955 int sig;
956 abi_ulong pinfo;
957 abi_ulong puc;
958 struct target_siginfo info;
959 struct target_ucontext uc;
960 struct target_fpstate fpstate;
961 char retcode[8];
962 };
963
964 #else
965
966 struct rt_sigframe {
967 abi_ulong pretcode;
968 struct target_ucontext uc;
969 struct target_siginfo info;
970 struct target_fpstate fpstate;
971 };
972
973 #endif
974
975 /*
976 * Set up a signal frame.
977 */
978
979 /* XXX: save x87 state */
980 static void setup_sigcontext(struct target_sigcontext *sc,
981 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
982 abi_ulong fpstate_addr)
983 {
984 CPUState *cs = CPU(x86_env_get_cpu(env));
985 #ifndef TARGET_X86_64
986 uint16_t magic;
987
988 /* already locked in setup_frame() */
989 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
990 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
991 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
992 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
993 __put_user(env->regs[R_EDI], &sc->edi);
994 __put_user(env->regs[R_ESI], &sc->esi);
995 __put_user(env->regs[R_EBP], &sc->ebp);
996 __put_user(env->regs[R_ESP], &sc->esp);
997 __put_user(env->regs[R_EBX], &sc->ebx);
998 __put_user(env->regs[R_EDX], &sc->edx);
999 __put_user(env->regs[R_ECX], &sc->ecx);
1000 __put_user(env->regs[R_EAX], &sc->eax);
1001 __put_user(cs->exception_index, &sc->trapno);
1002 __put_user(env->error_code, &sc->err);
1003 __put_user(env->eip, &sc->eip);
1004 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1005 __put_user(env->eflags, &sc->eflags);
1006 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1007 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1008
1009 cpu_x86_fsave(env, fpstate_addr, 1);
1010 fpstate->status = fpstate->sw;
1011 magic = 0xffff;
1012 __put_user(magic, &fpstate->magic);
1013 __put_user(fpstate_addr, &sc->fpstate);
1014
1015     /* non-iBCS2 extensions. */
1016 __put_user(mask, &sc->oldmask);
1017 __put_user(env->cr[2], &sc->cr2);
1018 #else
1019 __put_user(env->regs[R_EDI], &sc->rdi);
1020 __put_user(env->regs[R_ESI], &sc->rsi);
1021 __put_user(env->regs[R_EBP], &sc->rbp);
1022 __put_user(env->regs[R_ESP], &sc->rsp);
1023 __put_user(env->regs[R_EBX], &sc->rbx);
1024 __put_user(env->regs[R_EDX], &sc->rdx);
1025 __put_user(env->regs[R_ECX], &sc->rcx);
1026 __put_user(env->regs[R_EAX], &sc->rax);
1027
1028 __put_user(env->regs[8], &sc->r8);
1029 __put_user(env->regs[9], &sc->r9);
1030 __put_user(env->regs[10], &sc->r10);
1031 __put_user(env->regs[11], &sc->r11);
1032 __put_user(env->regs[12], &sc->r12);
1033 __put_user(env->regs[13], &sc->r13);
1034 __put_user(env->regs[14], &sc->r14);
1035 __put_user(env->regs[15], &sc->r15);
1036
1037 __put_user(cs->exception_index, &sc->trapno);
1038 __put_user(env->error_code, &sc->err);
1039 __put_user(env->eip, &sc->rip);
1040
1041 __put_user(env->eflags, &sc->eflags);
1042 __put_user(env->segs[R_CS].selector, &sc->cs);
1043 __put_user((uint16_t)0, &sc->gs);
1044 __put_user((uint16_t)0, &sc->fs);
1045 __put_user(env->segs[R_SS].selector, &sc->ss);
1046
1047 __put_user(mask, &sc->oldmask);
1048 __put_user(env->cr[2], &sc->cr2);
1049
1050 /* fpstate_addr must be 16 byte aligned for fxsave */
1051 assert(!(fpstate_addr & 0xf));
1052
1053 cpu_x86_fxsave(env, fpstate_addr);
1054 __put_user(fpstate_addr, &sc->fpstate);
1055 #endif
1056 }
1057
1058 /*
1059  * Determine which stack to use.
1060 */
1061
1062 static inline abi_ulong
1063 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1064 {
1065 unsigned long esp;
1066
1067 /* Default to using normal stack */
1068 esp = env->regs[R_ESP];
1069 #ifdef TARGET_X86_64
1070 esp -= 128; /* this is the redzone */
1071 #endif
1072
1073 /* This is the X/Open sanctioned signal stack switching. */
1074 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1075 if (sas_ss_flags(esp) == 0) {
1076 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1077 }
1078 } else {
1079 #ifndef TARGET_X86_64
1080 /* This is the legacy signal stack switching. */
1081 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1082 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1083 ka->sa_restorer) {
1084 esp = (unsigned long) ka->sa_restorer;
1085 }
1086 #endif
1087 }
1088
1089 #ifndef TARGET_X86_64
1090 return (esp - frame_size) & -8ul;
1091 #else
1092 return ((esp - frame_size) & (~15ul)) - 8;
1093 #endif
1094 }
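/* Editor's note on the 64-bit case above: aligning down to 16 bytes and
 * then subtracting 8 reproduces the stack state at a normal function
 * entry, where the pushed return address leaves %rsp such that
 * %rsp + 8 is 16-byte aligned. A hypothetical self-check, for the
 * TARGET_X86_64 branch only:
 */
#if 0
static void check_frame_alignment(struct target_sigaction *ka,
                                  CPUX86State *env)
{
    abi_ulong sp = get_sigframe(ka, env, sizeof(struct rt_sigframe));

    assert((sp + 8) % 16 == 0); /* handler sees call-style alignment */
}
#endif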
1095
1096 #ifndef TARGET_X86_64
1097 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1098 static void setup_frame(int sig, struct target_sigaction *ka,
1099 target_sigset_t *set, CPUX86State *env)
1100 {
1101 abi_ulong frame_addr;
1102 struct sigframe *frame;
1103 int i;
1104
1105 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1106 trace_user_setup_frame(env, frame_addr);
1107
1108 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1109 goto give_sigsegv;
1110
1111 __put_user(sig, &frame->sig);
1112
1113 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1114 frame_addr + offsetof(struct sigframe, fpstate));
1115
1116     for (i = 1; i < TARGET_NSIG_WORDS; i++) {
1117 __put_user(set->sig[i], &frame->extramask[i - 1]);
1118 }
1119
1120 /* Set up to return from userspace. If provided, use a stub
1121 already in userspace. */
1122 if (ka->sa_flags & TARGET_SA_RESTORER) {
1123 __put_user(ka->sa_restorer, &frame->pretcode);
1124 } else {
1125 uint16_t val16;
1126 abi_ulong retcode_addr;
1127 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1128 __put_user(retcode_addr, &frame->pretcode);
1129         /* This is popl %eax ; movl $TARGET_NR_sigreturn,%eax ; int $0x80 */
1130 val16 = 0xb858;
1131 __put_user(val16, (uint16_t *)(frame->retcode+0));
1132 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1133 val16 = 0x80cd;
1134 __put_user(val16, (uint16_t *)(frame->retcode+6));
1135 }
1136
1137 /* Set up registers for signal handler */
1138 env->regs[R_ESP] = frame_addr;
1139 env->eip = ka->_sa_handler;
1140
1141 cpu_x86_load_seg(env, R_DS, __USER_DS);
1142 cpu_x86_load_seg(env, R_ES, __USER_DS);
1143 cpu_x86_load_seg(env, R_SS, __USER_DS);
1144 cpu_x86_load_seg(env, R_CS, __USER_CS);
1145 env->eflags &= ~TF_MASK;
1146
1147 unlock_user_struct(frame, frame_addr, 1);
1148
1149 return;
1150
1151 give_sigsegv:
1152 force_sigsegv(sig);
1153 }
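/* Editor's note on the retcode bytes above: the stores are
 * little-endian, so 0xb858 lands in memory as 58 b8 ("popl %eax" then
 * the opcode of "movl $imm32,%eax"), followed by the 4-byte syscall
 * number and cd 80 ("int $0x80"). */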
1154 #endif
1155
1156 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1157 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1158 target_siginfo_t *info,
1159 target_sigset_t *set, CPUX86State *env)
1160 {
1161 abi_ulong frame_addr;
1162 #ifndef TARGET_X86_64
1163 abi_ulong addr;
1164 #endif
1165 struct rt_sigframe *frame;
1166 int i;
1167
1168 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1169 trace_user_setup_rt_frame(env, frame_addr);
1170
1171 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1172 goto give_sigsegv;
1173
1174 /* These fields are only in rt_sigframe on 32 bit */
1175 #ifndef TARGET_X86_64
1176 __put_user(sig, &frame->sig);
1177 addr = frame_addr + offsetof(struct rt_sigframe, info);
1178 __put_user(addr, &frame->pinfo);
1179 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1180 __put_user(addr, &frame->puc);
1181 #endif
1182 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1183 tswap_siginfo(&frame->info, info);
1184 }
1185
1186 /* Create the ucontext. */
1187 __put_user(0, &frame->uc.tuc_flags);
1188 __put_user(0, &frame->uc.tuc_link);
1189 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1190 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1191 &frame->uc.tuc_stack.ss_flags);
1192 __put_user(target_sigaltstack_used.ss_size,
1193 &frame->uc.tuc_stack.ss_size);
1194 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1195 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1196
1197     for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1198 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1199 }
1200
1201 /* Set up to return from userspace. If provided, use a stub
1202 already in userspace. */
1203 #ifndef TARGET_X86_64
1204 if (ka->sa_flags & TARGET_SA_RESTORER) {
1205 __put_user(ka->sa_restorer, &frame->pretcode);
1206 } else {
1207 uint16_t val16;
1208 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1209 __put_user(addr, &frame->pretcode);
1210         /* This is movl $TARGET_NR_rt_sigreturn,%eax ; int $0x80 */
1211 __put_user(0xb8, (char *)(frame->retcode+0));
1212 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1213 val16 = 0x80cd;
1214 __put_user(val16, (uint16_t *)(frame->retcode+5));
1215 }
1216 #else
1217 /* XXX: Would be slightly better to return -EFAULT here if test fails
1218 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1219 __put_user(ka->sa_restorer, &frame->pretcode);
1220 #endif
1221
1222 /* Set up registers for signal handler */
1223 env->regs[R_ESP] = frame_addr;
1224 env->eip = ka->_sa_handler;
1225
1226 #ifndef TARGET_X86_64
1227 env->regs[R_EAX] = sig;
1228 env->regs[R_EDX] = (unsigned long)&frame->info;
1229 env->regs[R_ECX] = (unsigned long)&frame->uc;
1230 #else
1231 env->regs[R_EAX] = 0;
1232 env->regs[R_EDI] = sig;
1233 env->regs[R_ESI] = (unsigned long)&frame->info;
1234 env->regs[R_EDX] = (unsigned long)&frame->uc;
1235 #endif
1236
1237 cpu_x86_load_seg(env, R_DS, __USER_DS);
1238 cpu_x86_load_seg(env, R_ES, __USER_DS);
1239 cpu_x86_load_seg(env, R_CS, __USER_CS);
1240 cpu_x86_load_seg(env, R_SS, __USER_DS);
1241 env->eflags &= ~TF_MASK;
1242
1243 unlock_user_struct(frame, frame_addr, 1);
1244
1245 return;
1246
1247 give_sigsegv:
1248 force_sigsegv(sig);
1249 }
1250
1251 static int
1252 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1253 {
1254 unsigned int err = 0;
1255 abi_ulong fpstate_addr;
1256 unsigned int tmpflags;
1257
1258 #ifndef TARGET_X86_64
1259 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1260 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1261 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1262 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1263
1264 env->regs[R_EDI] = tswapl(sc->edi);
1265 env->regs[R_ESI] = tswapl(sc->esi);
1266 env->regs[R_EBP] = tswapl(sc->ebp);
1267 env->regs[R_ESP] = tswapl(sc->esp);
1268 env->regs[R_EBX] = tswapl(sc->ebx);
1269 env->regs[R_EDX] = tswapl(sc->edx);
1270 env->regs[R_ECX] = tswapl(sc->ecx);
1271 env->regs[R_EAX] = tswapl(sc->eax);
1272
1273 env->eip = tswapl(sc->eip);
1274 #else
1275 env->regs[8] = tswapl(sc->r8);
1276 env->regs[9] = tswapl(sc->r9);
1277 env->regs[10] = tswapl(sc->r10);
1278 env->regs[11] = tswapl(sc->r11);
1279 env->regs[12] = tswapl(sc->r12);
1280 env->regs[13] = tswapl(sc->r13);
1281 env->regs[14] = tswapl(sc->r14);
1282 env->regs[15] = tswapl(sc->r15);
1283
1284 env->regs[R_EDI] = tswapl(sc->rdi);
1285 env->regs[R_ESI] = tswapl(sc->rsi);
1286 env->regs[R_EBP] = tswapl(sc->rbp);
1287 env->regs[R_EBX] = tswapl(sc->rbx);
1288 env->regs[R_EDX] = tswapl(sc->rdx);
1289 env->regs[R_EAX] = tswapl(sc->rax);
1290 env->regs[R_ECX] = tswapl(sc->rcx);
1291 env->regs[R_ESP] = tswapl(sc->rsp);
1292
1293 env->eip = tswapl(sc->rip);
1294 #endif
1295
1296 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1297 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1298
1299 tmpflags = tswapl(sc->eflags);
1300 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
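        /* Editor's note: 0x40DD5 selects the user-modifiable flags (CF,
         * PF, AF, ZF, SF, TF, DF, OF and AC); all other EFLAGS bits are
         * kept from the current guest state. */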
1301 // regs->orig_eax = -1; /* disable syscall checks */
1302
1303 fpstate_addr = tswapl(sc->fpstate);
1304 if (fpstate_addr != 0) {
1305 if (!access_ok(VERIFY_READ, fpstate_addr,
1306 sizeof(struct target_fpstate)))
1307 goto badframe;
1308 #ifndef TARGET_X86_64
1309 cpu_x86_frstor(env, fpstate_addr, 1);
1310 #else
1311 cpu_x86_fxrstor(env, fpstate_addr);
1312 #endif
1313 }
1314
1315 return err;
1316 badframe:
1317 return 1;
1318 }
1319
1320 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1321 #ifndef TARGET_X86_64
1322 long do_sigreturn(CPUX86State *env)
1323 {
1324 struct sigframe *frame;
1325 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1326 target_sigset_t target_set;
1327 sigset_t set;
1328 int i;
1329
1330 trace_user_do_sigreturn(env, frame_addr);
1331 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1332 goto badframe;
1333 /* set blocked signals */
1334 __get_user(target_set.sig[0], &frame->sc.oldmask);
1335     for (i = 1; i < TARGET_NSIG_WORDS; i++) {
1336 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1337 }
1338
1339 target_to_host_sigset_internal(&set, &target_set);
1340 set_sigmask(&set);
1341
1342 /* restore registers */
1343 if (restore_sigcontext(env, &frame->sc))
1344 goto badframe;
1345 unlock_user_struct(frame, frame_addr, 0);
1346 return -TARGET_QEMU_ESIGRETURN;
1347
1348 badframe:
1349 unlock_user_struct(frame, frame_addr, 0);
1350 force_sig(TARGET_SIGSEGV);
1351 return -TARGET_QEMU_ESIGRETURN;
1352 }
1353 #endif
1354
1355 long do_rt_sigreturn(CPUX86State *env)
1356 {
1357 abi_ulong frame_addr;
1358 struct rt_sigframe *frame;
1359 sigset_t set;
1360
1361 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1362 trace_user_do_rt_sigreturn(env, frame_addr);
1363 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1364 goto badframe;
1365 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1366 set_sigmask(&set);
1367
1368 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1369 goto badframe;
1370 }
1371
1372 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1373 get_sp_from_cpustate(env)) == -EFAULT) {
1374 goto badframe;
1375 }
1376
1377 unlock_user_struct(frame, frame_addr, 0);
1378 return -TARGET_QEMU_ESIGRETURN;
1379
1380 badframe:
1381 unlock_user_struct(frame, frame_addr, 0);
1382 force_sig(TARGET_SIGSEGV);
1383 return -TARGET_QEMU_ESIGRETURN;
1384 }
1385
1386 #elif defined(TARGET_SPARC)
1387
1388 #define __SUNOS_MAXWIN 31
1389
1390 /* This is what SunOS does, so shall I. */
1391 struct target_sigcontext {
1392 abi_ulong sigc_onstack; /* state to restore */
1393
1394 abi_ulong sigc_mask; /* sigmask to restore */
1395 abi_ulong sigc_sp; /* stack pointer */
1396 abi_ulong sigc_pc; /* program counter */
1397 abi_ulong sigc_npc; /* next program counter */
1398 abi_ulong sigc_psr; /* for condition codes etc */
1399 abi_ulong sigc_g1; /* User uses these two registers */
1400 abi_ulong sigc_o0; /* within the trampoline code. */
1401
1402         /* Now comes information regarding the user's window set
1403 * at the time of the signal.
1404 */
1405 abi_ulong sigc_oswins; /* outstanding windows */
1406
1407 /* stack ptrs for each regwin buf */
1408 char *sigc_spbuf[__SUNOS_MAXWIN];
1409
1410 /* Windows to restore after signal */
1411 struct {
1412 abi_ulong locals[8];
1413 abi_ulong ins[8];
1414 } sigc_wbuf[__SUNOS_MAXWIN];
1415 };
1416 /* A Sparc stack frame */
1417 struct sparc_stackf {
1418 abi_ulong locals[8];
1419 abi_ulong ins[8];
1420 /* It's simpler to treat fp and callers_pc as elements of ins[]
1421 * since we never need to access them ourselves.
1422 */
1423 char *structptr;
1424 abi_ulong xargs[6];
1425 abi_ulong xxargs[1];
1426 };
1427
1428 typedef struct {
1429 struct {
1430 abi_ulong psr;
1431 abi_ulong pc;
1432 abi_ulong npc;
1433 abi_ulong y;
1434 abi_ulong u_regs[16]; /* globals and ins */
1435 } si_regs;
1436 int si_mask;
1437 } __siginfo_t;
1438
1439 typedef struct {
1440 abi_ulong si_float_regs[32];
1441 unsigned long si_fsr;
1442 unsigned long si_fpqdepth;
1443 struct {
1444 unsigned long *insn_addr;
1445 unsigned long insn;
1446     } si_fpqueue[16];
1447 } qemu_siginfo_fpu_t;
1448
1449
1450 struct target_signal_frame {
1451 struct sparc_stackf ss;
1452 __siginfo_t info;
1453 abi_ulong fpu_save;
1454 abi_ulong insns[2] __attribute__ ((aligned (8)));
1455 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
1456 abi_ulong extra_size; /* Should be 0 */
1457 qemu_siginfo_fpu_t fpu_state;
1458 };
1459 struct target_rt_signal_frame {
1460 struct sparc_stackf ss;
1461 siginfo_t info;
1462 abi_ulong regs[20];
1463 sigset_t mask;
1464 abi_ulong fpu_save;
1465 unsigned int insns[2];
1466 stack_t stack;
1467 unsigned int extra_size; /* Should be 0 */
1468 qemu_siginfo_fpu_t fpu_state;
1469 };
1470
1471 #define UREG_O0 16
1472 #define UREG_O6 22
1473 #define UREG_I0 0
1474 #define UREG_I1 1
1475 #define UREG_I2 2
1476 #define UREG_I3 3
1477 #define UREG_I4 4
1478 #define UREG_I5 5
1479 #define UREG_I6 6
1480 #define UREG_I7 7
1481 #define UREG_L0 8
1482 #define UREG_FP UREG_I6
1483 #define UREG_SP UREG_O6
1484
1485 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
1486 CPUSPARCState *env,
1487 unsigned long framesize)
1488 {
1489 abi_ulong sp;
1490
1491 sp = env->regwptr[UREG_FP];
1492
1493 /* This is the X/Open sanctioned signal stack switching. */
1494 if (sa->sa_flags & TARGET_SA_ONSTACK) {
1495 if (!on_sig_stack(sp)
1496 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
1497 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1498 }
1499 }
1500 return sp - framesize;
1501 }
1502
1503 static int
1504 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
1505 {
1506 int err = 0, i;
1507
1508 __put_user(env->psr, &si->si_regs.psr);
1509 __put_user(env->pc, &si->si_regs.pc);
1510 __put_user(env->npc, &si->si_regs.npc);
1511 __put_user(env->y, &si->si_regs.y);
1512     for (i = 0; i < 8; i++) {
1513 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
1514 }
1515     for (i = 0; i < 8; i++) {
1516 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
1517 }
1518 __put_user(mask, &si->si_mask);
1519 return err;
1520 }
1521
1522 #if 0
1523 static int
1524 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1525 CPUSPARCState *env, unsigned long mask)
1526 {
1527 int err = 0;
1528
1529 __put_user(mask, &sc->sigc_mask);
1530 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
1531 __put_user(env->pc, &sc->sigc_pc);
1532 __put_user(env->npc, &sc->sigc_npc);
1533 __put_user(env->psr, &sc->sigc_psr);
1534 __put_user(env->gregs[1], &sc->sigc_g1);
1535 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
1536
1537 return err;
1538 }
1539 #endif
1540 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
1541
1542 static void setup_frame(int sig, struct target_sigaction *ka,
1543 target_sigset_t *set, CPUSPARCState *env)
1544 {
1545 abi_ulong sf_addr;
1546 struct target_signal_frame *sf;
1547 int sigframe_size, err, i;
1548
1549 /* 1. Make sure everything is clean */
1550 //synchronize_user_stack();
1551
1552 sigframe_size = NF_ALIGNEDSZ;
1553 sf_addr = get_sigframe(ka, env, sigframe_size);
1554 trace_user_setup_frame(env, sf_addr);
1555
1556 sf = lock_user(VERIFY_WRITE, sf_addr,
1557 sizeof(struct target_signal_frame), 0);
1558 if (!sf) {
1559 goto sigsegv;
1560 }
1561 #if 0
1562 if (invalid_frame_pointer(sf, sigframe_size))
1563 goto sigill_and_return;
1564 #endif
1565 /* 2. Save the current process state */
1566 err = setup___siginfo(&sf->info, env, set->sig[0]);
1567 __put_user(0, &sf->extra_size);
1568
1569 //save_fpu_state(regs, &sf->fpu_state);
1570 //__put_user(&sf->fpu_state, &sf->fpu_save);
1571
1572 __put_user(set->sig[0], &sf->info.si_mask);
1573 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
1574 __put_user(set->sig[i + 1], &sf->extramask[i]);
1575 }
1576
1577 for (i = 0; i < 8; i++) {
1578 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
1579 }
1580 for (i = 0; i < 8; i++) {
1581 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
1582 }
1583 if (err)
1584 goto sigsegv;
1585
1586 /* 3. signal handler back-trampoline and parameters */
1587 env->regwptr[UREG_FP] = sf_addr;
1588 env->regwptr[UREG_I0] = sig;
1589 env->regwptr[UREG_I1] = sf_addr +
1590 offsetof(struct target_signal_frame, info);
1591 env->regwptr[UREG_I2] = sf_addr +
1592 offsetof(struct target_signal_frame, info);
1593
1594 /* 4. signal handler */
1595 env->pc = ka->_sa_handler;
1596 env->npc = (env->pc + 4);
1597 /* 5. return to kernel instructions */
1598 if (ka->ka_restorer) {
1599 env->regwptr[UREG_I7] = ka->ka_restorer;
1600 } else {
1601 uint32_t val32;
1602
1603 env->regwptr[UREG_I7] = sf_addr +
1604 offsetof(struct target_signal_frame, insns) - 2 * 4;
1605
1606 /* mov __NR_sigreturn, %g1 */
1607 val32 = 0x821020d8;
1608 __put_user(val32, &sf->insns[0]);
1609
1610 /* t 0x10 */
1611 val32 = 0x91d02010;
1612 __put_user(val32, &sf->insns[1]);
1613 if (err)
1614 goto sigsegv;
1615
1616 /* Flush instruction space. */
1617 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
1618 // tb_flush(env);
1619 }
1620 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
1621 return;
1622 #if 0
1623 sigill_and_return:
1624 force_sig(TARGET_SIGILL);
1625 #endif
1626 sigsegv:
1627 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
1628 force_sigsegv(sig);
1629 }
1630
1631 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1632 target_siginfo_t *info,
1633 target_sigset_t *set, CPUSPARCState *env)
1634 {
1635 fprintf(stderr, "setup_rt_frame: not implemented\n");
1636 }
1637
1638 long do_sigreturn(CPUSPARCState *env)
1639 {
1640 abi_ulong sf_addr;
1641 struct target_signal_frame *sf;
1642 uint32_t up_psr, pc, npc;
1643 target_sigset_t set;
1644 sigset_t host_set;
1645     int err = 0, i;
1646
1647 sf_addr = env->regwptr[UREG_FP];
1648 trace_user_do_sigreturn(env, sf_addr);
1649 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
1650 goto segv_and_exit;
1651 }
1652
1653 /* 1. Make sure we are not getting garbage from the user */
1654
1655 if (sf_addr & 3)
1656 goto segv_and_exit;
1657
1658 __get_user(pc, &sf->info.si_regs.pc);
1659 __get_user(npc, &sf->info.si_regs.npc);
1660
1661 if ((pc | npc) & 3) {
1662 goto segv_and_exit;
1663 }
1664
1665 /* 2. Restore the state */
1666 __get_user(up_psr, &sf->info.si_regs.psr);
1667
1668 /* User can only change condition codes and FPU enabling in %psr. */
1669 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
1670 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
1671
1672 env->pc = pc;
1673 env->npc = npc;
1674 __get_user(env->y, &sf->info.si_regs.y);
1675     for (i = 0; i < 8; i++) {
1676 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
1677 }
1678     for (i = 0; i < 8; i++) {
1679 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
1680 }
1681
1682 /* FIXME: implement FPU save/restore:
1683 * __get_user(fpu_save, &sf->fpu_save);
1684 * if (fpu_save)
1685 * err |= restore_fpu_state(env, fpu_save);
1686 */
1687
1688     /* This is pretty much atomic; no amount of locking would prevent
1689      * the races that exist anyway.
1690 */
1691 __get_user(set.sig[0], &sf->info.si_mask);
1692     for (i = 1; i < TARGET_NSIG_WORDS; i++) {
1693 __get_user(set.sig[i], &sf->extramask[i - 1]);
1694 }
1695
1696 target_to_host_sigset_internal(&host_set, &set);
1697 set_sigmask(&host_set);
1698
1699 if (err) {
1700 goto segv_and_exit;
1701 }
1702 unlock_user_struct(sf, sf_addr, 0);
1703 return -TARGET_QEMU_ESIGRETURN;
1704
1705 segv_and_exit:
1706 unlock_user_struct(sf, sf_addr, 0);
1707 force_sig(TARGET_SIGSEGV);
1708 return -TARGET_QEMU_ESIGRETURN;
1709 }
1710
1711 long do_rt_sigreturn(CPUSPARCState *env)
1712 {
1713 trace_user_do_rt_sigreturn(env, 0);
1714 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
1715 return -TARGET_ENOSYS;
1716 }
1717
1718 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1719 #define SPARC_MC_TSTATE 0
1720 #define SPARC_MC_PC 1
1721 #define SPARC_MC_NPC 2
1722 #define SPARC_MC_Y 3
1723 #define SPARC_MC_G1 4
1724 #define SPARC_MC_G2 5
1725 #define SPARC_MC_G3 6
1726 #define SPARC_MC_G4 7
1727 #define SPARC_MC_G5 8
1728 #define SPARC_MC_G6 9
1729 #define SPARC_MC_G7 10
1730 #define SPARC_MC_O0 11
1731 #define SPARC_MC_O1 12
1732 #define SPARC_MC_O2 13
1733 #define SPARC_MC_O3 14
1734 #define SPARC_MC_O4 15
1735 #define SPARC_MC_O5 16
1736 #define SPARC_MC_O6 17
1737 #define SPARC_MC_O7 18
1738 #define SPARC_MC_NGREG 19
1739
1740 typedef abi_ulong target_mc_greg_t;
1741 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
1742
1743 struct target_mc_fq {
1744 abi_ulong *mcfq_addr;
1745 uint32_t mcfq_insn;
1746 };
1747
1748 struct target_mc_fpu {
1749 union {
1750 uint32_t sregs[32];
1751 uint64_t dregs[32];
1752 //uint128_t qregs[16];
1753 } mcfpu_fregs;
1754 abi_ulong mcfpu_fsr;
1755 abi_ulong mcfpu_fprs;
1756 abi_ulong mcfpu_gsr;
1757 struct target_mc_fq *mcfpu_fq;
1758 unsigned char mcfpu_qcnt;
1759 unsigned char mcfpu_qentsz;
1760 unsigned char mcfpu_enab;
1761 };
1762 typedef struct target_mc_fpu target_mc_fpu_t;
1763
1764 typedef struct {
1765 target_mc_gregset_t mc_gregs;
1766 target_mc_greg_t mc_fp;
1767 target_mc_greg_t mc_i7;
1768 target_mc_fpu_t mc_fpregs;
1769 } target_mcontext_t;
1770
1771 struct target_ucontext {
1772 struct target_ucontext *tuc_link;
1773 abi_ulong tuc_flags;
1774 target_sigset_t tuc_sigmask;
1775 target_mcontext_t tuc_mcontext;
1776 };
1777
1778 /* A V9 register window */
1779 struct target_reg_window {
1780 abi_ulong locals[8];
1781 abi_ulong ins[8];
1782 };
1783
1784 #define TARGET_STACK_BIAS 2047
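/* Editor's note: on SPARC V9 the ABI biases the stack pointer by 2047
 * bytes, so the register window save area actually lives at
 * %sp + TARGET_STACK_BIAS; the w_addr computations below add the bias
 * back before reading or writing the saved %fp and %i7. */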
1785
1786 /* {set, get}context() needed for 64-bit SparcLinux userland. */
1787 void sparc64_set_context(CPUSPARCState *env)
1788 {
1789 abi_ulong ucp_addr;
1790 struct target_ucontext *ucp;
1791 target_mc_gregset_t *grp;
1792 abi_ulong pc, npc, tstate;
1793 abi_ulong fp, i7, w_addr;
1794 unsigned int i;
1795
1796 ucp_addr = env->regwptr[UREG_I0];
1797 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
1798 goto do_sigsegv;
1799 }
1800 grp = &ucp->tuc_mcontext.mc_gregs;
1801 __get_user(pc, &((*grp)[SPARC_MC_PC]));
1802 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
1803 if ((pc | npc) & 3) {
1804 goto do_sigsegv;
1805 }
1806 if (env->regwptr[UREG_I1]) {
1807 target_sigset_t target_set;
1808 sigset_t set;
1809
1810 if (TARGET_NSIG_WORDS == 1) {
1811 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
1812 } else {
1813 abi_ulong *src, *dst;
1814 src = ucp->tuc_sigmask.sig;
1815 dst = target_set.sig;
1816 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
1817 __get_user(*dst, src);
1818 }
1819 }
1820 target_to_host_sigset_internal(&set, &target_set);
1821 set_sigmask(&set);
1822 }
1823 env->pc = pc;
1824 env->npc = npc;
1825 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
1826 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
1827 env->asi = (tstate >> 24) & 0xff;
1828 cpu_put_ccr(env, tstate >> 32);
1829 cpu_put_cwp64(env, tstate & 0x1f);
1830 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
1831 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
1832 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
1833 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
1834 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
1835 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
1836 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
1837 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
1838 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
1839 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
1840 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
1841 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
1842 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
1843 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
1844 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
1845
1846 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
1847 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
1848
1849 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
1850 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
1851 abi_ulong) != 0) {
1852 goto do_sigsegv;
1853 }
1854 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
1855 abi_ulong) != 0) {
1856 goto do_sigsegv;
1857 }
1858 /* FIXME this does not match how the kernel handles the FPU in
1859 * its sparc64_set_context implementation. In particular the FPU
1860 * is only restored if fenab is non-zero in:
1861 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
1862 */
1863 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
1864 {
1865 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
1866 for (i = 0; i < 64; i++, src++) {
1867 if (i & 1) {
1868 __get_user(env->fpr[i/2].l.lower, src);
1869 } else {
1870 __get_user(env->fpr[i/2].l.upper, src);
1871 }
1872 }
1873 }
1874 __get_user(env->fsr,
1875 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
1876 __get_user(env->gsr,
1877 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
1878 unlock_user_struct(ucp, ucp_addr, 0);
1879 return;
1880 do_sigsegv:
1881 unlock_user_struct(ucp, ucp_addr, 0);
1882 force_sig(TARGET_SIGSEGV);
1883 }
1884
1885 void sparc64_get_context(CPUSPARCState *env)
1886 {
1887 abi_ulong ucp_addr;
1888 struct target_ucontext *ucp;
1889 target_mc_gregset_t *grp;
1890 target_mcontext_t *mcp;
1891 abi_ulong fp, i7, w_addr;
1892 int err;
1893 unsigned int i;
1894 target_sigset_t target_set;
1895 sigset_t set;
1896
1897 ucp_addr = env->regwptr[UREG_I0];
1898 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
1899 goto do_sigsegv;
1900 }
1901
1902 mcp = &ucp->tuc_mcontext;
1903 grp = &mcp->mc_gregs;
1904
1905 /* Skip over the trap instruction, first. */
1906 env->pc = env->npc;
1907 env->npc += 4;
1908
1909 /* If we're only reading the signal mask then do_sigprocmask()
1910 * is guaranteed not to fail, which is important because we don't
1911 * have any way to signal a failure or restart this operation since
1912 * this is not a normal syscall.
1913 */
1914 err = do_sigprocmask(0, NULL, &set);
1915 assert(err == 0);
1916 host_to_target_sigset_internal(&target_set, &set);
1917 if (TARGET_NSIG_WORDS == 1) {
1918 __put_user(target_set.sig[0],
1919 (abi_ulong *)&ucp->tuc_sigmask);
1920 } else {
1921 abi_ulong *src, *dst;
1922 src = target_set.sig;
1923 dst = ucp->tuc_sigmask.sig;
1924 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
1925 __put_user(*src, dst);
1926 }
1929 }
1930
1931 /* XXX: tstate must be saved properly */
1932 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
1933 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
1934 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
1935 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
1936 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
1937 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
1938 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
1939 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
1940 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
1941 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
1942 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
1943 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
1944 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
1945 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
1946 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
1947 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
1948 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
1949 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
1950 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
1951
1952 w_addr = TARGET_STACK_BIAS + env->regwptr[UREG_I6];
1953 fp = i7 = 0;
1954 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
1955 abi_ulong) != 0) {
1956 goto do_sigsegv;
1957 }
1958 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
1959 abi_ulong) != 0) {
1960 goto do_sigsegv;
1961 }
1962 __put_user(fp, &(mcp->mc_fp));
1963 __put_user(i7, &(mcp->mc_i7));
1964
1965 {
1966 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
1967 for (i = 0; i < 64; i++, dst++) {
1968 if (i & 1) {
1969 __put_user(env->fpr[i/2].l.lower, dst);
1970 } else {
1971 __put_user(env->fpr[i/2].l.upper, dst);
1972 }
1973 }
1974 }
1975 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
1976 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
1977 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
1978
1979 if (err)
1980 goto do_sigsegv;
1981 unlock_user_struct(ucp, ucp_addr, 1);
1982 return;
1983 do_sigsegv:
1984 unlock_user_struct(ucp, ucp_addr, 1);
1985 force_sig(TARGET_SIGSEGV);
1986 }
1987 #endif
1988 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
1989
1990 # if defined(TARGET_ABI_MIPSO32)
1991 struct target_sigcontext {
1992 uint32_t sc_regmask; /* Unused */
1993 uint32_t sc_status;
1994 uint64_t sc_pc;
1995 uint64_t sc_regs[32];
1996 uint64_t sc_fpregs[32];
1997 uint32_t sc_ownedfp; /* Unused */
1998 uint32_t sc_fpc_csr;
1999 uint32_t sc_fpc_eir; /* Unused */
2000 uint32_t sc_used_math;
2001 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2002 uint32_t pad0;
2003 uint64_t sc_mdhi;
2004 uint64_t sc_mdlo;
2005 target_ulong sc_hi1; /* Was sc_cause */
2006 target_ulong sc_lo1; /* Was sc_badvaddr */
2007 target_ulong sc_hi2; /* Was sc_sigset[4] */
2008 target_ulong sc_lo2;
2009 target_ulong sc_hi3;
2010 target_ulong sc_lo3;
2011 };
2012 # else /* N32 || N64 */
2013 struct target_sigcontext {
2014 uint64_t sc_regs[32];
2015 uint64_t sc_fpregs[32];
2016 uint64_t sc_mdhi;
2017 uint64_t sc_hi1;
2018 uint64_t sc_hi2;
2019 uint64_t sc_hi3;
2020 uint64_t sc_mdlo;
2021 uint64_t sc_lo1;
2022 uint64_t sc_lo2;
2023 uint64_t sc_lo3;
2024 uint64_t sc_pc;
2025 uint32_t sc_fpc_csr;
2026 uint32_t sc_used_math;
2027 uint32_t sc_dsp;
2028 uint32_t sc_reserved;
2029 };
2030 # endif /* O32 */
2031
2032 struct sigframe {
2033 uint32_t sf_ass[4]; /* argument save space for o32 */
2034 uint32_t sf_code[2]; /* signal trampoline */
2035 struct target_sigcontext sf_sc;
2036 target_sigset_t sf_mask;
2037 };
2038
2039 struct target_ucontext {
2040 target_ulong tuc_flags;
2041 target_ulong tuc_link;
2042 target_stack_t tuc_stack;
2043 target_ulong pad0;
2044 struct target_sigcontext tuc_mcontext;
2045 target_sigset_t tuc_sigmask;
2046 };
2047
2048 struct target_rt_sigframe {
2049 uint32_t rs_ass[4]; /* argument save space for o32 */
2050 uint32_t rs_code[2]; /* signal trampoline */
2051 struct target_siginfo rs_info;
2052 struct target_ucontext rs_uc;
2053 };
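/* Note: sf_ass/rs_ass mirror the 16 bytes of argument save space that
 * the o32 calling convention requires callers to reserve, and
 * sf_code/rs_code hold the two-instruction sigreturn trampoline that
 * $ra is pointed at when the handler is entered.
 */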
2054
2055 /* Install trampoline to jump back from signal handler */
2056 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2057 {
2058 int err = 0;
2059
2060 /*
2061 * Set up the return code ...
2062 *
2063 * li v0, __NR__foo_sigreturn
2064 * syscall
2065 */
2066
2067 __put_user(0x24020000 + syscall, tramp + 0);
2068 __put_user(0x0000000c, tramp + 1);
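/* The two words decode as "addiu $v0, $zero, <syscall>" (opcode 0x24,
 * rs = $zero, rt = $v0, with the syscall number in the immediate
 * field) followed by "syscall" (0x0000000c) -- the li/syscall pair
 * described above.
 */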
2069 return err;
2070 }
2071
2072 static inline void setup_sigcontext(CPUMIPSState *regs,
2073 struct target_sigcontext *sc)
2074 {
2075 int i;
2076
2077 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2078 regs->hflags &= ~MIPS_HFLAG_BMASK;
2079
2080 __put_user(0, &sc->sc_regs[0]);
2081 for (i = 1; i < 32; ++i) {
2082 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2083 }
2084
2085 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2086 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2087
2088 /* Rather than checking whether the DSP ASE is present, always copy.
2089 The storage would just be garbage otherwise. */
2090 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2091 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2092 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2093 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2094 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2095 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2096 {
2097 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2098 __put_user(dsp, &sc->sc_dsp);
2099 }
2100
2101 __put_user(1, &sc->sc_used_math);
2102
2103 for (i = 0; i < 32; ++i) {
2104 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2105 }
2106 }
2107
2108 static inline void
2109 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2110 {
2111 int i;
2112
2113 __get_user(regs->CP0_EPC, &sc->sc_pc);
2114
2115 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2116 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2117
2118 for (i = 1; i < 32; ++i) {
2119 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2120 }
2121
2122 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2123 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2124 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2125 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2126 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2127 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2128 {
2129 uint32_t dsp;
2130 __get_user(dsp, &sc->sc_dsp);
2131 cpu_wrdsp(dsp, 0x3ff, regs);
2132 }
2133
2134 for (i = 0; i < 32; ++i) {
2135 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2136 }
2137 }
2138
2139 /*
2140 * Determine which stack to use.
2141 */
2142 static inline abi_ulong
2143 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2144 {
2145 unsigned long sp;
2146
2147 /* Default to using normal stack */
2148 sp = regs->active_tc.gpr[29];
2149
2150 /*
2151 * FPU emulator may have its own trampoline active just
2152 * above the user stack, 16 bytes before the next lowest
2153 * 16-byte boundary. Try to avoid trashing it.
2154 */
2155 sp -= 32;
2156
2157 /* This is the X/Open sanctioned signal stack switching. */
2158 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2159 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2160 }
2161
2162 return (sp - frame_size) & ~7;
2163 }
2164
2165 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2166 {
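/* On MIPS16/microMIPS-capable cores, bit 0 of the PC selects the
 * compressed ISA mode: mirror it into the M16 hflag and clear it from
 * the architectural PC.
 */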
2167 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2168 env->hflags &= ~MIPS_HFLAG_M16;
2169 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2170 env->active_tc.PC &= ~(target_ulong) 1;
2171 }
2172 }
2173
2174 # if defined(TARGET_ABI_MIPSO32)
2175 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2176 static void setup_frame(int sig, struct target_sigaction * ka,
2177 target_sigset_t *set, CPUMIPSState *regs)
2178 {
2179 struct sigframe *frame;
2180 abi_ulong frame_addr;
2181 int i;
2182
2183 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2184 trace_user_setup_frame(regs, frame_addr);
2185 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2186 goto give_sigsegv;
2187 }
2188
2189 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2190
2191 setup_sigcontext(regs, &frame->sf_sc);
2192
2193 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2194 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2195 }
2196
2197 /*
2198 * Arguments to signal handler:
2199 *
2200 * a0 = signal number
2201 * a1 = 0 (should be cause)
2202 * a2 = pointer to struct sigcontext
2203 *
2204 * $25 and PC point to the signal handler, $29 points to the
2205 * struct sigframe.
2206 */
2207 regs->active_tc.gpr[ 4] = sig;
2208 regs->active_tc.gpr[ 5] = 0;
2209 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2210 regs->active_tc.gpr[29] = frame_addr;
2211 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2212 /* The original kernel code sets CP0_EPC to the handler,
2213 * since it returns to userland using eret.
2214 * We cannot do this here, so we must set PC directly. */
2215 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2216 mips_set_hflags_isa_mode_from_pc(regs);
2217 unlock_user_struct(frame, frame_addr, 1);
2218 return;
2219
2220 give_sigsegv:
2221 force_sigsegv(sig);
2222 }
2223
2224 long do_sigreturn(CPUMIPSState *regs)
2225 {
2226 struct sigframe *frame;
2227 abi_ulong frame_addr;
2228 sigset_t blocked;
2229 target_sigset_t target_set;
2230 int i;
2231
2232 frame_addr = regs->active_tc.gpr[29];
2233 trace_user_do_sigreturn(regs, frame_addr);
2234 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2235 goto badframe;
2236
2237 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2238 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2239 }
2240
2241 target_to_host_sigset_internal(&blocked, &target_set);
2242 set_sigmask(&blocked);
2243
2244 restore_sigcontext(regs, &frame->sf_sc);
2245
2246 #if 0
2247 /*
2248 * Don't let your children do this ...
2249 */
2250 __asm__ __volatile__(
2251 "move\t$29, %0\n\t"
2252 "j\tsyscall_exit"
2253 :/* no outputs */
2254 :"r" (&regs));
2255 /* Unreached */
2256 #endif
2257
2258 regs->active_tc.PC = regs->CP0_EPC;
2259 mips_set_hflags_isa_mode_from_pc(regs);
2260 /* I am not sure this is right, but it seems to work;
2261 * maybe a problem with nested signals? */
2262 regs->CP0_EPC = 0;
2263 return -TARGET_QEMU_ESIGRETURN;
2264
2265 badframe:
2266 force_sig(TARGET_SIGSEGV);
2267 return -TARGET_QEMU_ESIGRETURN;
2268 }
2269 # endif /* O32 */
2270
2271 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2272 target_siginfo_t *info,
2273 target_sigset_t *set, CPUMIPSState *env)
2274 {
2275 struct target_rt_sigframe *frame;
2276 abi_ulong frame_addr;
2277 int i;
2278
2279 frame_addr = get_sigframe(ka, env, sizeof(*frame));
2280 trace_user_setup_rt_frame(env, frame_addr);
2281 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2282 goto give_sigsegv;
2283 }
2284
2285 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
2286
2287 tswap_siginfo(&frame->rs_info, info);
2288
2289 __put_user(0, &frame->rs_uc.tuc_flags);
2290 __put_user(0, &frame->rs_uc.tuc_link);
2291 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
2292 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
2293 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
2294 &frame->rs_uc.tuc_stack.ss_flags);
2295
2296 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
2297
2298 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2299 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
2300 }
2301
2302 /*
2303 * Arguments to signal handler:
2304 *
2305 * a0 = signal number
2306 * a1 = pointer to siginfo_t
2307 * a2 = pointer to ucontext_t
2308 *
2309 * $25 and PC point to the signal handler, $29 points to the
2310 * struct sigframe.
2311 */
2312 env->active_tc.gpr[ 4] = sig;
2313 env->active_tc.gpr[ 5] = frame_addr
2314 + offsetof(struct target_rt_sigframe, rs_info);
2315 env->active_tc.gpr[ 6] = frame_addr
2316 + offsetof(struct target_rt_sigframe, rs_uc);
2317 env->active_tc.gpr[29] = frame_addr;
2318 env->active_tc.gpr[31] = frame_addr
2319 + offsetof(struct target_rt_sigframe, rs_code);
2320 /* The original kernel code sets CP0_EPC to the handler,
2321 * since it returns to userland using eret.
2322 * We cannot do this here, so we must set PC directly. */
2323 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
2324 mips_set_hflags_isa_mode_from_pc(env);
2325 unlock_user_struct(frame, frame_addr, 1);
2326 return;
2327
2328 give_sigsegv:
2329 unlock_user_struct(frame, frame_addr, 1);
2330 force_sigsegv(sig);
2331 }
2332
2333 long do_rt_sigreturn(CPUMIPSState *env)
2334 {
2335 struct target_rt_sigframe *frame;
2336 abi_ulong frame_addr;
2337 sigset_t blocked;
2338
2339 frame_addr = env->active_tc.gpr[29];
2340 trace_user_do_rt_sigreturn(env, frame_addr);
2341 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2342 goto badframe;
2343 }
2344
2345 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
2346 set_sigmask(&blocked);
2347
2348 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
2349
2350 if (do_sigaltstack(frame_addr +
2351 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
2352 0, get_sp_from_cpustate(env)) == -EFAULT)
2353 goto badframe;
2354
2355 env->active_tc.PC = env->CP0_EPC;
2356 mips_set_hflags_isa_mode_from_pc(env);
2357 /* I am not sure this is right, but it seems to work;
2358 * maybe a problem with nested signals? */
2359 env->CP0_EPC = 0;
2360 return -TARGET_QEMU_ESIGRETURN;
2361
2362 badframe:
2363 force_sig(TARGET_SIGSEGV);
2364 return -TARGET_QEMU_ESIGRETURN;
2365 }
2366
2367 #elif defined(TARGET_PPC)
2368
2369 /* Size of the dummy stack frame allocated when calling a signal handler.
2370 See arch/powerpc/include/asm/ptrace.h. */
2371 #if defined(TARGET_PPC64)
2372 #define SIGNAL_FRAMESIZE 128
2373 #else
2374 #define SIGNAL_FRAMESIZE 64
2375 #endif
2376
2377 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
2378 on 64-bit PPC, sigcontext and mcontext are one and the same. */
2379 struct target_mcontext {
2380 target_ulong mc_gregs[48];
2381 /* Includes fpscr. */
2382 uint64_t mc_fregs[33];
2383 #if defined(TARGET_PPC64)
2384 /* Pointer to the vector regs */
2385 target_ulong v_regs;
2386 #else
2387 target_ulong mc_pad[2];
2388 #endif
2389 /* We need to handle Altivec and SPE at the same time, which no
2390 kernel needs to do. Fortunately, the kernel always sizes this
2391 union for the larger Altivec registers, rather than trying to
2392 twiddle it based on the specific platform. */
2393 union {
2394 /* SPE vector registers. One extra for SPEFSCR. */
2395 uint32_t spe[33];
2396 /* Altivec vector registers. The packing of VSCR and VRSAVE
2397 varies depending on whether we're PPC64 or not: PPC64 splits
2398 them apart; PPC32 stuffs them together.
2399 We also need to account for the VSX registers on PPC64
2400 */
2401 #if defined(TARGET_PPC64)
2402 #define QEMU_NVRREG (34 + 16)
2403 /* On ppc64, this mcontext structure is naturally *unaligned*,
2404 * or rather it is aligned on an 8-byte boundary but not on
2405 * a 16-byte one. This pad fixes it up. This is also why the
2406 * vector regs are referenced by the v_regs pointer above, so
2407 * any amount of padding can be added here.
2408 */
2409 target_ulong pad;
2410 #else
2411 /* On ppc32, we are already aligned to 16 bytes */
2412 #define QEMU_NVRREG 33
2413 #endif
2414 /* We cannot use ppc_avr_t here as we do *not* want the implied
2415 * 16-byte alignment that would result from it. This would have
2416 * the effect of making the whole struct target_mcontext aligned
2417 * which breaks the layout of struct target_ucontext on ppc64.
2418 */
2419 uint64_t altivec[QEMU_NVRREG][2];
2420 #undef QEMU_NVRREG
2421 } mc_vregs;
2422 };
2423
2424 /* See arch/powerpc/include/asm/sigcontext.h. */
2425 struct target_sigcontext {
2426 target_ulong _unused[4];
2427 int32_t signal;
2428 #if defined(TARGET_PPC64)
2429 int32_t pad0;
2430 #endif
2431 target_ulong handler;
2432 target_ulong oldmask;
2433 target_ulong regs; /* struct pt_regs __user * */
2434 #if defined(TARGET_PPC64)
2435 struct target_mcontext mcontext;
2436 #endif
2437 };
2438
2439 /* Indices for target_mcontext.mc_gregs, below.
2440 See arch/powerpc/include/asm/ptrace.h for details. */
2441 enum {
2442 TARGET_PT_R0 = 0,
2443 TARGET_PT_R1 = 1,
2444 TARGET_PT_R2 = 2,
2445 TARGET_PT_R3 = 3,
2446 TARGET_PT_R4 = 4,
2447 TARGET_PT_R5 = 5,
2448 TARGET_PT_R6 = 6,
2449 TARGET_PT_R7 = 7,
2450 TARGET_PT_R8 = 8,
2451 TARGET_PT_R9 = 9,
2452 TARGET_PT_R10 = 10,
2453 TARGET_PT_R11 = 11,
2454 TARGET_PT_R12 = 12,
2455 TARGET_PT_R13 = 13,
2456 TARGET_PT_R14 = 14,
2457 TARGET_PT_R15 = 15,
2458 TARGET_PT_R16 = 16,
2459 TARGET_PT_R17 = 17,
2460 TARGET_PT_R18 = 18,
2461 TARGET_PT_R19 = 19,
2462 TARGET_PT_R20 = 20,
2463 TARGET_PT_R21 = 21,
2464 TARGET_PT_R22 = 22,
2465 TARGET_PT_R23 = 23,
2466 TARGET_PT_R24 = 24,
2467 TARGET_PT_R25 = 25,
2468 TARGET_PT_R26 = 26,
2469 TARGET_PT_R27 = 27,
2470 TARGET_PT_R28 = 28,
2471 TARGET_PT_R29 = 29,
2472 TARGET_PT_R30 = 30,
2473 TARGET_PT_R31 = 31,
2474 TARGET_PT_NIP = 32,
2475 TARGET_PT_MSR = 33,
2476 TARGET_PT_ORIG_R3 = 34,
2477 TARGET_PT_CTR = 35,
2478 TARGET_PT_LNK = 36,
2479 TARGET_PT_XER = 37,
2480 TARGET_PT_CCR = 38,
2481 /* Yes, there are two registers with #39. One is 64-bit only. */
2482 TARGET_PT_MQ = 39,
2483 TARGET_PT_SOFTE = 39,
2484 TARGET_PT_TRAP = 40,
2485 TARGET_PT_DAR = 41,
2486 TARGET_PT_DSISR = 42,
2487 TARGET_PT_RESULT = 43,
2488 TARGET_PT_REGS_COUNT = 44
2489 };
2490
2491
2492 struct target_ucontext {
2493 target_ulong tuc_flags;
2494 target_ulong tuc_link; /* ucontext_t __user * */
2495 struct target_sigaltstack tuc_stack;
2496 #if !defined(TARGET_PPC64)
2497 int32_t tuc_pad[7];
2498 target_ulong tuc_regs; /* struct mcontext __user *
2499 points to uc_mcontext field */
2500 #endif
2501 target_sigset_t tuc_sigmask;
2502 #if defined(TARGET_PPC64)
2503 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
2504 struct target_sigcontext tuc_sigcontext;
2505 #else
2506 int32_t tuc_maskext[30];
2507 int32_t tuc_pad2[3];
2508 struct target_mcontext tuc_mcontext;
2509 #endif
2510 };
2511
2512 /* See arch/powerpc/kernel/signal_32.c. */
2513 struct target_sigframe {
2514 struct target_sigcontext sctx;
2515 struct target_mcontext mctx;
2516 int32_t abigap[56];
2517 };
2518
2519 #if defined(TARGET_PPC64)
2520
2521 #define TARGET_TRAMP_SIZE 6
2522
2523 struct target_rt_sigframe {
2524 /* sys_rt_sigreturn requires the ucontext be the first field */
2525 struct target_ucontext uc;
2526 target_ulong _unused[2];
2527 uint32_t trampoline[TARGET_TRAMP_SIZE];
2528 target_ulong pinfo; /* struct siginfo __user * */
2529 target_ulong puc; /* void __user * */
2530 struct target_siginfo info;
2531 /* The 64-bit ABI allows for 288 bytes below sp before decrementing it. */
2532 char abigap[288];
2533 } __attribute__((aligned(16)));
2534
2535 #else
2536
2537 struct target_rt_sigframe {
2538 struct target_siginfo info;
2539 struct target_ucontext uc;
2540 int32_t abigap[56];
2541 };
2542
2543 #endif
2544
2545 #if defined(TARGET_PPC64)
2546
2547 struct target_func_ptr {
2548 target_ulong entry;
2549 target_ulong toc;
2550 };
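/* This matches the first two doublewords of an ELFv1 "function
 * descriptor" (OPD entry): the code entry point and the TOC pointer.
 * The descriptor's third doubleword, the environment pointer, is not
 * needed here.
 */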
2551
2552 #endif
2553
2554 /* We use the mc_pad field for the signal return trampoline. */
2555 #define tramp mc_pad
2556
2557 /* See arch/powerpc/kernel/signal.c. */
2558 static target_ulong get_sigframe(struct target_sigaction *ka,
2559 CPUPPCState *env,
2560 int frame_size)
2561 {
2562 target_ulong oldsp;
2563
2564 oldsp = env->gpr[1];
2565
2566 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
2567 (sas_ss_flags(oldsp) == 0)) {
2568 oldsp = (target_sigaltstack_used.ss_sp
2569 + target_sigaltstack_used.ss_size);
2570 }
2571
2572 return (oldsp - frame_size) & ~0xFUL;
2573 }
2574
2575 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
2576 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
2577 #define PPC_VEC_HI 0
2578 #define PPC_VEC_LO 1
2579 #else
2580 #define PPC_VEC_HI 1
2581 #define PPC_VEC_LO 0
2582 #endif
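/* PPC_VEC_HI/PPC_VEC_LO select which half of QEMU's host-endian
 * avr->u64[] pair goes into each of the two target-order doubleword
 * slots of the frame, so that the 16-byte memory image of a vector
 * register comes out the way the target kernel would have stored it.
 */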
2583
2584
2585 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
2586 {
2587 target_ulong msr = env->msr;
2588 int i;
2589 target_ulong ccr = 0;
2590
2591 /* In general, the kernel attempts to be intelligent about what it
2592 needs to save for Altivec/FP/SPE registers. We don't care that
2593 much, so we just go ahead and save everything. */
2594
2595 /* Save general registers. */
2596 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2597 __put_user(env->gpr[i], &frame->mc_gregs[i]);
2598 }
2599 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
2600 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
2601 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
2602 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
2603
2604 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
2605 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
2606 }
2607 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
2608
2609 /* Save Altivec registers if necessary. */
2610 if (env->insns_flags & PPC_ALTIVEC) {
2611 uint32_t *vrsave;
2612 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
2613 ppc_avr_t *avr = &env->avr[i];
2614 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
2615
2616 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
2617 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
2618 }
2619 /* Set MSR_VR in the saved MSR value to indicate that
2620 frame->mc_vregs contains valid data. */
2621 msr |= MSR_VR;
2622 #if defined(TARGET_PPC64)
2623 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
2624 /* 64-bit needs to put a pointer to the vectors in the frame */
2625 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
2626 #else
2627 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
2628 #endif
2629 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
2630 }
2631
2632 /* Save VSX second halves */
2633 if (env->insns_flags2 & PPC2_VSX) {
2634 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
2635 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
2636 __put_user(env->vsr[i], &vsregs[i]);
2637 }
2638 }
2639
2640 /* Save floating point registers. */
2641 if (env->insns_flags & PPC_FLOAT) {
2642 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
2643 __put_user(env->fpr[i], &frame->mc_fregs[i]);
2644 }
2645 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
2646 }
2647
2648 /* Save SPE registers. The kernel only saves the high half. */
2649 if (env->insns_flags & PPC_SPE) {
2650 #if defined(TARGET_PPC64)
2651 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2652 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
2653 }
2654 #else
2655 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
2656 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
2657 }
2658 #endif
2659 /* Set MSR_SPE in the saved MSR value to indicate that
2660 frame->mc_vregs contains valid data. */
2661 msr |= MSR_SPE;
2662 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
2663 }
2664
2665 /* Store MSR. */
2666 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
2667 }
2668
2669 static void encode_trampoline(int sigret, uint32_t *tramp)
2670 {
2671 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
2672 if (sigret) {
2673 __put_user(0x38000000 | sigret, &tramp[0]);
2674 __put_user(0x44000002, &tramp[1]);
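/* 0x38000000 | sigret encodes "addi r0, 0, sigret" (the li mnemonic)
 * and 0x44000002 is "sc": the handler's return through the link
 * register lands on this pair, which re-enters the emulator with r0
 * holding the sigreturn syscall number.
 */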
2675 }
2676 }
2677
2678 static void restore_user_regs(CPUPPCState *env,
2679 struct target_mcontext *frame, int sig)
2680 {
2681 target_ulong save_r2 = 0;
2682 target_ulong msr;
2683 target_ulong ccr;
2684
2685 int i;
2686
2687 if (!sig) {
2688 save_r2 = env->gpr[2];
2689 }
2690
2691 /* Restore general registers. */
2692 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2693 __get_user(env->gpr[i], &frame->mc_gregs[i]);
2694 }
2695 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
2696 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
2697 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
2698 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
2699 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
2700
2701 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
2702 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
2703 }
2704
2705 if (!sig) {
2706 env->gpr[2] = save_r2;
2707 }
2708 /* Restore MSR. */
2709 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
2710
2711 /* If doing signal return, restore the previous little-endian mode. */
2712 if (sig)
2713 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
2714
2715 /* Restore Altivec registers if necessary. */
2716 if (env->insns_flags & PPC_ALTIVEC) {
2717 ppc_avr_t *v_regs;
2718 uint32_t *vrsave;
2719 #if defined(TARGET_PPC64)
2720 uint64_t v_addr;
2721 /* 64-bit needs to recover the pointer to the vectors from the frame */
2722 __get_user(v_addr, &frame->v_regs);
2723 v_regs = g2h(v_addr);
2724 #else
2725 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
2726 #endif
2727 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
2728 ppc_avr_t *avr = &env->avr[i];
2729 ppc_avr_t *vreg = &v_regs[i];
2730
2731 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
2732 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
2733 }
2734 /* Recover VRSAVE from the slot where save_user_regs()
2735 stored it alongside the vector registers. */
2736 #if defined(TARGET_PPC64)
2737 vrsave = (uint32_t *)&v_regs[33];
2738 #else
2739 vrsave = (uint32_t *)&v_regs[32];
2740 #endif
2741 __get_user(env->spr[SPR_VRSAVE], vrsave);
2742 }
2743
2744 /* Restore VSX second halves */
2745 if (env->insns_flags2 & PPC2_VSX) {
2746 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
2747 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
2748 __get_user(env->vsr[i], &vsregs[i]);
2749 }
2750 }
2751
2752 /* Restore floating point registers. */
2753 if (env->insns_flags & PPC_FLOAT) {
2754 uint64_t fpscr;
2755 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
2756 __get_user(env->fpr[i], &frame->mc_fregs[i]);
2757 }
2758 __get_user(fpscr, &frame->mc_fregs[32]);
2759 env->fpscr = (uint32_t) fpscr;
2760 }
2761
2762 /* Restore SPE registers. The kernel only saved the high half. */
2763 if (env->insns_flags & PPC_SPE) {
2764 #if defined(TARGET_PPC64)
2765 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
2766 uint32_t hi;
2767
2768 __get_user(hi, &frame->mc_vregs.spe[i]);
2769 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
2770 }
2771 #else
2772 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
2773 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
2774 }
2775 #endif
2776 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
2777 }
2778 }
2779
2780 #if !defined(TARGET_PPC64)
2781 static void setup_frame(int sig, struct target_sigaction *ka,
2782 target_sigset_t *set, CPUPPCState *env)
2783 {
2784 struct target_sigframe *frame;
2785 struct target_sigcontext *sc;
2786 target_ulong frame_addr, newsp;
2787 int err = 0;
2788
2789 frame_addr = get_sigframe(ka, env, sizeof(*frame));
2790 trace_user_setup_frame(env, frame_addr);
2791 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
2792 goto sigsegv;
2793 sc = &frame->sctx;
2794
2795 __put_user(ka->_sa_handler, &sc->handler);
2796 __put_user(set->sig[0], &sc->oldmask);
2797 __put_user(set->sig[1], &sc->_unused[3]);
2798 __put_user(h2g(&frame->mctx), &sc->regs);
2799 __put_user(sig, &sc->signal);
2800
2801 /* Save user regs. */
2802 save_user_regs(env, &frame->mctx);
2803
2804 /* Construct the trampoline code on the stack. */
2805 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
2806
2807 /* The kernel checks for the presence of a VDSO here. We don't
2808 emulate a vdso, so use a sigreturn system call. */
2809 env->lr = (target_ulong) h2g(frame->mctx.tramp);
2810
2811 /* Turn off all fp exceptions. */
2812 env->fpscr = 0;
2813
2814 /* Create a stack frame for the caller of the handler. */
2815 newsp = frame_addr - SIGNAL_FRAMESIZE;
2816 err |= put_user(env->gpr[1], newsp, target_ulong);
2817
2818 if (err)
2819 goto sigsegv;
2820
2821 /* Set up registers for signal handler. */
2822 env->gpr[1] = newsp;
2823 env->gpr[3] = sig;
2824 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
2825
2826 env->nip = (target_ulong) ka->_sa_handler;
2827
2828 /* Signal handlers are entered in big-endian mode. */
2829 env->msr &= ~(1ull << MSR_LE);
2830
2831 unlock_user_struct(frame, frame_addr, 1);
2832 return;
2833
2834 sigsegv:
2835 unlock_user_struct(frame, frame_addr, 1);
2836 force_sigsegv(sig);
2837 }
2838 #endif /* !defined(TARGET_PPC64) */
2839
2840 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2841 target_siginfo_t *info,
2842 target_sigset_t *set, CPUPPCState *env)
2843 {
2844 struct target_rt_sigframe *rt_sf;
2845 uint32_t *trampptr = NULL;
2846 struct target_mcontext *mctx = NULL;
2847 target_ulong rt_sf_addr, newsp = 0;
2848 int i, err = 0;
2849 #if defined(TARGET_PPC64)
2850 struct target_sigcontext *sc = NULL;
2851 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
2852 #endif
2853
2854 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
2855 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
2856 goto sigsegv;
2857
2858 tswap_siginfo(&rt_sf->info, info);
2859
2860 __put_user(0, &rt_sf->uc.tuc_flags);
2861 __put_user(0, &rt_sf->uc.tuc_link);
2862 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
2863 &rt_sf->uc.tuc_stack.ss_sp);
2864 __put_user(sas_ss_flags(env->gpr[1]),
2865 &rt_sf->uc.tuc_stack.ss_flags);
2866 __put_user(target_sigaltstack_used.ss_size,
2867 &rt_sf->uc.tuc_stack.ss_size);
2868 #if !defined(TARGET_PPC64)
2869 __put_user(h2g(&rt_sf->uc.tuc_mcontext),
2870 &rt_sf->uc.tuc_regs);
2871 #endif
2872 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
2873 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
2874 }
2875
2876 #if defined(TARGET_PPC64)
2877 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
2878 trampptr = &rt_sf->trampoline[0];
2879
2880 sc = &rt_sf->uc.tuc_sigcontext;
2881 __put_user(h2g(mctx), &sc->regs);
2882 __put_user(sig, &sc->signal);
2883 #else
2884 mctx = &rt_sf->uc.tuc_mcontext;
2885 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
2886 #endif
2887
2888 save_user_regs(env, mctx);
2889 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
2890
2891 /* The kernel checks for the presence of a VDSO here. We don't
2892 emulate a vdso, so use a sigreturn system call. */
2893 env->lr = (target_ulong) h2g(trampptr);
2894
2895 /* Turn off all fp exceptions. */
2896 env->fpscr = 0;
2897
2898 /* Create a stack frame for the caller of the handler. */
2899 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
2900 err |= put_user(env->gpr[1], newsp, target_ulong);
2901
2902 if (err)
2903 goto sigsegv;
2904
2905 /* Set up registers for signal handler. */
2906 env->gpr[1] = newsp;
2907 env->gpr[3] = (target_ulong) sig;
2908 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
2909 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
2910 env->gpr[6] = (target_ulong) h2g(rt_sf);
2911
2912 #if defined(TARGET_PPC64)
2913 if (get_ppc64_abi(image) < 2) {
2914 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
2915 struct target_func_ptr *handler =
2916 (struct target_func_ptr *)g2h(ka->_sa_handler);
2917 env->nip = tswapl(handler->entry);
2918 env->gpr[2] = tswapl(handler->toc);
2919 } else {
2920 /* ELFv2 PPC64 function pointers are entry points, but R12
2921 * must also be set */
2922 env->nip = tswapl((target_ulong) ka->_sa_handler);
2923 env->gpr[12] = env->nip;
2924 }
2925 #else
2926 env->nip = (target_ulong) ka->_sa_handler;
2927 #endif
2928
2929 /* Signal handlers are entered in big-endian mode. */
2930 env->msr &= ~(1ull << MSR_LE);
2931
2932 unlock_user_struct(rt_sf, rt_sf_addr, 1);
2933 return;
2934
2935 sigsegv:
2936 unlock_user_struct(rt_sf, rt_sf_addr, 1);
2937 force_sigsegv(sig);
2938
2939 }
2940
2941 #if !defined(TARGET_PPC64)
2942 long do_sigreturn(CPUPPCState *env)
2943 {
2944 struct target_sigcontext *sc = NULL;
2945 struct target_mcontext *sr = NULL;
2946 target_ulong sr_addr = 0, sc_addr;
2947 sigset_t blocked;
2948 target_sigset_t set;
2949
2950 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
2951 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
2952 goto sigsegv;
2953
2954 #if defined(TARGET_PPC64)
2955 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
2956 #else
2957 __get_user(set.sig[0], &sc->oldmask);
2958 __get_user(set.sig[1], &sc->_unused[3]);
2959 #endif
2960 target_to_host_sigset_internal(&blocked, &set);
2961 set_sigmask(&blocked);
2962
2963 __get_user(sr_addr, &sc->regs);
2964 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
2965 goto sigsegv;
2966 restore_user_regs(env, sr, 1);
2967
2968 unlock_user_struct(sr, sr_addr, 1);
2969 unlock_user_struct(sc, sc_addr, 1);
2970 return -TARGET_QEMU_ESIGRETURN;
2971
2972 sigsegv:
2973 unlock_user_struct(sr, sr_addr, 1);
2974 unlock_user_struct(sc, sc_addr, 1);
2975 force_sig(TARGET_SIGSEGV);
2976 return -TARGET_QEMU_ESIGRETURN;
2977 }
2978 #endif /* !defined(TARGET_PPC64) */
2979
2980 /* See arch/powerpc/kernel/signal_32.c. */
2981 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
2982 {
2983 struct target_mcontext *mcp;
2984 target_ulong mcp_addr;
2985 sigset_t blocked;
2986 target_sigset_t set;
2987
2988 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
2989 sizeof(set)))
2990 return 1;
2991
2992 #if defined(TARGET_PPC64)
2993 mcp_addr = h2g(ucp) +
2994 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
2995 #else
2996 __get_user(mcp_addr, &ucp->tuc_regs);
2997 #endif
2998
2999 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
3000 return 1;
3001
3002 target_to_host_sigset_internal(&blocked, &set);
3003 set_sigmask(&blocked);
3004 restore_user_regs(env, mcp, sig);
3005
3006 unlock_user_struct(mcp, mcp_addr, 1);
3007 return 0;
3008 }
3009
3010 long do_rt_sigreturn(CPUPPCState *env)
3011 {
3012 struct target_rt_sigframe *rt_sf = NULL;
3013 target_ulong rt_sf_addr;
3014
3015 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
3016 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
3017 goto sigsegv;
3018
3019 if (do_setcontext(&rt_sf->uc, env, 1))
3020 goto sigsegv;
3021
3022 do_sigaltstack(rt_sf_addr
3023 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
3024 0, env->gpr[1]);
3025
3026 unlock_user_struct(rt_sf, rt_sf_addr, 1);
3027 return -TARGET_QEMU_ESIGRETURN;
3028
3029 sigsegv:
3030 unlock_user_struct(rt_sf, rt_sf_addr, 1);
3031 force_sig(TARGET_SIGSEGV);
3032 return -TARGET_QEMU_ESIGRETURN;
3033 }
3034
3035 #elif defined(TARGET_XTENSA)
3036
3037 struct target_sigcontext {
3038 abi_ulong sc_pc;
3039 abi_ulong sc_ps;
3040 abi_ulong sc_lbeg;
3041 abi_ulong sc_lend;
3042 abi_ulong sc_lcount;
3043 abi_ulong sc_sar;
3044 abi_ulong sc_acclo;
3045 abi_ulong sc_acchi;
3046 abi_ulong sc_a[16];
3047 abi_ulong sc_xtregs;
3048 };
3049
3050 struct target_ucontext {
3051 abi_ulong tuc_flags;
3052 abi_ulong tuc_link;
3053 target_stack_t tuc_stack;
3054 struct target_sigcontext tuc_mcontext;
3055 target_sigset_t tuc_sigmask;
3056 };
3057
3058 struct target_rt_sigframe {
3059 target_siginfo_t info;
3060 struct target_ucontext uc;
3061 /* TODO: xtregs */
3062 uint8_t retcode[6];
3063 abi_ulong window[4];
3064 };
3065
3066 static abi_ulong get_sigframe(struct target_sigaction *sa,
3067 CPUXtensaState *env,
3068 unsigned long framesize)
3069 {
3070 abi_ulong sp = env->regs[1];
3071
3072 /* This is the X/Open sanctioned signal stack switching. */
3073 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
3074 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3075 }
3076 return (sp - framesize) & -16;
3077 }
3078
3079 static int flush_window_regs(CPUXtensaState *env)
3080 {
3081 uint32_t wb = env->sregs[WINDOW_BASE];
3082 uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1);
3083 unsigned d = ctz32(ws) + 1;
3084 unsigned i;
3085 int ret = 0;
3086
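/* Spill every live register window back to the user stack, mimicking
 * the hardware window-overflow handlers: each WINDOWSTART bit marks a
 * frame whose call increment d is 1/2/3 (call4/call8/call12). a0..a3
 * go to the 16-byte base save area below the relevant stack pointer
 * (a5/a9/a13 of the rotated window); the extra a4..a7/a4..a11 of
 * call8/call12 frames go to the area reached through the pointer
 * saved at sp - 12.
 */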
3087 for (i = d; i < env->config->nareg / 4; i += d) {
3088 uint32_t ssp, osp;
3089 unsigned j;
3090
3091 ws >>= d;
3092 xtensa_rotate_window(env, d);
3093
3094 if (ws & 0x1) {
3095 ssp = env->regs[5];
3096 d = 1;
3097 } else if (ws & 0x2) {
3098 ssp = env->regs[9];
3099 ret |= get_user_ual(osp, env->regs[1] - 12);
3100 osp -= 32;
3101 d = 2;
3102 } else if (ws & 0x4) {
3103 ssp = env->regs[13];
3104 ret |= get_user_ual(osp, env->regs[1] - 12);
3105 osp -= 48;
3106 d = 3;
3107 } else {
3108 g_assert_not_reached();
3109 }
3110
3111 for (j = 0; j < 4; ++j) {
3112 ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4);
3113 }
3114 for (j = 4; j < d * 4; ++j) {
3115 ret |= put_user_ual(env->regs[j], osp - 16 + j * 4);
3116 }
3117 }
3118 xtensa_rotate_window(env, d);
3119 g_assert(env->sregs[WINDOW_BASE] == wb);
3120 return ret == 0;
3121 }
3122
3123 static int setup_sigcontext(struct target_rt_sigframe *frame,
3124 CPUXtensaState *env)
3125 {
3126 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
3127 int i;
3128
3129 __put_user(env->pc, &sc->sc_pc);
3130 __put_user(env->sregs[PS], &sc->sc_ps);
3131 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
3132 __put_user(env->sregs[LEND], &sc->sc_lend);
3133 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
3134 if (!flush_window_regs(env)) {
3135 return 0;
3136 }
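/* After the flush, every parent call frame lives in user memory and
 * env->regs[0..15] hold only the current window, so copying a0..a15
 * into sc_a captures the complete live register state.
 */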
3137 for (i = 0; i < 16; ++i) {
3138 __put_user(env->regs[i], sc->sc_a + i);
3139 }
3140 __put_user(0, &sc->sc_xtregs);
3141 /* TODO: xtregs */
3142 return 1;
3143 }
3144
3145 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3146 target_siginfo_t *info,
3147 target_sigset_t *set, CPUXtensaState *env)
3148 {
3149 abi_ulong frame_addr;
3150 struct target_rt_sigframe *frame;
3151 uint32_t ra;
3152 int i;
3153
3154 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3155 trace_user_setup_rt_frame(env, frame_addr);
3156
3157 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3158 goto give_sigsegv;
3159 }
3160
3161 if (ka->sa_flags & TARGET_SA_SIGINFO) {
3162 tswap_siginfo(&frame->info, info);
3163 }
3164
3165 __put_user(0, &frame->uc.tuc_flags);
3166 __put_user(0, &frame->uc.tuc_link);
3167 __put_user(target_sigaltstack_used.ss_sp,
3168 &frame->uc.tuc_stack.ss_sp);
3169 __put_user(sas_ss_flags(env->regs[1]),
3170 &frame->uc.tuc_stack.ss_flags);
3171 __put_user(target_sigaltstack_used.ss_size,
3172 &frame->uc.tuc_stack.ss_size);
3173 if (!setup_sigcontext(frame, env)) {
3174 unlock_user_struct(frame, frame_addr, 0);
3175 goto give_sigsegv;
3176 }
3177 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
3178 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3179 }
3180
3181 if (ka->sa_flags & TARGET_SA_RESTORER) {
3182 ra = ka->sa_restorer;
3183 } else {
3184 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
3185 #ifdef TARGET_WORDS_BIGENDIAN
3186 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
3187 __put_user(0x22, &frame->retcode[0]);
3188 __put_user(0x0a, &frame->retcode[1]);
3189 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
3190 /* Generate instruction: SYSCALL */
3191 __put_user(0x00, &frame->retcode[3]);
3192 __put_user(0x05, &frame->retcode[4]);
3193 __put_user(0x00, &frame->retcode[5]);
3194 #else
3195 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
3196 __put_user(0x22, &frame->retcode[0]);
3197 __put_user(0xa0, &frame->retcode[1]);
3198 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
3199 /* Generate instruction: SYSCALL */
3200 __put_user(0x00, &frame->retcode[3]);
3201 __put_user(0x50, &frame->retcode[4]);
3202 __put_user(0x00, &frame->retcode[5]);
3203 #endif
3204 }
3205 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
3206 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
3207 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
3208 }
3209 memset(env->regs, 0, sizeof(env->regs));
3210 env->pc = ka->_sa_handler;
3211 env->regs[1] = frame_addr;
3212 env->sregs[WINDOW_BASE] = 0;
3213 env->sregs[WINDOW_START] = 1;
3214
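/* In the windowed ABI, a return address carries the caller's window
 * increment in its top two bits. Forcing 0x40000000 makes the handler
 * return as if it had been reached by a call4, with the low 30 bits
 * addressing the retcode trampoline (or sa_restorer).
 */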
3215 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
3216 env->regs[6] = sig;
3217 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
3218 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
3219 unlock_user_struct(frame, frame_addr, 1);
3220 return;
3221
3222 give_sigsegv:
3223 force_sigsegv(sig);
3224 return;
3225 }
3226
3227 static void restore_sigcontext(CPUXtensaState *env,
3228 struct target_rt_sigframe *frame)
3229 {
3230 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
3231 uint32_t ps;
3232 int i;
3233
3234 __get_user(env->pc, &sc->sc_pc);
3235 __get_user(ps, &sc->sc_ps);
3236 __get_user(env->sregs[LBEG], &sc->sc_lbeg);
3237 __get_user(env->sregs[LEND], &sc->sc_lend);
3238 __get_user(env->sregs[LCOUNT], &sc->sc_lcount);
3239
3240 env->sregs[WINDOW_BASE] = 0;
3241 env->sregs[WINDOW_START] = 1;
3242 env->sregs[PS] = deposit32(env->sregs[PS],
3243 PS_CALLINC_SHIFT,
3244 PS_CALLINC_LEN,
3245 extract32(ps, PS_CALLINC_SHIFT,
3246 PS_CALLINC_LEN));
3247 for (i = 0; i < 16; ++i) {
3248 __get_user(env->regs[i], sc->sc_a + i);
3249 }
3250 /* TODO: xtregs */
3251 }
3252
3253 long do_rt_sigreturn(CPUXtensaState *env)
3254 {
3255 abi_ulong frame_addr = env->regs[1];
3256 struct target_rt_sigframe *frame;
3257 sigset_t set;
3258
3259 trace_user_do_rt_sigreturn(env, frame_addr);
3260 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3261 goto badframe;
3262 }
3263 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
3264 set_sigmask(&set);
3265
3266 restore_sigcontext(env, frame);
3267
3268 if (do_sigaltstack(frame_addr +
3269 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3270 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
3271 goto badframe;
3272 }
3273 unlock_user_struct(frame, frame_addr, 0);
3274 return -TARGET_QEMU_ESIGRETURN;
3275
3276 badframe:
3277 unlock_user_struct(frame, frame_addr, 0);
3278 force_sig(TARGET_SIGSEGV);
3279 return -TARGET_QEMU_ESIGRETURN;
3280 }
3281 #endif
3282
3283 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
3284 struct emulated_sigtable *k)
3285 {
3286 CPUState *cpu = ENV_GET_CPU(cpu_env);
3287 abi_ulong handler;
3288 sigset_t set;
3289 target_sigset_t target_old_set;
3290 struct target_sigaction *sa;
3291 TaskState *ts = cpu->opaque;
3292
3293 trace_user_handle_signal(cpu_env, sig);
3294 /* dequeue signal */
3295 k->pending = 0;
3296
3297 sig = gdb_handlesig(cpu, sig);
3298 if (!sig) {
3299 sa = NULL;
3300 handler = TARGET_SIG_IGN;
3301 } else {
3302 sa = &sigact_table[sig - 1];
3303 handler = sa->_sa_handler;
3304 }
3305
3306 if (do_strace) {
3307 print_taken_signal(sig, &k->info);
3308 }
3309
3310 if (handler == TARGET_SIG_DFL) {
3311 /* default handler: ignore some signals; the others are job control or fatal */
3312 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
3313 kill(getpid(), SIGSTOP);
3314 } else if (sig != TARGET_SIGCHLD &&
3315 sig != TARGET_SIGURG &&
3316 sig != TARGET_SIGWINCH &&
3317 sig != TARGET_SIGCONT) {
3318 dump_core_and_abort(sig);
3319 }
3320 } else if (handler == TARGET_SIG_IGN) {
3321 /* ignore sig */
3322 } else if (handler == TARGET_SIG_ERR) {
3323 dump_core_and_abort(sig);
3324 } else {
3325 /* compute the blocked signals during the handler execution */
3326 sigset_t *blocked_set;
3327
3328 target_to_host_sigset(&set, &sa->sa_mask);
3329 /* SA_NODEFER indicates that the current signal should not be
3330 blocked during the handler */
3331 if (!(sa->sa_flags & TARGET_SA_NODEFER))
3332 sigaddset(&set, target_to_host_signal(sig));
3333
3334 /* save the previous blocked signal state to restore it at the
3335 end of the signal execution (see do_sigreturn) */
3336 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
3337
3338 /* block signals in the handler */
3339 blocked_set = ts->in_sigsuspend ?
3340 &ts->sigsuspend_mask : &ts->signal_mask;
3341 sigorset(&ts->signal_mask, blocked_set, &set);
3342 ts->in_sigsuspend = 0;
3343
3344 /* if the CPU is in VM86 mode, we restore the 32 bit values */
3345 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
3346 {
3347 CPUX86State *env = cpu_env;
3348 if (env->eflags & VM_MASK)
3349 save_v86_state(env);
3350 }
3351 #endif
3352 /* prepare the stack frame of the virtual CPU */
3353 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
3354 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
3355 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
3356 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
3357 || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
3358 /* These targets do not have traditional signals. */
3359 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
3360 #else
3361 if (sa->sa_flags & TARGET_SA_SIGINFO)
3362 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
3363 else
3364 setup_frame(sig, sa, &target_old_set, cpu_env);
3365 #endif
3366 if (sa->sa_flags & TARGET_SA_RESETHAND) {
3367 sa->_sa_handler = TARGET_SIG_DFL;
3368 }
3369 }
3370 }
3371
3372 void process_pending_signals(CPUArchState *cpu_env)
3373 {
3374 CPUState *cpu = ENV_GET_CPU(cpu_env);
3375 int sig;
3376 TaskState *ts = cpu->opaque;
3377 sigset_t set;
3378 sigset_t *blocked_set;
3379
3380 while (atomic_read(&ts->signal_pending)) {
3381 /* FIXME: This is not threadsafe. */
3382 sigfillset(&set);
3383 sigprocmask(SIG_SETMASK, &set, 0);
3384
3385 restart_scan:
3386 sig = ts->sync_signal.pending;
3387 if (sig) {
3388 /* Synchronous signals are forced;
3389 * see force_sig_info() and its callers in Linux.
3390 * Note that not all of our queue_signal() calls in QEMU correspond
3391 * to force_sig_info() calls in Linux (some are send_sig_info()).
3392 * However it seems like a kernel bug to me to allow the process
3393 * to block a synchronous signal since it could then just end up
3394 * looping round and round indefinitely.
3395 */
3396 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
3397 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
3398 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
3399 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
3400 }
3401
3402 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
3403 }
3404
3405 for (sig = 1; sig <= TARGET_NSIG; sig++) {
3406 blocked_set = ts->in_sigsuspend ?
3407 &ts->sigsuspend_mask : &ts->signal_mask;
3408
3409 if (ts->sigtab[sig - 1].pending &&
3410 (!sigismember(blocked_set,
3411 target_to_host_signal_table[sig]))) {
3412 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
3413 /* Restart scan from the beginning, as handle_pending_signal
3414 * might have resulted in a new synchronous signal (eg SIGSEGV).
3415 */
3416 goto restart_scan;
3417 }
3418 }
3419
3420 /* if no signal is pending, unblock signals and recheck (the act
3421 * of unblocking might cause us to take another host signal which
3422 * will set signal_pending again).
3423 */
3424 atomic_set(&ts->signal_pending, 0);
3425 ts->in_sigsuspend = 0;
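/* SIGSEGV and SIGBUS are never blocked at the host level: QEMU itself
 * relies on receiving them to detect guest memory faults, even while
 * the guest has the corresponding target signals blocked.
 */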
3426 set = ts->signal_mask;
3427 sigdelset(&set, SIGSEGV);
3428 sigdelset(&set, SIGBUS);
3429 sigprocmask(SIG_SETMASK, &set, 0);
3430 }
3431 ts->in_sigsuspend = 0;
3432 }