]> git.proxmox.com Git - mirror_qemu.git/blob - linux-user/signal.c
Merge remote-tracking branch 'remotes/kraxel/tags/vga-20180312-pull-request' into...
[mirror_qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
/* Emulated sigaltstack state for the guest; starts out disabled,
 * matching a freshly started process.
 */
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Guest view of each guest signal's disposition, indexed by (signal - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
/* Host -> target signal number mapping, indexed by host signal.
 * Entries left zero here are filled in as the identity mapping by
 * signal_init().
 */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the above, built by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
110
111 static inline void target_sigemptyset(target_sigset_t *set)
112 {
113 memset(set, 0, sizeof(*set));
114 }
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
194 int block_signals(void)
195 {
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
198
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
202 */
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
205
206 return atomic_xchg(&ts->signal_pending, 1);
207 }
208
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Report the old mask before any modification, as the kernel does. */
    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Serialize against signal delivery; if a signal was already
         * pending we must let it be processed first and restart.
         */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            /* No sigandset-with-complement helper exists, so clear the
             * requested signals one by one.
             */
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
255
#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 * NOTE(review): the #if guard presumably exists because these targets
 * never call this helper (avoiding an unused-function warning) — confirm.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
268
269 /* siginfo conversion */
270
/* Convert a host siginfo_t into the guest's layout, leaving all fields
 * in host byte order; tswap_siginfo() performs the byte swapping later.
 * The guessed union discriminator is stashed in the top 16 bits of
 * si_code (see the long comment below).
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    /* Provisional; overwritten by the deposit32() at the end. */
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
347
/* Byte-swap a target_siginfo_t that host_to_target_siginfo_noswap()
 * produced, writing it out in guest byte order via __put_user.
 * The si_type stashed in the top 16 bits of si_code selects which
 * union members are valid; those bits are stripped before the code
 * reaches the guest (lower 16 bits are sign-extended).
 */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
405
406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
407 {
408 target_siginfo_t tgt_tmp;
409 host_to_target_siginfo_noswap(&tgt_tmp, info);
410 tswap_siginfo(tinfo, &tgt_tmp);
411 }
412
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    /* Round-trip through long; may truncate if host pointers are wider
     * than abi_ulong (see the XXX above).
     */
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
430
431 static int fatal_signal (int sig)
432 {
433 switch (sig) {
434 case TARGET_SIGCHLD:
435 case TARGET_SIGURG:
436 case TARGET_SIGWINCH:
437 /* Ignored by default. */
438 return 0;
439 case TARGET_SIGCONT:
440 case TARGET_SIGSTOP:
441 case TARGET_SIGTSTP:
442 case TARGET_SIGTTIN:
443 case TARGET_SIGTTOU:
444 /* Job control signals. */
445 return 0;
446 default:
447 return 1;
448 }
449 }
450
451 /* returns 1 if given signal should dump core if not handled */
452 static int core_dump_signal(int sig)
453 {
454 switch (sig) {
455 case TARGET_SIGABRT:
456 case TARGET_SIGFPE:
457 case TARGET_SIGILL:
458 case TARGET_SIGQUIT:
459 case TARGET_SIGSEGV:
460 case TARGET_SIGTRAP:
461 case TARGET_SIGBUS:
462 return (1);
463 default:
464 return (0);
465 }
466 }
467
/* One-time signal setup: build the host<->target signal translation
 * tables, inherit the host signal mask, and install our host handler
 * for every default-fatal signal so exceptions can be forwarded to
 * the guest.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record the pre-existing host disposition in the guest table. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
514
515 #ifndef TARGET_UNICORE32
516 /* Force a synchronously taken signal. The kernel force_sig() function
517 * also forces the signal to "not blocked, not ignored", but for QEMU
518 * that work is done in process_pending_signals().
519 */
520 static void force_sig(int sig)
521 {
522 CPUState *cpu = thread_cpu;
523 CPUArchState *env = cpu->env_ptr;
524 target_siginfo_t info;
525
526 info.si_signo = sig;
527 info.si_errno = 0;
528 info.si_code = TARGET_SI_KERNEL;
529 info._sifields._kill._pid = 0;
530 info._sifields._kill._uid = 0;
531 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
532 }
533
534 /* Force a SIGSEGV if we couldn't write to memory trying to set
535 * up the signal frame. oldsig is the signal we were trying to handle
536 * at the point of failure.
537 */
538 #if !defined(TARGET_RISCV)
539 static void force_sigsegv(int oldsig)
540 {
541 if (oldsig == SIGSEGV) {
542 /* Make sure we don't try to deliver the signal again; this will
543 * end up with handle_pending_signal() calling dump_core_and_abort().
544 */
545 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
546 }
547 force_sig(TARGET_SIGSEGV);
548 }
549 #endif
550
551 #endif
552
/* abort execution with signal */
/* Terminate the whole emulation as if the guest died from target_sig:
 * optionally dump a guest core, then re-raise the host signal with the
 * default handler installed so qemu's own exit status is "killed by
 * signal", which the parent can observe via waitpid().
 */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
    of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
606
/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    /* Record the union discriminator in the top 16 bits of si_code,
     * as host_to_target_siginfo_noswap() does, so tswap_siginfo()
     * can later pick the right union members.
     */
    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    /* This is the synchronous-signal slot: only one can be pending. */
    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
625
#ifndef HAVE_SAFE_SYSCALL
/* When HAVE_SAFE_SYSCALL is defined, the safe-syscall support provides
 * a real implementation that rewinds the PC in the host signal context
 * if we were interrupted inside a blocking syscall.
 */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
632
/* The single host-side signal handler installed by signal_init().
 * Forwards CPU exceptions (SIGSEGV/SIGBUS) to the CPU emulator,
 * records everything else as a pending guest signal, and adjusts the
 * host signal mask in the ucontext so most signals stay blocked until
 * process_pending_signals() runs.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    /* Record the signal in the per-signal pending table. */
    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
688
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
/* Emulate the sigaltstack syscall: optionally report the current
 * altstack into uoss_addr and/or install a new one from uss_addr.
 * sp is the guest stack pointer at syscall time, used for the
 * "currently on the altstack" checks.
 */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    /* Snapshot the old state first, before any modification below. */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the altstack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
761
/* do_sigaction() return target values and host errnos */
/* Emulate sigaction for the guest: read/update the guest disposition
 * in sigact_table and mirror the change into the host kernel so that
 * ignore state (and hence syscall interruption behaviour) matches.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    /* Serialize against signal delivery while mutating sigact_table. */
    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS handlers must stay ours: they detect exceptions. */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
824
#if defined(TARGET_I386)
/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */

/* One 80-bit x87 register as laid out in the legacy FPU save area. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* One x87 register in the FXSR save area (padded to 16 bytes). */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

/* One 128-bit XMM register. */
struct target_xmmreg {
    uint32_t element[4];
};

struct target_fpstate_32 {
    /* Regular FPU environment */
    uint32_t cw;
    uint32_t sw;
    uint32_t tag;
    uint32_t ipoff;
    uint32_t cssel;
    uint32_t dataoff;
    uint32_t datasel;
    struct target_fpreg st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    uint32_t _fxsr_env[6];   /* FXSR FPU env is ignored */
    uint32_t mxcsr;
    uint32_t reserved;
    struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg xmm[8];
    uint32_t padding[56];
};

struct target_fpstate_64 {
    /* FXSAVE format */
    uint16_t cw;
    uint16_t sw;
    uint16_t twd;
    uint16_t fop;
    uint64_t rip;
    uint64_t rdp;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint32_t st_space[32];
    uint32_t xmm_space[64];
    uint32_t reserved[24];
};

#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
#else
# define target_fpstate target_fpstate_64
#endif

struct target_sigcontext_32 {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    uint32_t edi;
    uint32_t esi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t ebx;
    uint32_t edx;
    uint32_t ecx;
    uint32_t eax;
    uint32_t trapno;
    uint32_t err;
    uint32_t eip;
    uint16_t cs, __csh;
    uint32_t eflags;
    uint32_t esp_at_signal;
    uint16_t ss, __ssh;
    uint32_t fpstate; /* pointer */
    uint32_t oldmask;
    uint32_t cr2;
};

struct target_sigcontext_64 {
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rdi;
    uint64_t rsi;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rdx;
    uint64_t rax;
    uint64_t rcx;
    uint64_t rsp;
    uint64_t rip;

    uint64_t eflags;

    uint16_t cs;
    uint16_t gs;
    uint16_t fs;
    uint16_t ss;

    uint64_t err;
    uint64_t trapno;
    uint64_t oldmask;
    uint64_t cr2;

    uint64_t fpstate; /* pointer */
    uint64_t padding[8];
};

#ifndef TARGET_X86_64
# define target_sigcontext target_sigcontext_32
#else
# define target_sigcontext target_sigcontext_64
#endif

/* see Linux/include/uapi/asm-generic/ucontext.h */
struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};

#ifndef TARGET_X86_64
/* Non-RT signal frame as pushed onto the guest stack (i386 only). */
struct sigframe {
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

/* RT signal frame (i386): includes full siginfo and ucontext. */
struct rt_sigframe {
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

#else

/* RT signal frame (x86-64): the only frame format on 64-bit. */
struct rt_sigframe {
    abi_ulong pretcode;
    struct target_ucontext uc;
    struct target_siginfo info;
    struct target_fpstate fpstate;
};

#endif
993
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
/* Fill in the guest sigcontext (and FPU save area at fpstate_addr)
 * from the current CPU state; mask is the guest signal mask word to
 * record in sc->oldmask.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
#ifndef TARGET_X86_64
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    /* Legacy fsave format; 0xffff magic marks "regular FPU data only". */
    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
#else
    __put_user(env->regs[R_EDI], &sc->rdi);
    __put_user(env->regs[R_ESI], &sc->rsi);
    __put_user(env->regs[R_EBP], &sc->rbp);
    __put_user(env->regs[R_ESP], &sc->rsp);
    __put_user(env->regs[R_EBX], &sc->rbx);
    __put_user(env->regs[R_EDX], &sc->rdx);
    __put_user(env->regs[R_ECX], &sc->rcx);
    __put_user(env->regs[R_EAX], &sc->rax);

    __put_user(env->regs[8], &sc->r8);
    __put_user(env->regs[9], &sc->r9);
    __put_user(env->regs[10], &sc->r10);
    __put_user(env->regs[11], &sc->r11);
    __put_user(env->regs[12], &sc->r12);
    __put_user(env->regs[13], &sc->r13);
    __put_user(env->regs[14], &sc->r14);
    __put_user(env->regs[15], &sc->r15);

    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->rip);

    __put_user(env->eflags, &sc->eflags);
    __put_user(env->segs[R_CS].selector, &sc->cs);
    __put_user((uint16_t)0, &sc->gs);
    __put_user((uint16_t)0, &sc->fs);
    __put_user(env->segs[R_SS].selector, &sc->ss);

    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);

    /* fpstate_addr must be 16 byte aligned for fxsave */
    assert(!(fpstate_addr & 0xf));

    cpu_x86_fxsave(env, fpstate_addr);
    __put_user(fpstate_addr, &sc->fpstate);
#endif
}
1076
1077 /*
1078 * Determine which stack to use..
1079 */
1080
/*
 * Choose the guest stack address for a new x86 signal frame of
 * frame_size bytes.  Starts from the current ESP/RSP (minus the
 * 128-byte red zone on x86-64), switches to the alternate signal
 * stack when SA_ONSTACK is set and we are not already on it, then
 * applies the ABI alignment: 8 bytes on i386; on x86-64, 16-byte
 * aligned minus 8 so that the frame leaves the handler's stack
 * correctly aligned.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
#ifdef TARGET_X86_64
    esp -= 128; /* this is the redzone */
#endif

    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {
#ifndef TARGET_X86_64
        /* This is the legacy signal stack switching.  If the handler's SS
         * is not the flat user data segment and no restorer flag is set
         * but a restorer address exists, it is reused as the stack.  */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
#endif
    }

#ifndef TARGET_X86_64
    return (esp - frame_size) & -8ul;
#else
    return ((esp - frame_size) & (~15ul)) - 8;
#endif
}
1114
1115 #ifndef TARGET_X86_64
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/*
 * Build an old-style (non-RT) i386 signal frame on the guest stack and
 * point the CPU at the handler.  The frame carries the signal number,
 * a sigcontext with FP state, the extra words of the blocked-signal
 * mask, and a return trampoline used when no SA_RESTORER was given.
 * If the frame cannot be mapped writable, the process gets SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    /* Word 0 of the mask lives inside the sigcontext (oldmask); the
       remaining words go into extramask[] below.  */
    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler: user code/data selectors,
       single-step (TF) cleared.  */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1173 #endif
1174
/* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
/*
 * Build an RT signal frame (siginfo + ucontext) for i386 or x86-64 and
 * redirect the CPU to the handler.  The 32-bit frame additionally
 * carries the signal number and pointers to info/uc on the stack,
 * which the 64-bit ABI passes in registers instead.  Failure to map
 * the frame raises SIGSEGV.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
#ifndef TARGET_X86_64
    abi_ulong addr;
#endif
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* These fields are only in rt_sigframe on 32 bit */
#ifndef TARGET_X86_64
    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
#endif
    /* siginfo is only filled in for SA_SIGINFO handlers.  */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        tswap_siginfo(&frame->info, info);
    }

    /* Create the ucontext: sigaltstack description, machine context
       (with FP state), and the full blocked-signal mask.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
#ifndef TARGET_X86_64
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }
#else
    /* XXX: Would be slightly better to return -EFAULT here if test fails
       assert(ka->sa_flags & TARGET_SA_RESTORER); */
    __put_user(ka->sa_restorer, &frame->pretcode);
#endif

    /* Set up registers for signal handler; argument registers differ
       between the 32- and 64-bit calling conventions.  */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

#ifndef TARGET_X86_64
    env->regs[R_EAX] = sig;
    env->regs[R_EDX] = (unsigned long)&frame->info;
    env->regs[R_ECX] = (unsigned long)&frame->uc;
#else
    env->regs[R_EAX] = 0;
    env->regs[R_EDI] = sig;
    env->regs[R_ESI] = (unsigned long)&frame->info;
    env->regs[R_EDX] = (unsigned long)&frame->uc;
#endif

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1269
/*
 * Reload guest CPU state from a sigcontext written by setup_sigcontext()
 * (and possibly modified by the guest's signal handler).  Returns 0 on
 * success, 1 when the saved FP-state area is not readable.
 */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

#ifndef TARGET_X86_64
    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);

    env->eip = tswapl(sc->eip);
#else
    env->regs[8] = tswapl(sc->r8);
    env->regs[9] = tswapl(sc->r9);
    env->regs[10] = tswapl(sc->r10);
    env->regs[11] = tswapl(sc->r11);
    env->regs[12] = tswapl(sc->r12);
    env->regs[13] = tswapl(sc->r13);
    env->regs[14] = tswapl(sc->r14);
    env->regs[15] = tswapl(sc->r15);

    env->regs[R_EDI] = tswapl(sc->rdi);
    env->regs[R_ESI] = tswapl(sc->rsi);
    env->regs[R_EBP] = tswapl(sc->rbp);
    env->regs[R_EBX] = tswapl(sc->rbx);
    env->regs[R_EDX] = tswapl(sc->rdx);
    env->regs[R_EAX] = tswapl(sc->rax);
    env->regs[R_ECX] = tswapl(sc->rcx);
    env->regs[R_ESP] = tswapl(sc->rsp);

    env->eip = tswapl(sc->rip);
#endif

    /* Force RPL 3 on the reloaded code/stack selectors.  */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Only the eflags bits in 0x40DD5 are taken from the saved value;
       all other bits keep their current state.  */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        /* i386 frames use fsave format; x86-64 uses fxsave.  */
#ifndef TARGET_X86_64
        cpu_x86_frstor(env, fpstate_addr, 1);
#else
        cpu_x86_fxrstor(env, fpstate_addr);
#endif
    }

    return err;
badframe:
    return 1;
}
1338
1339 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1340 #ifndef TARGET_X86_64
/*
 * Old-style sigreturn syscall (i386 only): restore the blocked-signal
 * mask and CPU state saved in the sigframe, then resume the
 * interrupted code.  A bad frame raises SIGSEGV.
 */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    /* The frame sits 8 bytes below the current ESP — NOTE(review):
       this offset mirrors the kernel's sys_sigreturn (the retcode stub
       pops part of the frame before the syscall); confirm against
       arch/x86 signal code.  */
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals: word 0 from the sigcontext, the rest from
       extramask[] (see setup_frame).  */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1372 #endif
1373
/*
 * rt_sigreturn syscall for x86: restore the signal mask, CPU state and
 * sigaltstack settings from the rt_sigframe, then resume the
 * interrupted code.  A bad frame raises SIGSEGV.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    /* The frame starts one word below the current stack pointer.  */
    frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Re-establish the saved alternate-stack configuration.  */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1404
1405 #elif defined(TARGET_AARCH64)
1406
/* Guest-visible AArch64 sigcontext: fault address, general registers,
 * and a reserved area carrying the FP/SIMD (and other) records.  */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};
1417
/* Guest-visible AArch64 ucontext as laid out in the signal frame.  */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};
1428
1429 /*
1430 * Header to be used at the beginning of structures extending the user
1431 * context. Such structures must be placed after the rt_sigframe on the stack
1432 * and be 16-byte aligned. The last structure must be a dummy one with the
1433 * magic and size set to 0.
1434 */
struct target_aarch64_ctx {
    uint32_t magic;  /* record type; 0 terminates the record list */
    uint32_t size;   /* total record size, including this header */
};
1439
#define TARGET_FPSIMD_MAGIC 0x46508001

/* FP/SIMD state record: FPSR/FPCR plus the 32 128-bit vector
 * registers, each stored as two 64-bit halves.  */
struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};
1448
#define TARGET_EXTRA_MAGIC  0x45585401

/* Link record pointing to additional context space that did not fit
 * inside sigcontext.__reserved[].  */
struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size; /* size in bytes of the extra space */
    uint32_t reserved[3];
};
1457
#define TARGET_SVE_MAGIC    0x53564501

/* Header of the SVE state record; vl is the vector length in bytes.  */
struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual SVE data immediately follows.  It is layed out
     * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
     * the original struct pointer.
     */
};
1469
/* One SVE vector-length quantum (VQ) is 128 bits = 16 bytes.  */
#define TARGET_SVE_VQ_BYTES  16

/* Sizes of one Z (vector) and one P (predicate) register at a given VQ.  */
#define TARGET_SVE_SIG_ZREG_SIZE(VQ)  ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ)  ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

/* Offsets of the register data relative to the start of
 * target_sve_context: 32 Z registers, then 16 P registers, then the
 * FFR (stored as P register 16).  */
#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
1485
/* The rt signal frame proper: siginfo followed by the ucontext.  */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};
1490
/* Synthetic record placed after the sigframe: saved FP/LR for stack
 * unwinding plus the sigreturn trampoline instructions.  */
struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
1496
/*
 * Fill in the "general" part of an AArch64 rt_sigframe: ucontext
 * bookkeeping, the sigaltstack description, the 31 X registers plus
 * SP/PC/PSTATE, the faulting address, and the saved signal mask.
 */
static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp, &sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]), &sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &sf->uc.tuc_stack.ss_size);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    /* xregs[31] is the stack pointer.  */
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}
1522
/*
 * Write the FPSIMD record: magic/size header, FPSR/FPCR, and the 32
 * Q registers as pairs of 64-bit halves.  On a big-endian target the
 * halves are stored in swapped order (matched by the swap in
 * target_restore_fpsimd_record).
 */
static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
1544
/*
 * Write an EXTRA record pointing to extra_size bytes of additional
 * record space at guest address datap.
 */
static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}
1553
/* Write the terminating record (magic and size both zero).  */
static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}
1559
/*
 * Write the SVE record: header, vector length in bytes, then the 32 Z
 * registers and 17 predicate registers (P0-P15 plus FFR as P16) as a
 * little-endian byte stream at the TARGET_SVE_SIG_* offsets.
 */
static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int vq, int size)
{
    int i, j;

    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    /* <= 16: P0-P15 plus the FFR at the P16 slot.  */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}
1587
/*
 * Restore the general part of an AArch64 rt_sigframe: re-apply the
 * saved signal mask and reload X0-X30, SP, PC and PSTATE.
 */
static void target_restore_general_frame(CPUARMState *env,
                                         struct target_rt_sigframe *sf)
{
    sigset_t set;
    uint64_t pstate;
    int i;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);
}
1607
/*
 * Restore FPSR/FPCR and the 32 Q registers from an FPSIMD record,
 * undoing the half-swap applied on big-endian targets at setup time.
 */
static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
1630
/*
 * Restore the Z and predicate registers (P0-P15 plus FFR as P16) from
 * an SVE record, reassembling each predicate's 64-bit words from four
 * 16-bit little-endian chunks.
 */
static void target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve, int vq)
{
    int i, j;

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            /* The first chunk of each 64-bit word overwrites it; later
               chunks are OR-ed into place.  */
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
}
1659
1660 static int target_restore_sigframe(CPUARMState *env,
1661 struct target_rt_sigframe *sf)
1662 {
1663 struct target_aarch64_ctx *ctx, *extra = NULL;
1664 struct target_fpsimd_context *fpsimd = NULL;
1665 struct target_sve_context *sve = NULL;
1666 uint64_t extra_datap = 0;
1667 bool used_extra = false;
1668 bool err = false;
1669 int vq = 0, sve_size = 0;
1670
1671 target_restore_general_frame(env, sf);
1672
1673 ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
1674 while (ctx) {
1675 uint32_t magic, size, extra_size;
1676
1677 __get_user(magic, &ctx->magic);
1678 __get_user(size, &ctx->size);
1679 switch (magic) {
1680 case 0:
1681 if (size != 0) {
1682 err = true;
1683 goto exit;
1684 }
1685 if (used_extra) {
1686 ctx = NULL;
1687 } else {
1688 ctx = extra;
1689 used_extra = true;
1690 }
1691 continue;
1692
1693 case TARGET_FPSIMD_MAGIC:
1694 if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
1695 err = true;
1696 goto exit;
1697 }
1698 fpsimd = (struct target_fpsimd_context *)ctx;
1699 break;
1700
1701 case TARGET_SVE_MAGIC:
1702 if (arm_feature(env, ARM_FEATURE_SVE)) {
1703 vq = (env->vfp.zcr_el[1] & 0xf) + 1;
1704 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
1705 if (!sve && size == sve_size) {
1706 sve = (struct target_sve_context *)ctx;
1707 break;
1708 }
1709 }
1710 err = true;
1711 goto exit;
1712
1713 case TARGET_EXTRA_MAGIC:
1714 if (extra || size != sizeof(struct target_extra_context)) {
1715 err = true;
1716 goto exit;
1717 }
1718 __get_user(extra_datap,
1719 &((struct target_extra_context *)ctx)->datap);
1720 __get_user(extra_size,
1721 &((struct target_extra_context *)ctx)->size);
1722 extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
1723 break;
1724
1725 default:
1726 /* Unknown record -- we certainly didn't generate it.
1727 * Did we in fact get out of sync?
1728 */
1729 err = true;
1730 goto exit;
1731 }
1732 ctx = (void *)ctx + size;
1733 }
1734
1735 /* Require FPSIMD always. */
1736 if (fpsimd) {
1737 target_restore_fpsimd_record(env, fpsimd);
1738 } else {
1739 err = true;
1740 }
1741
1742 /* SVE data, if present, overwrites FPSIMD data. */
1743 if (sve) {
1744 target_restore_sve_record(env, sve, vq);
1745 }
1746
1747 exit:
1748 unlock_user(extra, extra_datap, 0);
1749 return err;
1750 }
1751
1752 static abi_ulong get_sigframe(struct target_sigaction *ka,
1753 CPUARMState *env, int size)
1754 {
1755 abi_ulong sp;
1756
1757 sp = env->xregs[31];
1758
1759 /*
1760 * This is the X/Open sanctioned signal stack switching.
1761 */
1762 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1763 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1764 }
1765
1766 sp = (sp - size) & ~15;
1767
1768 return sp;
1769 }
1770
/* Running layout of an AArch64 signal frame under construction:
 * cumulative size plus the offsets of the extra area and of the
 * end-marker records in both the standard and extra areas.  */
typedef struct {
    int total_size;    /* bytes allocated so far */
    int extra_base;    /* offset of the extra area (0 = none yet) */
    int extra_size;    /* bytes allocated in the extra area */
    int std_end_ofs;   /* offset of the end record in standard space */
    int extra_ofs;     /* offset of the EXTRA record itself */
    int extra_end_ofs; /* offset of the end record in extra space */
} target_sigframe_layout;
1779
1780 static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
1781 {
1782 /* Make sure there will always be space for the end marker. */
1783 const int std_size = sizeof(struct target_rt_sigframe)
1784 - sizeof(struct target_aarch64_ctx);
1785 int this_loc = l->total_size;
1786
1787 if (l->extra_base) {
1788 /* Once we have begun an extra space, all allocations go there. */
1789 l->extra_size += this_size;
1790 } else if (this_size + this_loc > std_size) {
1791 /* This allocation does not fit in the standard space. */
1792 /* Allocate the extra record. */
1793 l->extra_ofs = this_loc;
1794 l->total_size += sizeof(struct target_extra_context);
1795
1796 /* Allocate the standard end record. */
1797 l->std_end_ofs = l->total_size;
1798 l->total_size += sizeof(struct target_aarch64_ctx);
1799
1800 /* Allocate the requested record. */
1801 l->extra_base = this_loc = l->total_size;
1802 l->extra_size = this_size;
1803 }
1804 l->total_size += this_size;
1805
1806 return this_loc;
1807 }
1808
/*
 * Build the complete AArch64 rt signal frame: compute the record
 * layout (FPSIMD, optional SVE, EXTRA/end markers, unwind record),
 * allocate stack space, write all records, and point the CPU at the
 * handler.  If the frame cannot be mapped, force SIGSEGV.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space.  */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space.  */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (arm_feature(env, ARM_FEATURE_SVE)) {
        vq = (env->vfp.zcr_el[1] & 0xf) + 1;
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker.  The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because we cheat
         * std_size therein to reserve space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* Reserve space for the return code.  On a real system this would
     * be within the VDSO.  So, despite the name this is not a "real"
     * record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);

    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Write all the records at their computed offsets.  */
    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
    }

    /* Set up the stack frame for unwinding.  */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /*
         * mov x8,#__NR_rt_sigreturn; svc #0
         * Since these are instructions they need to be put as little-endian
         * regardless of target default or current CPU endianness.
         */
        __put_user_e(0xd2801168, &fr->tramp[0], le);
        __put_user_e(0xd4000001, &fr->tramp[1], le);
        return_addr = frame_addr + fr_ofs
                    + offsetof(struct target_rt_frame_record, tramp);
    }
    /* Handler arguments: x0 = signal number; x1/x2 = siginfo/ucontext
       pointers when siginfo was supplied.  */
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = frame_addr + fr_ofs;
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}
1911
/* Entry point for SA_SIGINFO signals: build a frame carrying the
 * supplied siginfo.  */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}
1918
1919 static void setup_frame(int sig, struct target_sigaction *ka,
1920 target_sigset_t *set, CPUARMState *env)
1921 {
1922 target_setup_frame(sig, ka, 0, set, env);
1923 }
1924
/*
 * rt_sigreturn for AArch64: SP must point at the 16-byte aligned frame
 * built by target_setup_frame().  Restore all state and the saved
 * sigaltstack settings; any bad frame raises SIGSEGV.
 */
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    /* The frame we wrote was 16-byte aligned; reject anything else.  */
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1957
/* AArch64 has no separate old-style sigreturn; both entry points
 * restore an rt frame.  */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1962
1963 #elif defined(TARGET_ARM)
1964
/* Guest-visible ARM sigcontext: trap info, saved mask word, the 16
 * core registers, CPSR and the faulting address.  */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};
1988
/* ARM ucontext variant without the trailing coprocessor register
 * space (compare target_ucontext_v2).  */
struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
};
1996
/* ARM ucontext variant with a trailing register space used for
 * magic-tagged coprocessor save records (VFP, iWMMXt).  */
struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};
2006
/* VFP register dump: 32 double registers plus FPSCR.  */
struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};
2011
/* VFP exception registers (FPEXC, FPINST, FPINST2).  */
struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};
2017
/* Magic-tagged VFP save record placed in tuc_regspace.  */
struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));
2024
/* Magic-tagged iWMMXt save record placed in tuc_regspace.  */
struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));
2037
/* Magic numbers tagging the save records in tuc_regspace.  */
#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

/* Old-style (v1) signal frame: bare sigcontext plus the extra words
 * of the blocked-signal mask and the return trampoline.  */
struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};
2047
/* New-style (v2) signal frame: the sigcontext lives inside the
 * v2 ucontext.  */
struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};
2053
/* v1 rt signal frame: pointers to the info/uc members precede the
 * data itself.  */
struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};
2062
/* v2 rt signal frame: siginfo followed by the v2 ucontext.  */
struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};
2069
#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Return trampolines, indexed by (2 * is_siginfo + is_thumb); see
 * setup_return().  */
static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
2089
2090
/* Stub: user-mode QEMU accepts any register state unconditionally
 * (the kernel's equivalent validates the restored CPSR).  */
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
2095
/*
 * Save the guest's core registers, CPSR and old blocked-signal mask
 * word into an ARM sigcontext.  trap_no, error_code and fault_address
 * are recorded as zero: no equivalent state is tracked here.
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
2125
2126 static inline abi_ulong
2127 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
2128 {
2129 unsigned long sp = regs->regs[13];
2130
2131 /*
2132 * This is the X/Open sanctioned signal stack switching.
2133 */
2134 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
2135 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2136 }
2137 /*
2138 * ATPCS B01 mandates 8-byte alignment
2139 */
2140 return (sp - framesize) & ~7;
2141 }
2142
/*
 * Point the CPU at the guest signal handler: r0 = signal number,
 * sp = frame, lr = return address, pc = handler.  Bit 0 of the handler
 * address selects Thumb state, which is mirrored into CPSR.T; any
 * in-progress IT block is cancelled.  If the guest supplied a restorer
 * (SA_RESTORER) lr points there, otherwise one of the retcodes[]
 * trampolines is written into the frame (*rc, at guest address
 * rc_addr) and lr points at it, with bit 0 set for Thumb.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        /* retcodes[] index: bit 0 = thumb, bit 1 = rt signal. */
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    /* Strip the interworking bit(s) to get the real entry address. */
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
2179
/*
 * Append a VFP coprocessor frame (magic, size, d0-d31, FPSCR and the
 * exception registers) at *regspace; returns the first free slot
 * after the frame so the caller can chain further coprocessor frames.
 */
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}
2196
2197 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
2198 CPUARMState *env)
2199 {
2200 int i;
2201 struct target_iwmmxt_sigframe *iwmmxtframe;
2202 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2203 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
2204 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
2205 for (i = 0; i < 16; i++) {
2206 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2207 }
2208 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2209 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2210 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2211 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2212 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2213 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2214 return (abi_ulong*)(iwmmxtframe+1);
2215 }
2216
/*
 * Populate a v2 ucontext: sigaltstack info, the basic sigcontext,
 * any coprocessor (VFP/iWMMXt) frames in tuc_regspace, a terminating
 * zero magic word, and the full blocked-signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    /* stack already holds target-order values, so a raw copy suffices. */
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
2250
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/*
 * Build a v1 (pre-2.6.18) non-rt signal frame on the guest stack and
 * redirect execution to the handler.  On an unwritable frame address
 * the signal is converted to SIGSEGV.
 */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    /* sig[0] lives in the sigcontext; only the extra words go here. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
2278
2279 static void setup_frame_v2(int usig, struct target_sigaction *ka,
2280 target_sigset_t *set, CPUARMState *regs)
2281 {
2282 struct sigframe_v2 *frame;
2283 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2284
2285 trace_user_setup_frame(regs, frame_addr);
2286 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2287 goto sigsegv;
2288 }
2289
2290 setup_sigframe_v2(&frame->uc, set, regs);
2291
2292 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2293 frame_addr + offsetof(struct sigframe_v2, retcode));
2294
2295 unlock_user_struct(frame, frame_addr, 1);
2296 return;
2297 sigsegv:
2298 force_sigsegv(usig);
2299 }
2300
2301 static void setup_frame(int usig, struct target_sigaction *ka,
2302 target_sigset_t *set, CPUARMState *regs)
2303 {
2304 if (get_osversion() >= 0x020612) {
2305 setup_frame_v2(usig, ka, set, regs);
2306 } else {
2307 setup_frame_v1(usig, ka, set, regs);
2308 }
2309 }
2310
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/*
 * Build a v1 rt signal frame: siginfo + ucontext, with r1/r2 set up
 * so the handler receives pointers to them.  On an unwritable frame
 * address the signal is converted to SIGSEGV.
 */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    /* The v1 ABI passes siginfo/ucontext via pointers in the frame. */
    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    /* Handler is invoked as handler(sig, &info, &uc). */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
2358
2359 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2360 target_siginfo_t *info,
2361 target_sigset_t *set, CPUARMState *env)
2362 {
2363 struct rt_sigframe_v2 *frame;
2364 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2365 abi_ulong info_addr, uc_addr;
2366
2367 trace_user_setup_rt_frame(env, frame_addr);
2368 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2369 goto sigsegv;
2370 }
2371
2372 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2373 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2374 tswap_siginfo(&frame->info, info);
2375
2376 setup_sigframe_v2(&frame->uc, set, env);
2377
2378 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2379 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2380
2381 env->regs[1] = info_addr;
2382 env->regs[2] = uc_addr;
2383
2384 unlock_user_struct(frame, frame_addr, 1);
2385 return;
2386 sigsegv:
2387 force_sigsegv(usig);
2388 }
2389
2390 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2391 target_siginfo_t *info,
2392 target_sigset_t *set, CPUARMState *env)
2393 {
2394 if (get_osversion() >= 0x020612) {
2395 setup_rt_frame_v2(usig, ka, info, set, env);
2396 } else {
2397 setup_rt_frame_v1(usig, ka, info, set, env);
2398 }
2399 }
2400
/*
 * Restore CPU state from a target sigcontext written by
 * setup_sigcontext(): r0-r15 and the user-visible CPSR bits.
 * Returns non-zero if the restored register state is rejected
 * (valid_user_regs() is currently a stub, so this is always 0).
 */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only the USER/EXEC bits are writable from user mode. */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
2432
/*
 * Handle sigreturn for the v1 (pre-2.6.18) frame: restore the signal
 * mask and the CPU state saved by setup_frame_v1().  Any malformed
 * frame raises SIGSEGV in the guest.
 */
static long do_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v1 *frame = NULL;
    target_sigset_t set;
    sigset_t host_set;
    int i;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    /* sig[0] lives in the sigcontext, the rest in extramask[]. */
    __get_user(set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->sc)) {
        goto badframe;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2480
/*
 * Restore the VFP coprocessor frame written by setup_sigframe_v2_vfp().
 * Returns a pointer just past the frame, or NULL (0) if the magic or
 * size does not match, which the caller treats as a bad frame.
 */
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}
2510
2511 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2512 abi_ulong *regspace)
2513 {
2514 int i;
2515 abi_ulong magic, sz;
2516 struct target_iwmmxt_sigframe *iwmmxtframe;
2517 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2518
2519 __get_user(magic, &iwmmxtframe->magic);
2520 __get_user(sz, &iwmmxtframe->size);
2521 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2522 return 0;
2523 }
2524 for (i = 0; i < 16; i++) {
2525 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2526 }
2527 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2528 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2529 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2530 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2531 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2532 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2533 return (abi_ulong*)(iwmmxtframe + 1);
2534 }
2535
/*
 * Common v2 sigreturn path: restore the signal mask, the basic
 * sigcontext, any coprocessor frames and the sigaltstack settings
 * from a v2 ucontext at guest address context_addr (host pointer uc).
 * Returns 0 on success, 1 on any malformed frame.
 */
static int do_sigframe_return_v2(CPUARMState *env,
                                 target_ulong context_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    /* Re-apply the guest's sigaltstack settings from the ucontext. */
    if (do_sigaltstack(context_addr
                       + offsetof(struct target_ucontext_v2, tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        return 1;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
2578
/*
 * Handle sigreturn for the v2 frame layout: validate the frame
 * address and delegate restoration to do_sigframe_return_v2().
 * Any malformed frame raises SIGSEGV in the guest.
 */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env,
                              frame_addr
                              + offsetof(struct sigframe_v2, uc),
                              &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2614
2615 long do_sigreturn(CPUARMState *env)
2616 {
2617 if (get_osversion() >= 0x020612) {
2618 return do_sigreturn_v2(env);
2619 } else {
2620 return do_sigreturn_v1(env);
2621 }
2622 }
2623
/*
 * Handle rt_sigreturn for the v1 frame: restore the full signal mask,
 * the saved sigcontext and the sigaltstack settings.  Any malformed
 * frame raises SIGSEGV in the guest.
 */
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2668
/*
 * Handle rt_sigreturn for the v2 frame: validate the frame address
 * and delegate restoration to do_sigframe_return_v2().  Any malformed
 * frame raises SIGSEGV in the guest.
 */
static long do_rt_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env,
                              frame_addr
                              + offsetof(struct rt_sigframe_v2, uc),
                              &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2704
2705 long do_rt_sigreturn(CPUARMState *env)
2706 {
2707 if (get_osversion() >= 0x020612) {
2708 return do_rt_sigreturn_v2(env);
2709 } else {
2710 return do_rt_sigreturn_v1(env);
2711 }
2712 }
2713
2714 #elif defined(TARGET_SPARC)
2715
#define __SUNOS_MAXWIN 31

/* This is what SunOS does, so shall I. */
/* SunOS-compatible sigcontext: CPU state plus the user's register
 * windows as they stood when the signal was taken. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};

/* Guest-visible register snapshot stored at the head of a frame. */
typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;
} __siginfo_t;

/* FPU state as saved in the signal frame (currently unused: FPU
 * save/restore is a FIXME in do_sigreturn below). */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;
    unsigned long si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;


struct target_signal_frame {
    struct sparc_stackf ss;
    __siginfo_t info;
    abi_ulong fpu_save;
    abi_ulong insns[2] __attribute__ ((aligned (8)));
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};

/* Indices into env->regwptr for the out/in/local window registers. */
#define UREG_O0        16
#define UREG_O6        22
#define UREG_I0        0
#define UREG_I1        1
#define UREG_I2        2
#define UREG_I3        3
#define UREG_I4        4
#define UREG_I5        5
#define UREG_I6        6
#define UREG_I7        7
#define UREG_L0	       8
#define UREG_FP        UREG_I6
#define UREG_SP        UREG_O6
2812
/*
 * Pick the guest address for a new SPARC signal frame below the
 * current frame pointer, honouring SA_ONSTACK.
 *
 * NOTE(review): the second condition tests the *alignment* of the
 * altstack top rather than whether the altstack is usable; the kernel
 * uses sas_ss_flags() here.  Looks suspicious — verify against
 * linux/arch/sparc/kernel/signal_32.c before changing.
 */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching.  */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        if (!on_sig_stack(sp)
                && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
2830
/*
 * Snapshot the CPU state (psr/pc/npc/y, g0-g7 and i0-i7) plus the
 * first word of the blocked mask into the frame's __siginfo_t.
 * Always returns 0; the err variable is kept for parity with the
 * kernel code this mirrors.
 */
static int
setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
{
    int err = 0, i;

    __put_user(env->psr, &si->si_regs.psr);
    __put_user(env->pc, &si->si_regs.pc);
    __put_user(env->npc, &si->si_regs.npc);
    __put_user(env->y, &si->si_regs.y);
    for (i=0; i < 8; i++) {
        __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        /* u_regs[8..15] hold the in registers (window-relative). */
        __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
    }
    __put_user(mask, &si->si_mask);
    return err;
}
2849
2850 #if 0
2851 static int
2852 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2853 CPUSPARCState *env, unsigned long mask)
2854 {
2855 int err = 0;
2856
2857 __put_user(mask, &sc->sigc_mask);
2858 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2859 __put_user(env->pc, &sc->sigc_pc);
2860 __put_user(env->npc, &sc->sigc_npc);
2861 __put_user(env->psr, &sc->sigc_psr);
2862 __put_user(env->gregs[1], &sc->sigc_g1);
2863 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2864
2865 return err;
2866 }
2867 #endif
/* Frame size rounded up to the 8-byte stack alignment. */
#define NF_ALIGNEDSZ  (((sizeof(struct target_signal_frame) + 7) & (~7)))

/*
 * Build a SPARC32 non-rt signal frame: CPU snapshot, signal mask,
 * caller's register window, and (when no SA_RESTORER is given) a
 * two-instruction sigreturn trampoline in the frame itself.  An
 * unwritable frame converts the signal to SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Save the caller's register window into the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    /* NOTE(review): I2 is set to the same address as I1, mirroring
     * the kernel's setup_frame; presumably intentional — verify
     * against linux/arch/sparc/kernel/signal_32.c before changing. */
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer) {
        env->regwptr[UREG_I7] = ka->sa_restorer;
    } else {
        uint32_t val32;

        /* %o7 points two instructions before the trampoline so that
         * the ret/retl sequence lands on insns[0]. */
        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sigsegv(sig);
}
2958
/* Not implemented for SPARC: rt signals are reported and dropped. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
2965
/*
 * Handle SPARC32 sigreturn: validate the frame at %fp, restore
 * psr/pc/npc/y and the general registers saved by setup_frame(),
 * then restore the signal mask.  A malformed frame raises SIGSEGV.
 */
long do_sigreturn(CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    uint32_t up_psr, pc, npc;
    target_sigset_t set;
    sigset_t host_set;
    int err=0, i;

    sf_addr = env->regwptr[UREG_FP];
    trace_user_do_sigreturn(env, sf_addr);
    if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
        goto segv_and_exit;
    }

    /* 1. Make sure we are not getting garbage from the user */

    if (sf_addr & 3)
        goto segv_and_exit;

    __get_user(pc, &sf->info.si_regs.pc);
    __get_user(npc, &sf->info.si_regs.npc);

    /* pc and npc must be instruction-aligned. */
    if ((pc | npc) & 3) {
        goto segv_and_exit;
    }

    /* 2. Restore the state */
    __get_user(up_psr, &sf->info.si_regs.psr);

    /* User can only change condition codes and FPU enabling in %psr. */
    env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
        | (env->psr & ~(PSR_ICC /* | PSR_EF */));

    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &sf->info.si_regs.y);
    for (i=0; i < 8; i++) {
        __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
    }

    /* FIXME: implement FPU save/restore:
     * __get_user(fpu_save, &sf->fpu_save);
     * if (fpu_save)
     *     err |= restore_fpu_state(env, fpu_save);
     */

    /* This is pretty much atomic, no amount locking would prevent
     * the races which exist anyways.
     */
    __get_user(set.sig[0], &sf->info.si_mask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &sf->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (err) {
        goto segv_and_exit;
    }
    unlock_user_struct(sf, sf_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

segv_and_exit:
    unlock_user_struct(sf, sf_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
3038
/* Not implemented for SPARC32; reported and rejected with ENOSYS. */
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
3045
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
/* Indices into the SPARC64 mcontext gregset, matching the kernel's
 * layout for {set,get}context. */
#define SPARC_MC_TSTATE 0
#define SPARC_MC_PC 1
#define SPARC_MC_NPC 2
#define SPARC_MC_Y 3
#define SPARC_MC_G1 4
#define SPARC_MC_G2 5
#define SPARC_MC_G3 6
#define SPARC_MC_G4 7
#define SPARC_MC_G5 8
#define SPARC_MC_G6 9
#define SPARC_MC_G7 10
#define SPARC_MC_O0 11
#define SPARC_MC_O1 12
#define SPARC_MC_O2 13
#define SPARC_MC_O3 14
#define SPARC_MC_O4 15
#define SPARC_MC_O5 16
#define SPARC_MC_O6 17
#define SPARC_MC_O7 18
#define SPARC_MC_NGREG 19

typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];

/* One entry of the FPU trap queue. */
struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};

/* FPU state in the mcontext: register file plus control/status. */
struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;

typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;

struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};

/* A V9 register window */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

/* V9 stack pointers are offset by this bias. */
#define TARGET_STACK_BIAS 2047
3113
/* {set, get}context() needed for 64-bit SparcLinux userland. */
/*
 * Emulate the SPARC64 setcontext trap: restore pc/npc, the signal
 * mask (when %i1 is non-zero), the general registers, the saved
 * fp/%i7 written back through the stack-biased register window,
 * and the FPU state from the ucontext at guest address %i0.
 * A malformed context raises SIGSEGV.
 */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
        goto do_sigsegv;
    }
    grp  = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[SPARC_MC_PC]));
    __get_user(npc, &((*grp)[SPARC_MC_NPC]));
    /* pc and npc must be instruction-aligned. */
    if ((pc | npc) & 3) {
        goto do_sigsegv;
    }
    /* Non-zero %i1 requests that the signal mask be restored too. */
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        set_sigmask(&set);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[SPARC_MC_Y]));
    /* tstate packs ASI (bits 24-31), CCR (bits 32-39) and CWP. */
    __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
    __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    /* Write the saved fp/%i7 into the register window on the stack
     * (the stack pointer carries the V9 bias). */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *   __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        /* sregs interleave into the upper/lower halves of fpr[]. */
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i/2].l.lower, src);
            } else {
                __get_user(env->fpr[i/2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
3212
3213 void sparc64_get_context(CPUSPARCState *env)
3214 {
3215 abi_ulong ucp_addr;
3216 struct target_ucontext *ucp;
3217 target_mc_gregset_t *grp;
3218 target_mcontext_t *mcp;
3219 abi_ulong fp, i7, w_addr;
3220 int err;
3221 unsigned int i;
3222 target_sigset_t target_set;
3223 sigset_t set;
3224
3225 ucp_addr = env->regwptr[UREG_I0];
3226 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
3227 goto do_sigsegv;
3228 }
3229
3230 mcp = &ucp->tuc_mcontext;
3231 grp = &mcp->mc_gregs;
3232
3233 /* Skip over the trap instruction, first. */
3234 env->pc = env->npc;
3235 env->npc += 4;
3236
3237 /* If we're only reading the signal mask then do_sigprocmask()
3238 * is guaranteed not to fail, which is important because we don't
3239 * have any way to signal a failure or restart this operation since
3240 * this is not a normal syscall.
3241 */
3242 err = do_sigprocmask(0, NULL, &set);
3243 assert(err == 0);
3244 host_to_target_sigset_internal(&target_set, &set);
3245 if (TARGET_NSIG_WORDS == 1) {
3246 __put_user(target_set.sig[0],
3247 (abi_ulong *)&ucp->tuc_sigmask);
3248 } else {
3249 abi_ulong *src, *dst;
3250 src = target_set.sig;
3251 dst = ucp->tuc_sigmask.sig;
3252 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
3253 __put_user(*src, dst);
3254 }
3255 if (err)
3256 goto do_sigsegv;
3257 }
3258
3259 /* XXX: tstate must be saved properly */
3260 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
3261 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
3262 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
3263 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
3264 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
3265 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
3266 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
3267 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
3268 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
3269 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
3270 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
3271 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
3272 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
3273 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
3274 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
3275 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
3276 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
3277 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
3278 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
3279
3280 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
3281 fp = i7 = 0;
3282 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
3283 abi_ulong) != 0) {
3284 goto do_sigsegv;
3285 }
3286 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3287 abi_ulong) != 0) {
3288 goto do_sigsegv;
3289 }
3290 __put_user(fp, &(mcp->mc_fp));
3291 __put_user(i7, &(mcp->mc_i7));
3292
3293 {
3294 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3295 for (i = 0; i < 64; i++, dst++) {
3296 if (i & 1) {
3297 __put_user(env->fpr[i/2].l.lower, dst);
3298 } else {
3299 __put_user(env->fpr[i/2].l.upper, dst);
3300 }
3301 }
3302 }
3303 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3304 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3305 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3306
3307 if (err)
3308 goto do_sigsegv;
3309 unlock_user_struct(ucp, ucp_addr, 1);
3310 return;
3311 do_sigsegv:
3312 unlock_user_struct(ucp, ucp_addr, 1);
3313 force_sig(TARGET_SIGSEGV);
3314 }
3315 #endif
3316 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3317
3318 # if defined(TARGET_ABI_MIPSO32)
/* Guest-visible o32 sigcontext.  Field order and widths are guest ABI
 * (mirrors the kernel's MIPS uapi sigcontext) -- do not reorder.
 */
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t   pad0;
    target_ulong   sc_mdhi;
    target_ulong   sc_mdlo;
    target_ulong   sc_hi1;         /* Was sc_cause */
    target_ulong   sc_lo1;         /* Was sc_badvaddr */
    target_ulong   sc_hi2;         /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
3340 # else /* N32 || N64 */
/* Guest-visible n32/n64 sigcontext.  Note the interleaved mdhi/hi1..3
 * and mdlo/lo1..3 order differs from o32 -- it matches the kernel's
 * 64-bit layout.  Do not reorder.
 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
3358 # endif /* O32 */
3359
/* Classic (non-RT) signal frame pushed on the guest stack by
 * setup_frame(); the handler's $31 points at sf_code (the trampoline).
 */
struct sigframe {
    uint32_t sf_ass[4];                 /* argument save space for o32 */
    uint32_t sf_code[2];                /* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};
3366
/* Guest ucontext embedded in the MIPS RT signal frame. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};
3375
/* RT signal frame: trampoline + siginfo + ucontext, as laid out on the
 * guest stack by setup_rt_frame().
 */
struct target_rt_sigframe {
    uint32_t rs_ass[4];               /* argument save space for o32 */
    uint32_t rs_code[2];              /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
3382
/* Install the trampoline used to jump back from a signal handler:
 *
 *     li      v0, __NR__foo_sigreturn
 *     syscall
 *
 * Faults while writing guest memory are handled inside __put_user.
 * Always returns 0; the int return type is kept for call-site
 * compatibility (the previous "int err = 0; ... return err;" never set
 * err, so this is the same value with the dead variable removed).
 */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /* addiu v0, zero, <syscall number> */
    __put_user(0x24020000 + syscall, tramp + 0);
    /* syscall */
    __put_user(0x0000000c, tramp + 1);
    return 0;
}
3399
/* Save the CPU state (resume PC, GPRs, HI/LO including the DSP
 * accumulators, DSP control, FPU registers) into a guest sigcontext.
 */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    /* exception_resume_pc() presumably folds in pending branch state;
     * the branch hflags are cleared right after -- TODO confirm. */
    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    /* $0 is architecturally zero; store it explicitly. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    /* Always claim the FPU state is live. */
    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
3435
/* Reload CPU state (EPC, HI/LO including DSP accumulators, GPRs, DSP
 * control, FPU registers) from a guest sigcontext written by
 * setup_sigcontext().
 */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* $0 is architecturally constant and is not restored. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
3466
3467 /*
3468 * Determine which stack to use..
3469 */
3470 static inline abi_ulong
3471 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3472 {
3473 unsigned long sp;
3474
3475 /* Default to using normal stack */
3476 sp = regs->active_tc.gpr[29];
3477
3478 /*
3479 * FPU emulator may have its own trampoline active just
3480 * above the user stack, 16-bytes before the next lowest
3481 * 16 byte boundary. Try to avoid trashing it.
3482 */
3483 sp -= 32;
3484
3485 /* This is the X/Open sanctioned signal stack switching. */
3486 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3487 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3488 }
3489
3490 return (sp - frame_size) & ~7;
3491 }
3492
3493 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3494 {
3495 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3496 env->hflags &= ~MIPS_HFLAG_M16;
3497 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3498 env->active_tc.PC &= ~(target_ulong) 1;
3499 }
3500 }
3501
3502 # if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
/* Build a classic (non-RT) o32 signal frame on the guest stack: install
 * the sigreturn trampoline, save CPU state and the blocked-signal mask,
 * then point the CPU at the handler.  A fault while writing the frame
 * delivers SIGSEGV instead.
 */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    /* Save the full blocked-signal mask in the frame. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = 0 (should be cause)
    *   a2 = pointer to struct sigcontext
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}
3551
/* Handle the o32 sigreturn(2) trap: restore the signal mask and CPU
 * state saved by setup_frame(), then resume at the saved EPC.  Returns
 * -TARGET_QEMU_ESIGRETURN so the syscall layer does not clobber the
 * just-restored registers.
 */
long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    /* $29 ($sp) still points at the frame setup_frame() built. */
    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* Recover and reinstate the blocked-signal mask. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
        "move\t$29, %0\n\t"
        "j\tsyscall_exit"
        :/* no outputs */
        :"r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
3597 # endif /* O32 */
3598
/* Build an RT signal frame (siginfo + ucontext) on the guest stack:
 * install the rt_sigreturn trampoline, fill in siginfo, sigaltstack
 * state, CPU state and signal mask, then point the CPU at the handler.
 * A fault while writing the frame delivers SIGSEGV instead.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    /* Fill in the ucontext: flags/link are zero, then the current
     * sigaltstack configuration. */
    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = pointer to siginfo_t
    *   a2 = pointer to ucontext_t
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
3660
3661 long do_rt_sigreturn(CPUMIPSState *env)
3662 {
3663 struct target_rt_sigframe *frame;
3664 abi_ulong frame_addr;
3665 sigset_t blocked;
3666
3667 frame_addr = env->active_tc.gpr[29];
3668 trace_user_do_rt_sigreturn(env, frame_addr);
3669 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3670 goto badframe;
3671 }
3672
3673 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3674 set_sigmask(&blocked);
3675
3676 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3677
3678 if (do_sigaltstack(frame_addr +
3679 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3680 0, get_sp_from_cpustate(env)) == -EFAULT)
3681 goto badframe;
3682
3683 env->active_tc.PC = env->CP0_EPC;
3684 mips_set_hflags_isa_mode_from_pc(env);
3685 /* I am not sure this is right, but it seems to work
3686 * maybe a problem with nested signals ? */
3687 env->CP0_EPC = 0;
3688 return -TARGET_QEMU_ESIGRETURN;
3689
3690 badframe:
3691 force_sig(TARGET_SIGSEGV);
3692 return -TARGET_QEMU_ESIGRETURN;
3693 }
3694
3695 #elif defined(TARGET_SH4)
3696
3697 /*
3698 * code and data structures from linux kernel:
3699 * include/asm-sh/sigcontext.h
3700 * arch/sh/kernel/signal.c
3701 */
3702
/* Guest SH4 sigcontext; mirrors include/asm-sh/sigcontext.h.  oldmask
 * holds the first word of the blocked-signal set (non-iBCS2 extension).
 */
struct target_sigcontext {
    target_ulong  oldmask;

    /* CPU registers */
    target_ulong  sc_gregs[16];
    target_ulong  sc_pc;
    target_ulong  sc_pr;
    target_ulong  sc_sr;
    target_ulong  sc_gbr;
    target_ulong  sc_mach;
    target_ulong  sc_macl;

    /* FPU registers */
    target_ulong  sc_fpregs[16];
    target_ulong  sc_xfpregs[16];
    unsigned int sc_fpscr;
    unsigned int sc_fpul;
    unsigned int sc_ownedfp;
};
3722
/* Classic SH4 signal frame: sigcontext, the remaining mask words, and
 * an inline trampoline (used when no SA_RESTORER stub is provided).
 */
struct target_sigframe
{
    struct target_sigcontext sc;
    target_ulong extramask[TARGET_NSIG_WORDS-1];
    uint16_t retcode[3];
};
3729
3730
/* Guest ucontext embedded in the SH4 RT signal frame.
 * NOTE(review): tuc_link is declared as a host pointer, so on a 64-bit
 * host its size differs from a guest pointer -- verify the layout
 * against the guest ABI before relying on field offsets past it.
 */
struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;        /* mask last for extensibility */
};
3738
/* SH4 RT signal frame: siginfo + ucontext + inline trampoline. */
struct target_rt_sigframe
{
    struct target_siginfo info;
    struct target_ucontext uc;
    uint16_t retcode[3];
};
3745
3746
3747 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3748 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3749
3750 static abi_ulong get_sigframe(struct target_sigaction *ka,
3751 unsigned long sp, size_t frame_size)
3752 {
3753 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3754 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3755 }
3756
3757 return (sp - frame_size) & -8ul;
3758 }
3759
/* Notice when we're in the middle of a gUSA region and reset.
   Note that this will only occur for !parallel_cpus, as we will
   translate such sequences differently in a parallel context.  */
static void unwind_gusa(CPUSH4State *regs)
{
    /* If the stack pointer is sufficiently negative, and we haven't
       completed the sequence, then reset to the entry to the region.  */
    /* ??? The SH4 kernel checks for an address above 0xC0000000.
       However, the page mappings in qemu linux-user aren't as restricted
       and we wind up with the normal stack mapped above 0xF0000000.
       That said, there is no reason why the kernel should be allowing
       a gUSA region that spans 1GB.  Use a tighter check here, for what
       can actually be enabled by the immediate move.  */
    if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
        /* Reset the PC to before the gUSA region, as computed from
           R0 = region end, SP = -(region size), plus one more for the
           insn that actually initializes SP to the region size.  */
        regs->pc = regs->gregs[0] + regs->gregs[15] - 2;

        /* Reset the SP to the saved version in R1.  */
        regs->gregs[15] = regs->gregs[1];
    }
}
3783
/* Dump the SH4 CPU state (general registers, control registers, FPU
 * state) into a guest sigcontext.  `mask` is the first word of the
 * blocked-signal set, stored in the non-iBCS2 oldmask slot.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUSH4State *regs, unsigned long mask)
{
    int i;

#define COPY(x)         __put_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i=0; i<16; i++) {
        __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __put_user(regs->fpscr, &sc->sc_fpscr);
    __put_user(regs->fpul, &sc->sc_fpul);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
}
3812
/* Reload the SH4 CPU state from a guest sigcontext written by
 * setup_sigcontext(), and clear any pending delay-slot/gUSA state.
 */
static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
{
    int i;

#define COPY(x)         __get_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i=0; i<16; i++) {
        __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __get_user(regs->fpscr, &sc->sc_fpscr);
    __get_user(regs->fpul, &sc->sc_fpul);

    regs->tra = -1;         /* disable syscall checks */
    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
}
3840
3841 static void setup_frame(int sig, struct target_sigaction *ka,
3842 target_sigset_t *set, CPUSH4State *regs)
3843 {
3844 struct target_sigframe *frame;
3845 abi_ulong frame_addr;
3846 int i;
3847
3848 unwind_gusa(regs);
3849
3850 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3851 trace_user_setup_frame(regs, frame_addr);
3852 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3853 goto give_sigsegv;
3854 }
3855
3856 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3857
3858 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3859 __put_user(set->sig[i + 1], &frame->extramask[i]);
3860 }
3861
3862 /* Set up to return from userspace. If provided, use a stub
3863 already in userspace. */
3864 if (ka->sa_flags & TARGET_SA_RESTORER) {
3865 regs->pr = (unsigned long) ka->sa_restorer;
3866 } else {
3867 /* Generate return code (system call to sigreturn) */
3868 abi_ulong retcode_addr = frame_addr +
3869 offsetof(struct target_sigframe, retcode);
3870 __put_user(MOVW(2), &frame->retcode[0]);
3871 __put_user(TRAP_NOARG, &frame->retcode[1]);
3872 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3873 regs->pr = (unsigned long) retcode_addr;
3874 }
3875
3876 /* Set up registers for signal handler */
3877 regs->gregs[15] = frame_addr;
3878 regs->gregs[4] = sig; /* Arg for signal handler */
3879 regs->gregs[5] = 0;
3880 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3881 regs->pc = (unsigned long) ka->_sa_handler;
3882 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3883
3884 unlock_user_struct(frame, frame_addr, 1);
3885 return;
3886
3887 give_sigsegv:
3888 unlock_user_struct(frame, frame_addr, 1);
3889 force_sigsegv(sig);
3890 }
3891
/* Build an SH4 RT signal frame (siginfo + ucontext) on the guest stack,
 * install (or reuse) the sigreturn trampoline, and redirect the CPU to
 * the handler.  A fault while writing the frame delivers SIGSEGV.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSH4State *regs)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    unwind_gusa(regs);

    frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
    trace_user_setup_rt_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, (unsigned long *)&frame->uc.tuc_link);
    __put_user((unsigned long)target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(regs->gregs[15]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext,
                     regs, set->sig[0]);
    /* Full blocked-signal mask goes in the ucontext. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        regs->pr = (unsigned long) ka->sa_restorer;
    } else {
        /* Generate return code (system call to sigreturn) */
        abi_ulong retcode_addr = frame_addr +
                                 offsetof(struct target_rt_sigframe, retcode);
        __put_user(MOVW(2), &frame->retcode[0]);
        __put_user(TRAP_NOARG, &frame->retcode[1]);
        __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
        regs->pr = (unsigned long) retcode_addr;
    }

    /* Set up registers for signal handler */
    regs->gregs[15] = frame_addr;
    regs->gregs[4] = sig; /* Arg for signal handler */
    regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
    regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
    regs->pc = (unsigned long) ka->_sa_handler;
    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
3954
3955 long do_sigreturn(CPUSH4State *regs)
3956 {
3957 struct target_sigframe *frame;
3958 abi_ulong frame_addr;
3959 sigset_t blocked;
3960 target_sigset_t target_set;
3961 int i;
3962 int err = 0;
3963
3964 frame_addr = regs->gregs[15];
3965 trace_user_do_sigreturn(regs, frame_addr);
3966 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3967 goto badframe;
3968 }
3969
3970 __get_user(target_set.sig[0], &frame->sc.oldmask);
3971 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3972 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3973 }
3974
3975 if (err)
3976 goto badframe;
3977
3978 target_to_host_sigset_internal(&blocked, &target_set);
3979 set_sigmask(&blocked);
3980
3981 restore_sigcontext(regs, &frame->sc);
3982
3983 unlock_user_struct(frame, frame_addr, 0);
3984 return -TARGET_QEMU_ESIGRETURN;
3985
3986 badframe:
3987 unlock_user_struct(frame, frame_addr, 0);
3988 force_sig(TARGET_SIGSEGV);
3989 return -TARGET_QEMU_ESIGRETURN;
3990 }
3991
3992 long do_rt_sigreturn(CPUSH4State *regs)
3993 {
3994 struct target_rt_sigframe *frame;
3995 abi_ulong frame_addr;
3996 sigset_t blocked;
3997
3998 frame_addr = regs->gregs[15];
3999 trace_user_do_rt_sigreturn(regs, frame_addr);
4000 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4001 goto badframe;
4002 }
4003
4004 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
4005 set_sigmask(&blocked);
4006
4007 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
4008
4009 if (do_sigaltstack(frame_addr +
4010 offsetof(struct target_rt_sigframe, uc.tuc_stack),
4011 0, get_sp_from_cpustate(regs)) == -EFAULT) {
4012 goto badframe;
4013 }
4014
4015 unlock_user_struct(frame, frame_addr, 0);
4016 return -TARGET_QEMU_ESIGRETURN;
4017
4018 badframe:
4019 unlock_user_struct(frame, frame_addr, 0);
4020 force_sig(TARGET_SIGSEGV);
4021 return -TARGET_QEMU_ESIGRETURN;
4022 }
4023 #elif defined(TARGET_MICROBLAZE)
4024
/* Guest MicroBlaze sigcontext: full pt_regs plus first mask word. */
struct target_sigcontext {
    struct target_pt_regs regs;  /* needs to be first */
    uint32_t oldmask;
};
4029
/* Guest stack_t (sigaltstack descriptor) for MicroBlaze. */
struct target_stack_t {
    abi_ulong ss_sp;
    int ss_flags;
    unsigned int ss_size;
};
4035
/* Guest ucontext for MicroBlaze; the extra mask words follow the
 * mcontext (tuc_extramask), unlike most targets.
 */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    struct target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
};
4043
/* Signal frames. */
/* Classic signal frame: ucontext, remaining mask words, and the inline
 * trampoline used when no SA_RESTORER stub is provided.
 */
struct target_signal_frame {
    struct target_ucontext uc;
    uint32_t extramask[TARGET_NSIG_WORDS - 1];
    uint32_t tramp[2];
};
4050
/* NOTE(review): this uses the HOST siginfo_t/ucontext_t rather than
 * target_* types, so its layout is host-dependent -- suspicious for a
 * guest frame, but currently harmless since setup_rt_frame() below is
 * unimplemented for MicroBlaze.  Verify before using.
 */
struct rt_signal_frame {
    siginfo_t info;
    ucontext_t uc;
    uint32_t tramp[2];
};
4056
/* Dump all 32 MicroBlaze general registers and the PC into a guest
 * sigcontext.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
{
    __put_user(env->regs[0], &sc->regs.r0);
    __put_user(env->regs[1], &sc->regs.r1);
    __put_user(env->regs[2], &sc->regs.r2);
    __put_user(env->regs[3], &sc->regs.r3);
    __put_user(env->regs[4], &sc->regs.r4);
    __put_user(env->regs[5], &sc->regs.r5);
    __put_user(env->regs[6], &sc->regs.r6);
    __put_user(env->regs[7], &sc->regs.r7);
    __put_user(env->regs[8], &sc->regs.r8);
    __put_user(env->regs[9], &sc->regs.r9);
    __put_user(env->regs[10], &sc->regs.r10);
    __put_user(env->regs[11], &sc->regs.r11);
    __put_user(env->regs[12], &sc->regs.r12);
    __put_user(env->regs[13], &sc->regs.r13);
    __put_user(env->regs[14], &sc->regs.r14);
    __put_user(env->regs[15], &sc->regs.r15);
    __put_user(env->regs[16], &sc->regs.r16);
    __put_user(env->regs[17], &sc->regs.r17);
    __put_user(env->regs[18], &sc->regs.r18);
    __put_user(env->regs[19], &sc->regs.r19);
    __put_user(env->regs[20], &sc->regs.r20);
    __put_user(env->regs[21], &sc->regs.r21);
    __put_user(env->regs[22], &sc->regs.r22);
    __put_user(env->regs[23], &sc->regs.r23);
    __put_user(env->regs[24], &sc->regs.r24);
    __put_user(env->regs[25], &sc->regs.r25);
    __put_user(env->regs[26], &sc->regs.r26);
    __put_user(env->regs[27], &sc->regs.r27);
    __put_user(env->regs[28], &sc->regs.r28);
    __put_user(env->regs[29], &sc->regs.r29);
    __put_user(env->regs[30], &sc->regs.r30);
    __put_user(env->regs[31], &sc->regs.r31);
    __put_user(env->sregs[SR_PC], &sc->regs.pc);
}
4093
/* Reload all 32 MicroBlaze general registers and the PC from a guest
 * sigcontext written by setup_sigcontext().
 */
static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
{
    __get_user(env->regs[0], &sc->regs.r0);
    __get_user(env->regs[1], &sc->regs.r1);
    __get_user(env->regs[2], &sc->regs.r2);
    __get_user(env->regs[3], &sc->regs.r3);
    __get_user(env->regs[4], &sc->regs.r4);
    __get_user(env->regs[5], &sc->regs.r5);
    __get_user(env->regs[6], &sc->regs.r6);
    __get_user(env->regs[7], &sc->regs.r7);
    __get_user(env->regs[8], &sc->regs.r8);
    __get_user(env->regs[9], &sc->regs.r9);
    __get_user(env->regs[10], &sc->regs.r10);
    __get_user(env->regs[11], &sc->regs.r11);
    __get_user(env->regs[12], &sc->regs.r12);
    __get_user(env->regs[13], &sc->regs.r13);
    __get_user(env->regs[14], &sc->regs.r14);
    __get_user(env->regs[15], &sc->regs.r15);
    __get_user(env->regs[16], &sc->regs.r16);
    __get_user(env->regs[17], &sc->regs.r17);
    __get_user(env->regs[18], &sc->regs.r18);
    __get_user(env->regs[19], &sc->regs.r19);
    __get_user(env->regs[20], &sc->regs.r20);
    __get_user(env->regs[21], &sc->regs.r21);
    __get_user(env->regs[22], &sc->regs.r22);
    __get_user(env->regs[23], &sc->regs.r23);
    __get_user(env->regs[24], &sc->regs.r24);
    __get_user(env->regs[25], &sc->regs.r25);
    __get_user(env->regs[26], &sc->regs.r26);
    __get_user(env->regs[27], &sc->regs.r27);
    __get_user(env->regs[28], &sc->regs.r28);
    __get_user(env->regs[29], &sc->regs.r29);
    __get_user(env->regs[30], &sc->regs.r30);
    __get_user(env->regs[31], &sc->regs.r31);
    __get_user(env->sregs[SR_PC], &sc->regs.pc);
}
4130
4131 static abi_ulong get_sigframe(struct target_sigaction *ka,
4132 CPUMBState *env, int frame_size)
4133 {
4134 abi_ulong sp = env->regs[1];
4135
4136 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
4137 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4138 }
4139
4140 return ((sp - frame_size) & -8UL);
4141 }
4142
4143 static void setup_frame(int sig, struct target_sigaction *ka,
4144 target_sigset_t *set, CPUMBState *env)
4145 {
4146 struct target_signal_frame *frame;
4147 abi_ulong frame_addr;
4148 int i;
4149
4150 frame_addr = get_sigframe(ka, env, sizeof *frame);
4151 trace_user_setup_frame(env, frame_addr);
4152 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4153 goto badframe;
4154
4155 /* Save the mask. */
4156 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
4157
4158 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4159 __put_user(set->sig[i], &frame->extramask[i - 1]);
4160 }
4161
4162 setup_sigcontext(&frame->uc.tuc_mcontext, env);
4163
4164 /* Set up to return from userspace. If provided, use a stub
4165 already in userspace. */
4166 /* minus 8 is offset to cater for "rtsd r15,8" offset */
4167 if (ka->sa_flags & TARGET_SA_RESTORER) {
4168 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
4169 } else {
4170 uint32_t t;
4171 /* Note, these encodings are _big endian_! */
4172 /* addi r12, r0, __NR_sigreturn */
4173 t = 0x31800000UL | TARGET_NR_sigreturn;
4174 __put_user(t, frame->tramp + 0);
4175 /* brki r14, 0x8 */
4176 t = 0xb9cc0008UL;
4177 __put_user(t, frame->tramp + 1);
4178
4179 /* Return from sighandler will jump to the tramp.
4180 Negative 8 offset because return is rtsd r15, 8 */
4181 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
4182 - 8;
4183 }
4184
4185 /* Set up registers for signal handler */
4186 env->regs[1] = frame_addr;
4187 /* Signal handler args: */
4188 env->regs[5] = sig; /* Arg 0: signum */
4189 env->regs[6] = 0;
4190 /* arg 1: sigcontext */
4191 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
4192
4193 /* Offset of 4 to handle microblaze rtid r14, 0 */
4194 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
4195
4196 unlock_user_struct(frame, frame_addr, 1);
4197 return;
4198 badframe:
4199 force_sigsegv(sig);
4200 }
4201
/*
 * Real-time signal frames are not implemented for MicroBlaze: a guest
 * that installs an SA_SIGINFO handler gets this diagnostic and no frame.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMBState *env)
{
    fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
}
4208
/*
 * MicroBlaze sigreturn: undo setup_frame().  Reads the signal frame
 * left on the guest stack, restores the blocked-signal mask and the
 * saved register state, then returns the -TARGET_QEMU_ESIGRETURN
 * sentinel so the syscall path does not clobber the restored registers.
 */
long do_sigreturn(CPUMBState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    /* setup_frame() left r1 (R_SP) pointing at the frame. */
    frame_addr = env->regs[R_SP];
    trace_user_do_sigreturn(env, frame_addr);
    /* Make sure the guest isn't playing games. */
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto badframe;

    /* Restore blocked signals: word 0 lives in the mcontext, the rest
       in the extramask array, mirroring setup_frame(). */
    __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }
    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(&frame->uc.tuc_mcontext, env);
    /* We got here through a sigreturn syscall, our path back is via an
       rtb insn so setup r14 for that. */
    env->regs[14] = env->sregs[SR_PC];

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;
badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
4242
/* RT sigreturn is unimplemented for MicroBlaze (see setup_rt_frame). */
long do_rt_sigreturn(CPUMBState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4249
4250 #elif defined(TARGET_CRIS)
4251
/* Guest-visible CRIS sigcontext.  NOTE(review): layout should mirror the
   Linux/CRIS kernel's struct sigcontext — confirm against the kernel
   headers before changing field order. */
struct target_sigcontext {
    struct target_pt_regs regs; /* needs to be first */
    uint32_t oldmask;
    uint32_t usp; /* usp before stacking this gunk on it */
};

/* Signal frames. */
struct target_signal_frame {
    struct target_sigcontext sc;
    uint32_t extramask[TARGET_NSIG_WORDS - 1];
    uint16_t retcode[4]; /* Trampoline code. */
};

/* NOTE(review): this struct uses *host* siginfo_t/ucontext_t types and
   is currently unused (rt frames are unimplemented for CRIS below). */
struct rt_signal_frame {
    siginfo_t *pinfo;
    void *puc;
    siginfo_t info;
    ucontext_t uc;
    uint16_t retcode[4]; /* Trampoline code. */
};
4272
/*
 * Copy the CRIS CPU state into the guest sigcontext.  r0-r13 and acr
 * map one-to-one; r14 is stored as the saved user stack pointer (usp),
 * and the PC is stored in the erp (exception return pointer) slot.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
{
    __put_user(env->regs[0], &sc->regs.r0);
    __put_user(env->regs[1], &sc->regs.r1);
    __put_user(env->regs[2], &sc->regs.r2);
    __put_user(env->regs[3], &sc->regs.r3);
    __put_user(env->regs[4], &sc->regs.r4);
    __put_user(env->regs[5], &sc->regs.r5);
    __put_user(env->regs[6], &sc->regs.r6);
    __put_user(env->regs[7], &sc->regs.r7);
    __put_user(env->regs[8], &sc->regs.r8);
    __put_user(env->regs[9], &sc->regs.r9);
    __put_user(env->regs[10], &sc->regs.r10);
    __put_user(env->regs[11], &sc->regs.r11);
    __put_user(env->regs[12], &sc->regs.r12);
    __put_user(env->regs[13], &sc->regs.r13);
    __put_user(env->regs[14], &sc->usp);       /* r14 is the stack pointer */
    __put_user(env->regs[15], &sc->regs.acr);
    __put_user(env->pregs[PR_MOF], &sc->regs.mof);
    __put_user(env->pregs[PR_SRP], &sc->regs.srp);
    __put_user(env->pc, &sc->regs.erp);        /* resume address */
}
4295
/*
 * Inverse of setup_sigcontext(): reload the CRIS CPU state from the
 * guest sigcontext on sigreturn.  Field mapping matches the save path
 * (r14 from usp, pc from erp).
 */
static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
{
    __get_user(env->regs[0], &sc->regs.r0);
    __get_user(env->regs[1], &sc->regs.r1);
    __get_user(env->regs[2], &sc->regs.r2);
    __get_user(env->regs[3], &sc->regs.r3);
    __get_user(env->regs[4], &sc->regs.r4);
    __get_user(env->regs[5], &sc->regs.r5);
    __get_user(env->regs[6], &sc->regs.r6);
    __get_user(env->regs[7], &sc->regs.r7);
    __get_user(env->regs[8], &sc->regs.r8);
    __get_user(env->regs[9], &sc->regs.r9);
    __get_user(env->regs[10], &sc->regs.r10);
    __get_user(env->regs[11], &sc->regs.r11);
    __get_user(env->regs[12], &sc->regs.r12);
    __get_user(env->regs[13], &sc->regs.r13);
    __get_user(env->regs[14], &sc->usp);
    __get_user(env->regs[15], &sc->regs.acr);
    __get_user(env->pregs[PR_MOF], &sc->regs.mof);
    __get_user(env->pregs[PR_SRP], &sc->regs.srp);
    __get_user(env->pc, &sc->regs.erp);
}
4318
4319 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4320 {
4321 abi_ulong sp;
4322 /* Align the stack downwards to 4. */
4323 sp = (env->regs[R_SP] & ~3);
4324 return sp - framesize;
4325 }
4326
/*
 * Build a CRIS non-RT signal frame on the guest stack: trampoline,
 * saved signal mask, and sigcontext, then redirect the CPU to the
 * handler with SRP linked back to the trampoline.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto badframe;

    /*
     * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
     * use this trampoline anymore but it sets it up for GDB.
     * In QEMU, using the trampoline simplifies things a bit so we use it.
     *
     * This is movu.w __NR_sigreturn, r9; break 13;
     * (three 16-bit halfwords; __put_user handles target byte order)
     */
    __put_user(0x9c5f, frame->retcode+0);
    __put_user(TARGET_NR_sigreturn,
               frame->retcode + 1);
    __put_user(0xe93d, frame->retcode + 2);

    /* Save the mask: word 0 in the sigcontext, the rest in extramask. */
    __put_user(set->sig[0], &frame->sc.oldmask);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_sigcontext(&frame->sc, env);

    /* Move the stack and setup the arguments for the handler. */
    env->regs[R_SP] = frame_addr;
    env->regs[10] = sig;                        /* arg 0: signal number */
    env->pc = (unsigned long) ka->_sa_handler;
    /* Link SRP so the guest returns through the trampoline. */
    env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);

    unlock_user_struct(frame, frame_addr, 1);
    return;
badframe:
    force_sigsegv(sig);
}
4372
/* RT signal frames are not implemented for CRIS. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUCRISState *env)
{
    fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
}
4379
/*
 * CRIS sigreturn: undo setup_frame().  Restores the blocked-signal
 * mask and the saved register state from the frame on the guest stack,
 * then returns the -TARGET_QEMU_ESIGRETURN sentinel so the syscall
 * path does not clobber the restored registers.
 */
long do_sigreturn(CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    frame_addr = env->regs[R_SP];
    trace_user_do_sigreturn(env, frame_addr);
    /* Make sure the guest isn't playing games. */
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
        goto badframe;
    }

    /* Restore blocked signals (layout matches setup_frame). */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }
    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(&frame->sc, env);
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;
badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
4410
/* RT sigreturn is unimplemented for CRIS (see setup_rt_frame). */
long do_rt_sigreturn(CPUCRISState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4417
4418 #elif defined(TARGET_NIOS2)
4419
/* NOTE(review): these definitions should mirror the Linux/nios2 kernel's
   sigcontext/ucontext layout — confirm against
   arch/nios2/include/uapi/asm/sigcontext.h before changing. */
#define MCONTEXT_VERSION 2

struct target_sigcontext {
    int version;                 /* checked against MCONTEXT_VERSION */
    unsigned long gregs[32];
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask; /* mask last for extensibility */
};

/* nios2 only has RT signal frames (no legacy setup_frame below). */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};
4439
4440 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4441 {
4442 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4443 #ifdef CONFIG_STACK_GROWSUP
4444 return target_sigaltstack_used.ss_sp;
4445 #else
4446 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4447 #endif
4448 }
4449 return sp;
4450 }
4451
/*
 * Store the nios2 CPU state into the guest ucontext's mcontext.
 * r1-r23 go to gregs[0..22]; ra/fp/gp/ea/sp go to gregs[23..28]
 * (gregs[26] is deliberately left unwritten here — the restore side
 * reads it into a scratch variable only).  Always returns 0.
 */
static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
{
    unsigned long *gregs = uc->tuc_mcontext.gregs;

    __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
    __put_user(env->regs[1], &gregs[0]);
    __put_user(env->regs[2], &gregs[1]);
    __put_user(env->regs[3], &gregs[2]);
    __put_user(env->regs[4], &gregs[3]);
    __put_user(env->regs[5], &gregs[4]);
    __put_user(env->regs[6], &gregs[5]);
    __put_user(env->regs[7], &gregs[6]);
    __put_user(env->regs[8], &gregs[7]);
    __put_user(env->regs[9], &gregs[8]);
    __put_user(env->regs[10], &gregs[9]);
    __put_user(env->regs[11], &gregs[10]);
    __put_user(env->regs[12], &gregs[11]);
    __put_user(env->regs[13], &gregs[12]);
    __put_user(env->regs[14], &gregs[13]);
    __put_user(env->regs[15], &gregs[14]);
    __put_user(env->regs[16], &gregs[15]);
    __put_user(env->regs[17], &gregs[16]);
    __put_user(env->regs[18], &gregs[17]);
    __put_user(env->regs[19], &gregs[18]);
    __put_user(env->regs[20], &gregs[19]);
    __put_user(env->regs[21], &gregs[20]);
    __put_user(env->regs[22], &gregs[21]);
    __put_user(env->regs[23], &gregs[22]);
    __put_user(env->regs[R_RA], &gregs[23]);
    __put_user(env->regs[R_FP], &gregs[24]);
    __put_user(env->regs[R_GP], &gregs[25]);
    __put_user(env->regs[R_EA], &gregs[27]);
    __put_user(env->regs[R_SP], &gregs[28]);

    return 0;
}
4488
/*
 * Reload the nios2 CPU state from a guest ucontext on rt_sigreturn.
 * Returns 0 on success, 1 on a bad frame (version mismatch or failed
 * sigaltstack restore).  On success *pr2 is set to the restored guest
 * r2 so the caller can hand it back as the syscall return value.
 */
static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
                               int *pr2)
{
    int temp;
    abi_ulong off, frame_addr = env->regs[R_SP];
    unsigned long *gregs = uc->tuc_mcontext.gregs;
    int err;

    /* Always make any pending restarted system calls return -EINTR */
    /* current->restart_block.fn = do_no_restart_syscall; */

    /* Reject frames written by an incompatible layout version. */
    __get_user(temp, &uc->tuc_mcontext.version);
    if (temp != MCONTEXT_VERSION) {
        return 1;
    }

    /* restore passed registers */
    __get_user(env->regs[1], &gregs[0]);
    __get_user(env->regs[2], &gregs[1]);
    __get_user(env->regs[3], &gregs[2]);
    __get_user(env->regs[4], &gregs[3]);
    __get_user(env->regs[5], &gregs[4]);
    __get_user(env->regs[6], &gregs[5]);
    __get_user(env->regs[7], &gregs[6]);
    __get_user(env->regs[8], &gregs[7]);
    __get_user(env->regs[9], &gregs[8]);
    __get_user(env->regs[10], &gregs[9]);
    __get_user(env->regs[11], &gregs[10]);
    __get_user(env->regs[12], &gregs[11]);
    __get_user(env->regs[13], &gregs[12]);
    __get_user(env->regs[14], &gregs[13]);
    __get_user(env->regs[15], &gregs[14]);
    __get_user(env->regs[16], &gregs[15]);
    __get_user(env->regs[17], &gregs[16]);
    __get_user(env->regs[18], &gregs[17]);
    __get_user(env->regs[19], &gregs[18]);
    __get_user(env->regs[20], &gregs[19]);
    __get_user(env->regs[21], &gregs[20]);
    __get_user(env->regs[22], &gregs[21]);
    __get_user(env->regs[23], &gregs[22]);
    /* gregs[23] is handled below */
    /* Verify, should this be settable */
    __get_user(env->regs[R_FP], &gregs[24]);
    /* Verify, should this be settable */
    __get_user(env->regs[R_GP], &gregs[25]);
    /* Not really necessary no user settable bits */
    __get_user(temp, &gregs[26]);
    __get_user(env->regs[R_EA], &gregs[27]);

    __get_user(env->regs[R_RA], &gregs[23]);
    __get_user(env->regs[R_SP], &gregs[28]);

    /* Restore the signal-stack state saved in the frame's ucontext. */
    off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
    err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
    if (err == -EFAULT) {
        return 1;
    }

    *pr2 = env->regs[2];
    return 0;
}
4550
/*
 * Compute the guest stack address for a new nios2 signal frame,
 * honouring the alternate signal stack and 8-byte alignment.
 *
 * NOTE(review): the return type is misleading — the value is a *guest*
 * address numerically cast to a host "void *", not a dereferenceable
 * host pointer.  It is only safe because the caller converts it back
 * (or because guest_base == 0).  Changing the return type to abi_ulong
 * would be clearer but requires updating the caller in the same pass.
 */
static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
                          size_t frame_size)
{
    unsigned long usp;

    /* Default to using normal stack. */
    usp = env->regs[R_SP];

    /* This is the X/Open sanctioned signal stack switching. */
    usp = sigsp(usp, ka);

    /* Verify, is it 32 or 64 bit aligned */
    return (void *)((usp - frame_size) & -8UL);
}
4565
4566 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4567 target_siginfo_t *info,
4568 target_sigset_t *set,
4569 CPUNios2State *env)
4570 {
4571 struct target_rt_sigframe *frame;
4572 int i, err = 0;
4573
4574 frame = get_sigframe(ka, env, sizeof(*frame));
4575
4576 if (ka->sa_flags & SA_SIGINFO) {
4577 tswap_siginfo(&frame->info, info);
4578 }
4579
4580 /* Create the ucontext. */
4581 __put_user(0, &frame->uc.tuc_flags);
4582 __put_user(0, &frame->uc.tuc_link);
4583 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4584 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4585 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4586 err |= rt_setup_ucontext(&frame->uc, env);
4587 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4588 __put_user((abi_ulong)set->sig[i],
4589 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4590 }
4591
4592 if (err) {
4593 goto give_sigsegv;
4594 }
4595
4596 /* Set up to return from userspace; jump to fixed address sigreturn
4597 trampoline on kuser page. */
4598 env->regs[R_RA] = (unsigned long) (0x1044);
4599
4600 /* Set up registers for signal handler */
4601 env->regs[R_SP] = (unsigned long) frame;
4602 env->regs[4] = (unsigned long) sig;
4603 env->regs[5] = (unsigned long) &frame->info;
4604 env->regs[6] = (unsigned long) &frame->uc;
4605 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4606 return;
4607
4608 give_sigsegv:
4609 if (sig == TARGET_SIGSEGV) {
4610 ka->_sa_handler = TARGET_SIG_DFL;
4611 }
4612 force_sigsegv(sig);
4613 return;
4614 }
4615
/* Legacy (non-RT) sigreturn is unimplemented: nios2 only uses rt frames. */
long do_sigreturn(CPUNios2State *env)
{
    trace_user_do_sigreturn(env, 0);
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4622
4623 long do_rt_sigreturn(CPUNios2State *env)
4624 {
4625 /* Verify, can we follow the stack back */
4626 abi_ulong frame_addr = env->regs[R_SP];
4627 struct target_rt_sigframe *frame;
4628 sigset_t set;
4629 int rval;
4630
4631 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4632 goto badframe;
4633 }
4634
4635 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4636 do_sigprocmask(SIG_SETMASK, &set, NULL);
4637
4638 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4639 goto badframe;
4640 }
4641
4642 unlock_user_struct(frame, frame_addr, 0);
4643 return rval;
4644
4645 badframe:
4646 unlock_user_struct(frame, frame_addr, 0);
4647 force_sig(TARGET_SIGSEGV);
4648 return 0;
4649 }
4650 /* TARGET_NIOS2 */
4651
4652 #elif defined(TARGET_OPENRISC)
4653
/* Guest-visible OpenRISC sigcontext.  NOTE(review): layout should match
   the Linux/OpenRISC kernel's struct sigcontext — confirm before
   changing field order. */
struct target_sigcontext {
    struct target_pt_regs regs;  /* needs to be first (see kernel port) */
    abi_ulong oldmask;
    abi_ulong usp;               /* user stack pointer before the frame */
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask; /* mask last for extensibility */
};

struct target_rt_sigframe {
    abi_ulong pinfo;             /* guest pointer to info below */
    uint64_t puc;                /* guest pointer to uc below */
    struct target_siginfo info;
    struct target_sigcontext sc;
    struct target_ucontext uc;
    unsigned char retcode[16];   /* trampoline code */
};
4676
/* This is the asm-generic/ucontext.h version.  Kept (disabled) as a
   reference from the original Linux/OpenRISC port; it references kernel
   internals (current_thread_info, wrusp, phx_signal) that do not exist
   in QEMU and would not compile if enabled. */
#if 0
static int restore_sigcontext(CPUOpenRISCState *regs,
                              struct target_sigcontext *sc)
{
    unsigned int err = 0;
    unsigned long old_usp;

    /* Alwys make any pending restarted system call return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    /* restore the regs from &sc->regs (same as sc, since regs is first)
     * (sc is already checked for VERIFY_READ since the sigframe was
     * checked in sys_sigreturn previously)
     */

    if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
        goto badframe;
    }

    /* make sure the U-flag is set so user-mode cannot fool us */

    regs->sr &= ~SR_SM;

    /* restore the old USP as it was before we stacked the sc etc.
     * (we cannot just pop the sigcontext since we aligned the sp and
     * stuff after pushing it)
     */

    __get_user(old_usp, &sc->usp);
    phx_signal("old_usp 0x%lx", old_usp);

    __PHX__ REALLY /* ??? */
    wrusp(old_usp);
    regs->gpr[1] = old_usp;

    /* TODO: the other ports use regs->orig_XX to disable syscall checks
     * after this completes, but we don't use that mechanism. maybe we can
     * use it now ?
     */

    return err;

badframe:
    return 1;
}
#endif
4724
4725 /* Set up a signal frame. */
4726
/* Set up a signal frame. */

/*
 * Fill the guest sigcontext.  Note that only the old signal mask and
 * the saved user stack pointer are actually written here; the general
 * registers are NOT copied (the copy_to_user call is commented out),
 * so sc->regs is left uninitialised guest memory.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUOpenRISCState *regs,
                             unsigned long mask)
{
    unsigned long usp = cpu_get_gpr(regs, 1);

    /* copy the regs. they are first in sc so we can use sc directly */

    /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/

    /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
       the signal handler. The frametype will be restored to its previous
       value in restore_sigcontext. */
    /*regs->frametype = CRIS_FRAME_NORMAL;*/

    /* then some other stuff */
    __put_user(mask, &sc->oldmask);
    __put_user(usp, &sc->usp);
}
4746
/* Round a stack address down to the 4-byte boundary required for a frame. */
static inline unsigned long align_sigframe(unsigned long sp)
{
    const unsigned long align_mask = ~3UL;
    return sp & align_mask;
}
4751
/*
 * Compute the guest address for a new OpenRISC signal frame, switching
 * to the alternate signal stack if requested.  If building the frame
 * would overflow the alternate stack, return -1 (an invalid address)
 * so delivery faults and the guest dies with SIGSEGV.
 */
static inline abi_ulong get_sigframe(struct target_sigaction *ka,
                                     CPUOpenRISCState *regs,
                                     size_t frame_size)
{
    unsigned long sp = cpu_get_gpr(regs, 1);  /* guest stack pointer (r1) */
    int onsigstack = on_sig_stack(sp);

    /* redzone */
    /* This is the X/Open sanctioned signal stack switching. */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = align_sigframe(sp - frame_size);

    /*
     * If we are on the alternate signal stack and would overflow it, don't.
     * Return an always-bogus address instead so we will die with SIGSEGV.
     */

    if (onsigstack && !likely(on_sig_stack(sp))) {
        return -1L;
    }

    return sp;
}
4778
4779 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4780 target_siginfo_t *info,
4781 target_sigset_t *set, CPUOpenRISCState *env)
4782 {
4783 int err = 0;
4784 abi_ulong frame_addr;
4785 unsigned long return_ip;
4786 struct target_rt_sigframe *frame;
4787 abi_ulong info_addr, uc_addr;
4788
4789 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4790 trace_user_setup_rt_frame(env, frame_addr);
4791 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4792 goto give_sigsegv;
4793 }
4794
4795 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4796 __put_user(info_addr, &frame->pinfo);
4797 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4798 __put_user(uc_addr, &frame->puc);
4799
4800 if (ka->sa_flags & SA_SIGINFO) {
4801 tswap_siginfo(&frame->info, info);
4802 }
4803
4804 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4805 __put_user(0, &frame->uc.tuc_flags);
4806 __put_user(0, &frame->uc.tuc_link);
4807 __put_user(target_sigaltstack_used.ss_sp,
4808 &frame->uc.tuc_stack.ss_sp);
4809 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4810 &frame->uc.tuc_stack.ss_flags);
4811 __put_user(target_sigaltstack_used.ss_size,
4812 &frame->uc.tuc_stack.ss_size);
4813 setup_sigcontext(&frame->sc, env, set->sig[0]);
4814
4815 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4816
4817 /* trampoline - the desired return ip is the retcode itself */
4818 return_ip = (unsigned long)&frame->retcode;
4819 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4820 __put_user(0xa960, (short *)(frame->retcode + 0));
4821 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4822 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4823 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4824
4825 if (err) {
4826 goto give_sigsegv;
4827 }
4828
4829 /* TODO what is the current->exec_domain stuff and invmap ? */
4830
4831 /* Set up registers for signal handler */
4832 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4833 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4834 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4835 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4836 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
4837
4838 /* actually move the usp to reflect the stacked frame */
4839 cpu_set_gpr(env, 1, (unsigned long)frame);
4840
4841 return;
4842
4843 give_sigsegv:
4844 unlock_user_struct(frame, frame_addr, 1);
4845 force_sigsegv(sig);
4846 }
4847
/* Legacy (non-RT) sigreturn is unimplemented for OpenRISC. */
long do_sigreturn(CPUOpenRISCState *env)
{
    trace_user_do_sigreturn(env, 0);
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4854
/* RT sigreturn is unimplemented for OpenRISC, although setup_rt_frame
   above builds a frame whose trampoline will invoke it. */
long do_rt_sigreturn(CPUOpenRISCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
4861 /* TARGET_OPENRISC */
4862
4863 #elif defined(TARGET_S390X)
4864
/* Register-file sizes of the s390x architecture. */
#define __NUM_GPRS 16
#define __NUM_FPRS 16
#define __NUM_ACRS 16

#define S390_SYSCALL_SIZE 2   /* size of the "svc" trampoline opcode */
#define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */

#define _SIGCONTEXT_NSIG 64
#define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
#define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
#define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
#define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
#define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)

/* PSW + general and access registers, as the kernel exposes them. */
typedef struct {
    target_psw_t psw;
    target_ulong gprs[__NUM_GPRS];
    unsigned int acrs[__NUM_ACRS];
} target_s390_regs_common;

typedef struct {
    unsigned int fpc;
    double fprs[__NUM_FPRS];
} target_s390_fp_regs;

typedef struct {
    target_s390_regs_common regs;
    target_s390_fp_regs fpregs;
} target_sigregs;

struct target_sigcontext {
    target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
    target_sigregs *sregs;  /* guest pointer to the saved registers */
};

/* Legacy (non-RT) signal frame pushed on the guest stack. */
typedef struct {
    uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
    struct target_sigcontext sc;
    target_sigregs sregs;
    int signo;
    uint8_t retcode[S390_SYSCALL_SIZE];
} sigframe;

struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    target_sigregs tuc_mcontext;
    target_sigset_t tuc_sigmask; /* mask last for extensibility */
};

/* RT signal frame pushed on the guest stack. */
typedef struct {
    uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
    uint8_t retcode[S390_SYSCALL_SIZE];
    struct target_siginfo info;
    struct target_ucontext uc;
} rt_sigframe;
4922
/*
 * Compute the guest stack address for a new s390x signal frame,
 * honouring the alternate signal stack and 8-byte alignment.  The
 * "legacy" sa_restorer branch is deliberately disabled (the 0 &&
 * condition) pending a way to test for user mode.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
{
    abi_ulong sp;

    /* Default to using normal stack */
    sp = env->regs[15];

    /* This is the X/Open sanctioned signal stack switching. */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (!sas_ss_flags(sp)) {
            sp = target_sigaltstack_used.ss_sp +
                 target_sigaltstack_used.ss_size;
        }
    }

    /* This is the legacy signal stack switching. */
    else if (/* FIXME !user_mode(regs) */ 0 &&
             !(ka->sa_flags & TARGET_SA_RESTORER) &&
             ka->sa_restorer) {
        sp = (abi_ulong) ka->sa_restorer;
    }

    return (sp - frame_size) & -8ul;
}
4948
/*
 * Save the s390x PSW, general, access and floating-point registers into
 * a guest target_sigregs area.  Used by both the legacy and RT frame
 * builders.
 */
static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
{
    int i;
    //save_access_regs(current->thread.acrs); FIXME

    /* Copy a 'clean' PSW mask to the user to avoid leaking
       information about whether PER is currently on. */
    __put_user(env->psw.mask, &sregs->regs.psw.mask);
    __put_user(env->psw.addr, &sregs->regs.psw.addr);
    for (i = 0; i < 16; i++) {
        __put_user(env->regs[i], &sregs->regs.gprs[i]);
    }
    for (i = 0; i < 16; i++) {
        __put_user(env->aregs[i], &sregs->regs.acrs[i]);
    }
    /*
     * We have to store the fp registers to current->thread.fp_regs
     * to merge them with the emulated registers.
     */
    //save_fp_regs(&current->thread.fp_regs); FIXME
    for (i = 0; i < 16; i++) {
        __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
    }
}
4973
4974 static void setup_frame(int sig, struct target_sigaction *ka,
4975 target_sigset_t *set, CPUS390XState *env)
4976 {
4977 sigframe *frame;
4978 abi_ulong frame_addr;
4979
4980 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4981 trace_user_setup_frame(env, frame_addr);
4982 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4983 goto give_sigsegv;
4984 }
4985
4986 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4987
4988 save_sigregs(env, &frame->sregs);
4989
4990 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4991 (abi_ulong *)&frame->sc.sregs);
4992
4993 /* Set up to return from userspace. If provided, use a stub
4994 already in userspace. */
4995 if (ka->sa_flags & TARGET_SA_RESTORER) {
4996 env->regs[14] = (unsigned long)
4997 ka->sa_restorer | PSW_ADDR_AMODE;
4998 } else {
4999 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
5000 | PSW_ADDR_AMODE;
5001 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
5002 (uint16_t *)(frame->retcode));
5003 }
5004
5005 /* Set up backchain. */
5006 __put_user(env->regs[15], (abi_ulong *) frame);
5007
5008 /* Set up registers for signal handler */
5009 env->regs[15] = frame_addr;
5010 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
5011
5012 env->regs[2] = sig; //map_signal(sig);
5013 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
5014
5015 /* We forgot to include these in the sigcontext.
5016 To avoid breaking binary compatibility, they are passed as args. */
5017 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
5018 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
5019
5020 /* Place signal number on stack to allow backtrace from handler. */
5021 __put_user(env->regs[2], &frame->signo);
5022 unlock_user_struct(frame, frame_addr, 1);
5023 return;
5024
5025 give_sigsegv:
5026 force_sigsegv(sig);
5027 }
5028
5029 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5030 target_siginfo_t *info,
5031 target_sigset_t *set, CPUS390XState *env)
5032 {
5033 int i;
5034 rt_sigframe *frame;
5035 abi_ulong frame_addr;
5036
5037 frame_addr = get_sigframe(ka, env, sizeof *frame);
5038 trace_user_setup_rt_frame(env, frame_addr);
5039 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5040 goto give_sigsegv;
5041 }
5042
5043 tswap_siginfo(&frame->info, info);
5044
5045 /* Create the ucontext. */
5046 __put_user(0, &frame->uc.tuc_flags);
5047 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
5048 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5049 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
5050 &frame->uc.tuc_stack.ss_flags);
5051 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5052 save_sigregs(env, &frame->uc.tuc_mcontext);
5053 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5054 __put_user((abi_ulong)set->sig[i],
5055 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
5056 }
5057
5058 /* Set up to return from userspace. If provided, use a stub
5059 already in userspace. */
5060 if (ka->sa_flags & TARGET_SA_RESTORER) {
5061 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
5062 } else {
5063 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
5064 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
5065 (uint16_t *)(frame->retcode));
5066 }
5067
5068 /* Set up backchain. */
5069 __put_user(env->regs[15], (abi_ulong *) frame);
5070
5071 /* Set up registers for signal handler */
5072 env->regs[15] = frame_addr;
5073 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
5074
5075 env->regs[2] = sig; //map_signal(sig);
5076 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
5077 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
5078 return;
5079
5080 give_sigsegv:
5081 force_sigsegv(sig);
5082 }
5083
/*
 * Reload the s390x PSW, general, access and floating-point registers
 * from a guest target_sigregs area on sigreturn.  Currently always
 * returns 0 (err is never set); callers still check the result.
 */
static int
restore_sigregs(CPUS390XState *env, target_sigregs *sc)
{
    int err = 0;
    int i;

    for (i = 0; i < 16; i++) {
        __get_user(env->regs[i], &sc->regs.gprs[i]);
    }

    __get_user(env->psw.mask, &sc->regs.psw.mask);
    /* NOTE(review): the trace reads sc->regs.psw.addr directly, without
       target-to-host byte swapping — value may appear swapped in traces
       on cross-endian hosts. */
    trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
                                     (unsigned long long)env->psw.addr);
    __get_user(env->psw.addr, &sc->regs.psw.addr);
    /* FIXME: 31-bit -> | PSW_ADDR_AMODE */

    for (i = 0; i < 16; i++) {
        __get_user(env->aregs[i], &sc->regs.acrs[i]);
    }
    for (i = 0; i < 16; i++) {
        __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
    }

    return err;
}
5109
/*
 * s390x legacy sigreturn: undo setup_frame().  Restores the blocked
 * signal mask and the saved registers from the frame pointed to by r15,
 * returning the -TARGET_QEMU_ESIGRETURN sentinel so the syscall path
 * does not clobber the restored registers.
 */
long do_sigreturn(CPUS390XState *env)
{
    sigframe *frame;
    abi_ulong frame_addr = env->regs[15];
    target_sigset_t target_set;
    sigset_t set;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    /* Only the first mask word is stored in the legacy frame. */
    __get_user(target_set.sig[0], &frame->sc.oldmask[0]);

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set); /* ~_BLOCKABLE? */

    if (restore_sigregs(env, &frame->sregs)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
5137
/*
 * s390x rt_sigreturn: undo setup_rt_frame().  Restores the full signal
 * mask, the saved registers, and the signal-stack state from the
 * ucontext in the frame pointed to by r15.
 */
long do_rt_sigreturn(CPUS390XState *env)
{
    rt_sigframe *frame;
    abi_ulong frame_addr = env->regs[15];
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);

    set_sigmask(&set); /* ~_BLOCKABLE? */

    if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Restore the sigaltstack configuration saved in the ucontext. */
    if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
5168
5169 #elif defined(TARGET_PPC)
5170
/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.
   (This matches the kernel's __SIGNAL_FRAMESIZE: room for the back
   chain and register save area required by the PPC calling convention,
   so the handler may legally store below its incoming stack pointer.) */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif
5178
/* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same. */
struct target_mcontext {
    /* General registers; indexed by the TARGET_PT_* enum below. */
    target_ulong mc_gregs[48];
    /* Includes fpscr. */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do. Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform. */
    union {
        /* SPE vector registers. One extra for SPEFSCR. */
        uint32_t spe[33];
        /* Altivec vector registers. The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64
        */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};
5225
/* See arch/powerpc/include/asm/sigcontext.h. */
struct target_sigcontext {
    target_ulong _unused[4];    /* _unused[3] doubles as the high mask word on ppc32 */
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details. */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39. One is 64-bit only. */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};
5292
5293
/* Guest-visible ucontext; layouts diverge substantially between ppc32
   and ppc64, mirroring the kernel definitions. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c. */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];    /* room for the caller's dummy stack frame */
};
5320
#if defined(TARGET_PPC64)

/* Number of 32-bit instruction slots reserved for the return trampoline. */
#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc; /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

/* ELFv1 function descriptor: code entry point plus TOC pointer. */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif
5355
/* We use the mc_pad field for the signal return trampoline
   (two 32-bit instruction slots on ppc32; see encode_trampoline). */
#define tramp mc_pad
5358
5359 /* See arch/powerpc/kernel/signal.c. */
5360 static target_ulong get_sigframe(struct target_sigaction *ka,
5361 CPUPPCState *env,
5362 int frame_size)
5363 {
5364 target_ulong oldsp;
5365
5366 oldsp = env->gpr[1];
5367
5368 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5369 (sas_ss_flags(oldsp) == 0)) {
5370 oldsp = (target_sigaltstack_used.ss_sp
5371 + target_sigaltstack_used.ss_size);
5372 }
5373
5374 return (oldsp - frame_size) & ~0xFUL;
5375 }
5376
/* Index of the high/low 64-bit half of a ppc_avr_t as seen by the
   guest: halves are swapped when host and target endianness differ. */
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif
5385
5386
/* Dump the complete guest CPU register state into a target_mcontext in
 * guest memory: GPRs, NIP/CTR/LR/XER, CR, and (when the CPU has them)
 * Altivec, VSX second halves, FP, and SPE state.  The MSR image written
 * last advertises which optional state is present.
 */
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers. We don't care that
       much, so we just go ahead and save everything. */

    /* Save general registers. */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    /* Reassemble the architected 32-bit CR image from the eight 4-bit
       crf fields; crf[0] is the most significant nibble. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary. */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            /* mc_vregs.altivec is deliberately plain uint64_t storage;
               see the struct definition for the alignment rationale. */
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.
           NOTE(review): MSR_VR is defined as a bit *position* in
           target/ppc/cpu.h; confirm whether (1 << MSR_VR) was intended
           here (same question for MSR_SPE below). */
        msr |= MSR_VR;
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __put_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Save floating point registers. */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __put_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        /* fpscr rides in the last fregs slot. */
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers. The kernel only saves the high half. */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data. */
        msr |= MSR_SPE;
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR. */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}
5470
5471 static void encode_trampoline(int sigret, uint32_t *tramp)
5472 {
5473 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5474 if (sigret) {
5475 __put_user(0x38000000 | sigret, &tramp[0]);
5476 __put_user(0x44000002, &tramp[1]);
5477 }
5478 }
5479
/* Reload guest CPU state from a target_mcontext in guest memory; the
 * inverse of save_user_regs.  sig != 0 means we are returning from a
 * signal handler (restore the saved MSR_LE bit, allow r2 to change);
 * sig == 0 preserves the current r2 (TOC) across the restore.
 */
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers. */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Scatter the 32-bit CR image back into the eight 4-bit crf fields. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR. */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode. */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary. */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* VRSAVE sits after the vector registers; ppc64 keeps it one
           slot further out than ppc32 (see save_user_regs). */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __get_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Restore floating point registers. */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers. The kernel only saved the high half. */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}
5581
#if !defined(TARGET_PPC64)
/* Build an old-style (non-RT) 32-bit signal frame on the guest stack
 * and redirect execution to the handler.  The frame holds the saved
 * sigcontext, the full register image, and a two-instruction
 * trampoline (in mctx.tramp, aka mc_pad) that issues sys_sigreturn
 * when the handler returns via LR.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    /* The second mask word is stashed in the unused sigcontext slot,
       matching the kernel's 32-bit layout. */
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs. */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here. We don't
       emulate a vdso, so use a sigreturn system call. */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions. */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler: store the
       old r1 as the back chain below the signal frame. */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler. */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode. */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */
5641
/* Build an RT signal frame (siginfo + ucontext + trampoline) on the
 * guest stack and enter the handler with the three-argument signature
 * (sig, &info, &uc).  On ppc64 the handler address is resolved through
 * an ELFv1 function descriptor or taken directly for ELFv2.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    /* Populate the ucontext: flags, link, signal stack, mask. */
    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    __put_user((target_ulong)target_sigaltstack_used.ss_sp,
               &rt_sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]),
               &rt_sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &rt_sf->uc.tuc_stack.ss_size);
#if !defined(TARGET_PPC64)
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    /* On ppc64 the register image lives inside the sigcontext and the
       trampoline has its own slot in the frame. */
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    /* On ppc32 the trampoline reuses the mcontext pad (see #define tramp). */
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here. We don't
       emulate a vdso, so use a sigreturn system call. */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions. */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler. */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler. */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode. */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);

}
5742
5743 #if !defined(TARGET_PPC64)
5744 long do_sigreturn(CPUPPCState *env)
5745 {
5746 struct target_sigcontext *sc = NULL;
5747 struct target_mcontext *sr = NULL;
5748 target_ulong sr_addr = 0, sc_addr;
5749 sigset_t blocked;
5750 target_sigset_t set;
5751
5752 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5753 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5754 goto sigsegv;
5755
5756 #if defined(TARGET_PPC64)
5757 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5758 #else
5759 __get_user(set.sig[0], &sc->oldmask);
5760 __get_user(set.sig[1], &sc->_unused[3]);
5761 #endif
5762 target_to_host_sigset_internal(&blocked, &set);
5763 set_sigmask(&blocked);
5764
5765 __get_user(sr_addr, &sc->regs);
5766 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5767 goto sigsegv;
5768 restore_user_regs(env, sr, 1);
5769
5770 unlock_user_struct(sr, sr_addr, 1);
5771 unlock_user_struct(sc, sc_addr, 1);
5772 return -TARGET_QEMU_ESIGRETURN;
5773
5774 sigsegv:
5775 unlock_user_struct(sr, sr_addr, 1);
5776 unlock_user_struct(sc, sc_addr, 1);
5777 force_sig(TARGET_SIGSEGV);
5778 return -TARGET_QEMU_ESIGRETURN;
5779 }
5780 #endif /* !defined(TARGET_PPC64) */
5781
/* See arch/powerpc/kernel/signal_32.c.
 * Restore signal mask and register state from a guest ucontext.
 * Returns 0 on success, 1 on any access failure (caller raises SIGSEGV).
 * sig is forwarded to restore_user_regs (non-zero for signal return).
 */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    /* NOTE(review): this copies the raw (target-endian) sigset and then
       calls target_to_host_sigset_internal, which elsewhere is fed
       already-byteswapped words — confirm cross-endian correctness. */
    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    /* ppc64 embeds the mcontext directly in the sigcontext. */
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    /* ppc32 stores a guest pointer to the mcontext in tuc_regs. */
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
5811
/* Handle the PPC rt_sigreturn syscall: locate the RT frame above the
 * dummy caller frame (SIGNAL_FRAMESIZE + 16 matches setup_rt_frame),
 * restore mask/registers via do_setcontext, and re-establish the
 * recorded signal stack.  Returns -TARGET_QEMU_ESIGRETURN; raises
 * SIGSEGV on a bad frame.
 */
long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    /* NOTE(review): unlike the s390x/m68k paths, the do_sigaltstack
       return value is ignored here — confirm whether -EFAULT should
       also land in sigsegv. */
    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
5836
5837 #elif defined(TARGET_M68K)
5838
/* m68k guest-visible signal frame layouts; must match
   arch/m68k/kernel/signal.c in the Linux kernel. */
struct target_sigcontext {
    abi_ulong  sc_mask;    /* first word of the blocked-signal mask */
    abi_ulong  sc_usp;     /* user stack pointer (a7) */
    abi_ulong  sc_d0;
    abi_ulong  sc_d1;
    abi_ulong  sc_a0;
    abi_ulong  sc_a1;
    unsigned short sc_sr;  /* status register (flags) */
    abi_ulong  sc_pc;
};

struct target_sigframe
{
    abi_ulong pretcode;    /* guest pointer to retcode below */
    int sig;
    int code;
    abi_ulong psc;         /* guest pointer to sc below */
    char retcode[8];       /* sigreturn trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    struct target_sigcontext sc;
};

typedef int target_greg_t;
#define TARGET_NGREG 18
typedef target_greg_t target_gregset_t[TARGET_NGREG];

typedef struct target_fpregset {
    int f_fpcntl[3];       /* fpcr, fpsr, fpiar */
    int f_fpregs[8*3];     /* eight 96-bit extended-precision registers */
} target_fpregset_t;

struct target_mcontext {
    int version;           /* must equal TARGET_MCONTEXT_VERSION */
    target_gregset_t gregs;  /* d0-d7, a0-a7, pc, sr */
    target_fpregset_t fpregs;
};

#define TARGET_MCONTEXT_VERSION 2

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_mcontext tuc_mcontext;
    abi_long tuc_filler[80];
    target_sigset_t tuc_sigmask;
};

struct target_rt_sigframe
{
    abi_ulong pretcode;    /* guest pointer to retcode below */
    int sig;
    abi_ulong pinfo;       /* guest pointer to info below */
    abi_ulong puc;         /* guest pointer to uc below */
    char retcode[8];       /* rt_sigreturn trampoline */
    struct target_siginfo info;
    struct target_ucontext uc;
};
5897
/* Fill a (non-RT) m68k sigcontext in guest memory from the CPU state;
 * mask is the first word of the blocked-signal set.  The SR image is
 * rebuilt from the privileged bits plus the lazily-computed CCR.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
                             abi_ulong mask)
{
    uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
    __put_user(mask, &sc->sc_mask);
    __put_user(env->aregs[7], &sc->sc_usp);
    __put_user(env->dregs[0], &sc->sc_d0);
    __put_user(env->dregs[1], &sc->sc_d1);
    __put_user(env->aregs[0], &sc->sc_a0);
    __put_user(env->aregs[1], &sc->sc_a1);
    __put_user(sr, &sc->sc_sr);
    __put_user(env->pc, &sc->sc_pc);
}
5911
/* Reload CPU state from a (non-RT) m68k sigcontext: usp, d0/d1, a0/a1,
 * pc, and the condition codes (sc_mask is handled by the caller).
 */
static void
restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
{
    int temp;

    __get_user(env->aregs[7], &sc->sc_usp);
    __get_user(env->dregs[0], &sc->sc_d0);
    __get_user(env->dregs[1], &sc->sc_d1);
    __get_user(env->aregs[0], &sc->sc_a0);
    __get_user(env->aregs[1], &sc->sc_a1);
    __get_user(env->pc, &sc->sc_pc);
    __get_user(temp, &sc->sc_sr);
    /* Only the CCR portion of SR is restored for user-mode code. */
    cpu_m68k_set_ccr(env, temp);
}
5926
5927 /*
5928 * Determine which stack to use..
5929 */
5930 static inline abi_ulong
5931 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5932 size_t frame_size)
5933 {
5934 unsigned long sp;
5935
5936 sp = regs->aregs[7];
5937
5938 /* This is the X/Open sanctioned signal stack switching. */
5939 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5940 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5941 }
5942
5943 return ((sp - frame_size) & -8UL);
5944 }
5945
/* Build an old-style m68k signal frame on the guest stack and enter the
 * handler.  The frame carries the sigcontext, the extra mask words, and
 * a "moveq #NR_sigreturn,d0; trap #0" trampoline that the handler's
 * return jumps to via pretcode.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    abi_ulong retcode_addr;
    abi_ulong sc_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(sig, &frame->sig);

    sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
    __put_user(sc_addr, &frame->psc);

    setup_sigcontext(&frame->sc, env, set->sig[0]);

    /* Mask words beyond the first live in extramask[]. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace. */

    retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; trap #0 */

    __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
               (uint32_t *)(frame->retcode));

    /* Set up to return from userspace */

    /* The handler pops pretcode as its return address, hence a7 points
       at the frame itself on entry. */
    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    /* Only reached when lock_user_struct failed; nothing to unlock. */
    force_sigsegv(sig);
}
5993
/* Save the m68k FPU state into the ucontext's fpregs.  Each 96-bit
 * extended-precision register occupies three f_fpregs words: the
 * exponent word (d.high shifted into the top 16 bits) followed by the
 * 64-bit mantissa.
 */
static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
                                            CPUM68KState *env)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;

    __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
    __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high = env->fregs[i].d.high << 16;
        __put_user(high, &fpregs->f_fpregs[i * 3]);
        __put_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}
6011
/* Fill the mcontext inside a guest ucontext from the CPU state:
 * version stamp, d0-d7, a0-a7, pc, SR (privileged bits + CCR), and the
 * FPU state.  Always returns 0 (kept for the caller's err |= pattern).
 */
static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
                                           CPUM68KState *env)
{
    target_greg_t *gregs = uc->tuc_mcontext.gregs;
    uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);

    __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
    __put_user(env->dregs[0], &gregs[0]);
    __put_user(env->dregs[1], &gregs[1]);
    __put_user(env->dregs[2], &gregs[2]);
    __put_user(env->dregs[3], &gregs[3]);
    __put_user(env->dregs[4], &gregs[4]);
    __put_user(env->dregs[5], &gregs[5]);
    __put_user(env->dregs[6], &gregs[6]);
    __put_user(env->dregs[7], &gregs[7]);
    __put_user(env->aregs[0], &gregs[8]);
    __put_user(env->aregs[1], &gregs[9]);
    __put_user(env->aregs[2], &gregs[10]);
    __put_user(env->aregs[3], &gregs[11]);
    __put_user(env->aregs[4], &gregs[12]);
    __put_user(env->aregs[5], &gregs[13]);
    __put_user(env->aregs[6], &gregs[14]);
    __put_user(env->aregs[7], &gregs[15]);
    __put_user(env->pc, &gregs[16]);
    __put_user(sr, &gregs[17]);

    target_rt_save_fpu_state(uc, env);

    return 0;
}
6042
/* Reload the m68k FPU state from a ucontext; inverse of
 * target_rt_save_fpu_state (same 3-words-per-register packing).
 */
static inline void target_rt_restore_fpu_state(CPUM68KState *env,
                                               struct target_ucontext *uc)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
    uint32_t fpcr;

    __get_user(fpcr, &fpregs->f_fpcntl[0]);
    /* Setting fpcr goes through the helper so rounding/precision modes
       take effect. */
    cpu_m68k_set_fpcr(env, fpcr);
    __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high;
        __get_user(high, &fpregs->f_fpregs[i * 3]);
        env->fregs[i].d.high = high >> 16;
        __get_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}
6063
/* Reload CPU state from a guest ucontext's mcontext.  Returns 0 on
 * success, 1 when the version stamp does not match (bad frame).
 */
static inline int target_rt_restore_ucontext(CPUM68KState *env,
                                             struct target_ucontext *uc)
{
    int temp;
    target_greg_t *gregs = uc->tuc_mcontext.gregs;

    __get_user(temp, &uc->tuc_mcontext.version);
    if (temp != TARGET_MCONTEXT_VERSION)
        goto badframe;

    /* restore passed registers */
    __get_user(env->dregs[0], &gregs[0]);
    __get_user(env->dregs[1], &gregs[1]);
    __get_user(env->dregs[2], &gregs[2]);
    __get_user(env->dregs[3], &gregs[3]);
    __get_user(env->dregs[4], &gregs[4]);
    __get_user(env->dregs[5], &gregs[5]);
    __get_user(env->dregs[6], &gregs[6]);
    __get_user(env->dregs[7], &gregs[7]);
    __get_user(env->aregs[0], &gregs[8]);
    __get_user(env->aregs[1], &gregs[9]);
    __get_user(env->aregs[2], &gregs[10]);
    __get_user(env->aregs[3], &gregs[11]);
    __get_user(env->aregs[4], &gregs[12]);
    __get_user(env->aregs[5], &gregs[13]);
    __get_user(env->aregs[6], &gregs[14]);
    __get_user(env->aregs[7], &gregs[15]);
    __get_user(env->pc, &gregs[16]);
    __get_user(temp, &gregs[17]);
    /* Only the CCR portion of SR is user-restorable. */
    cpu_m68k_set_ccr(env, temp);

    target_rt_restore_fpu_state(env, uc);

    return 0;

badframe:
    return 1;
}
6102
6103 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6104 target_siginfo_t *info,
6105 target_sigset_t *set, CPUM68KState *env)
6106 {
6107 struct target_rt_sigframe *frame;
6108 abi_ulong frame_addr;
6109 abi_ulong retcode_addr;
6110 abi_ulong info_addr;
6111 abi_ulong uc_addr;
6112 int err = 0;
6113 int i;
6114
6115 frame_addr = get_sigframe(ka, env, sizeof *frame);
6116 trace_user_setup_rt_frame(env, frame_addr);
6117 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6118 goto give_sigsegv;
6119 }
6120
6121 __put_user(sig, &frame->sig);
6122
6123 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
6124 __put_user(info_addr, &frame->pinfo);
6125
6126 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
6127 __put_user(uc_addr, &frame->puc);
6128
6129 tswap_siginfo(&frame->info, info);
6130
6131 /* Create the ucontext */
6132
6133 __put_user(0, &frame->uc.tuc_flags);
6134 __put_user(0, &frame->uc.tuc_link);
6135 __put_user(target_sigaltstack_used.ss_sp,
6136 &frame->uc.tuc_stack.ss_sp);
6137 __put_user(sas_ss_flags(env->aregs[7]),
6138 &frame->uc.tuc_stack.ss_flags);
6139 __put_user(target_sigaltstack_used.ss_size,
6140 &frame->uc.tuc_stack.ss_size);
6141 err |= target_rt_setup_ucontext(&frame->uc, env);
6142
6143 if (err)
6144 goto give_sigsegv;
6145
6146 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
6147 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6148 }
6149
6150 /* Set up to return from userspace. */
6151
6152 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
6153 __put_user(retcode_addr, &frame->pretcode);
6154
6155 /* moveq #,d0; notb d0; trap #0 */
6156
6157 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
6158 (uint32_t *)(frame->retcode + 0));
6159 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
6160
6161 if (err)
6162 goto give_sigsegv;
6163
6164 /* Set up to return from userspace */
6165
6166 env->aregs[7] = frame_addr;
6167 env->pc = ka->_sa_handler;
6168
6169 unlock_user_struct(frame, frame_addr, 1);
6170 return;
6171
6172 give_sigsegv:
6173 unlock_user_struct(frame, frame_addr, 1);
6174 force_sigsegv(sig);
6175 }
6176
/* Handle the m68k sigreturn syscall.  The trap pops pretcode off the
 * stack, so the frame starts 4 bytes below the current a7.  Restores
 * the blocked-signal mask (first word in sc_mask, rest in extramask[])
 * and the saved registers; returns -TARGET_QEMU_ESIGRETURN so the
 * syscall exit path leaves them untouched.
 */
long do_sigreturn(CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* set blocked signals */

    __get_user(target_set.sig[0], &frame->sc.sc_mask);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */

    restore_sigcontext(env, &frame->sc);

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
6211
/* Handle the m68k rt_sigreturn syscall: restore the full signal mask,
 * the register/FPU state, and the recorded signal stack from the RT
 * frame at a7 - 4 (pretcode already popped by the trap).  Raises
 * SIGSEGV on a bad or version-mismatched frame.
 */
long do_rt_sigreturn(CPUM68KState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    /* restore registers */

    if (target_rt_restore_ucontext(env, &frame->uc))
        goto badframe;

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
6243
6244 #elif defined(TARGET_ALPHA)
6245
/* Guest-visible Alpha sigcontext; layout mirrors the OSF/Linux struct
 * sigcontext, so field order and widths must not change.
 */
struct target_sigcontext {
    abi_long sc_onstack;        /* nonzero if frame was built on sigaltstack */
    abi_long sc_mask;           /* word 0 of the blocked-signal mask */
    abi_long sc_pc;
    abi_long sc_ps;
    abi_long sc_regs[32];       /* integer regs; [31] is always zero */
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];     /* FP regs; [31] is always zero */
    abi_ulong sc_fpcr;
    abi_ulong sc_fp_control;
    abi_ulong sc_reserved1;
    abi_ulong sc_reserved2;
    abi_ulong sc_ssize;
    abi_ulong sc_sbase;
    abi_ulong sc_traparg_a0;    /* not populated by QEMU (see FIXME below) */
    abi_ulong sc_traparg_a1;
    abi_ulong sc_traparg_a2;
    abi_ulong sc_fp_trap_pc;
    abi_ulong sc_fp_trigger_sum;
    abi_ulong sc_fp_trigger_inst;
};
6267
/* Alpha ucontext as delivered to rt signal handlers.  */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    abi_ulong tuc_osf_sigmask;  /* OSF-compat copy of mask word 0 */
    target_stack_t tuc_stack;   /* sigaltstack state at delivery */
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};
6276
/* Non-RT signal frame: sigcontext followed by the sigreturn trampoline.  */
struct target_sigframe {
    struct target_sigcontext sc;
    unsigned int retcode[3];    /* mov/ldi/callsys trampoline */
};
6281
/* RT signal frame: siginfo + ucontext followed by the trampoline.  */
struct target_rt_sigframe {
    target_siginfo_t info;
    struct target_ucontext uc;
    unsigned int retcode[3];    /* mov/ldi/callsys trampoline */
};
6287
6288 #define INSN_MOV_R30_R16 0x47fe0410
6289 #define INSN_LDI_R0 0x201f0000
6290 #define INSN_CALLSYS 0x00000083
6291
6292 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6293 abi_ulong frame_addr, target_sigset_t *set)
6294 {
6295 int i;
6296
6297 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6298 __put_user(set->sig[0], &sc->sc_mask);
6299 __put_user(env->pc, &sc->sc_pc);
6300 __put_user(8, &sc->sc_ps);
6301
6302 for (i = 0; i < 31; ++i) {
6303 __put_user(env->ir[i], &sc->sc_regs[i]);
6304 }
6305 __put_user(0, &sc->sc_regs[31]);
6306
6307 for (i = 0; i < 31; ++i) {
6308 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6309 }
6310 __put_user(0, &sc->sc_fpregs[31]);
6311 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6312
6313 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6314 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6315 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
6316 }
6317
6318 static void restore_sigcontext(CPUAlphaState *env,
6319 struct target_sigcontext *sc)
6320 {
6321 uint64_t fpcr;
6322 int i;
6323
6324 __get_user(env->pc, &sc->sc_pc);
6325
6326 for (i = 0; i < 31; ++i) {
6327 __get_user(env->ir[i], &sc->sc_regs[i]);
6328 }
6329 for (i = 0; i < 31; ++i) {
6330 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6331 }
6332
6333 __get_user(fpcr, &sc->sc_fpcr);
6334 cpu_alpha_store_fpcr(env, fpcr);
6335 }
6336
6337 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6338 CPUAlphaState *env,
6339 unsigned long framesize)
6340 {
6341 abi_ulong sp = env->ir[IR_SP];
6342
6343 /* This is the X/Open sanctioned signal stack switching. */
6344 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6345 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6346 }
6347 return (sp - framesize) & -32;
6348 }
6349
6350 static void setup_frame(int sig, struct target_sigaction *ka,
6351 target_sigset_t *set, CPUAlphaState *env)
6352 {
6353 abi_ulong frame_addr, r26;
6354 struct target_sigframe *frame;
6355 int err = 0;
6356
6357 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6358 trace_user_setup_frame(env, frame_addr);
6359 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6360 goto give_sigsegv;
6361 }
6362
6363 setup_sigcontext(&frame->sc, env, frame_addr, set);
6364
6365 if (ka->sa_restorer) {
6366 r26 = ka->sa_restorer;
6367 } else {
6368 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6369 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6370 &frame->retcode[1]);
6371 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6372 /* imb() */
6373 r26 = frame_addr;
6374 }
6375
6376 unlock_user_struct(frame, frame_addr, 1);
6377
6378 if (err) {
6379 give_sigsegv:
6380 force_sigsegv(sig);
6381 return;
6382 }
6383
6384 env->ir[IR_RA] = r26;
6385 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6386 env->ir[IR_A0] = sig;
6387 env->ir[IR_A1] = 0;
6388 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6389 env->ir[IR_SP] = frame_addr;
6390 }
6391
6392 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6393 target_siginfo_t *info,
6394 target_sigset_t *set, CPUAlphaState *env)
6395 {
6396 abi_ulong frame_addr, r26;
6397 struct target_rt_sigframe *frame;
6398 int i, err = 0;
6399
6400 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6401 trace_user_setup_rt_frame(env, frame_addr);
6402 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6403 goto give_sigsegv;
6404 }
6405
6406 tswap_siginfo(&frame->info, info);
6407
6408 __put_user(0, &frame->uc.tuc_flags);
6409 __put_user(0, &frame->uc.tuc_link);
6410 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6411 __put_user(target_sigaltstack_used.ss_sp,
6412 &frame->uc.tuc_stack.ss_sp);
6413 __put_user(sas_ss_flags(env->ir[IR_SP]),
6414 &frame->uc.tuc_stack.ss_flags);
6415 __put_user(target_sigaltstack_used.ss_size,
6416 &frame->uc.tuc_stack.ss_size);
6417 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6418 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6419 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6420 }
6421
6422 if (ka->sa_restorer) {
6423 r26 = ka->sa_restorer;
6424 } else {
6425 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6426 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6427 &frame->retcode[1]);
6428 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6429 /* imb(); */
6430 r26 = frame_addr;
6431 }
6432
6433 if (err) {
6434 give_sigsegv:
6435 force_sigsegv(sig);
6436 return;
6437 }
6438
6439 env->ir[IR_RA] = r26;
6440 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6441 env->ir[IR_A0] = sig;
6442 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6443 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6444 env->ir[IR_SP] = frame_addr;
6445 }
6446
6447 long do_sigreturn(CPUAlphaState *env)
6448 {
6449 struct target_sigcontext *sc;
6450 abi_ulong sc_addr = env->ir[IR_A0];
6451 target_sigset_t target_set;
6452 sigset_t set;
6453
6454 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6455 goto badframe;
6456 }
6457
6458 target_sigemptyset(&target_set);
6459 __get_user(target_set.sig[0], &sc->sc_mask);
6460
6461 target_to_host_sigset_internal(&set, &target_set);
6462 set_sigmask(&set);
6463
6464 restore_sigcontext(env, sc);
6465 unlock_user_struct(sc, sc_addr, 0);
6466 return -TARGET_QEMU_ESIGRETURN;
6467
6468 badframe:
6469 force_sig(TARGET_SIGSEGV);
6470 return -TARGET_QEMU_ESIGRETURN;
6471 }
6472
6473 long do_rt_sigreturn(CPUAlphaState *env)
6474 {
6475 abi_ulong frame_addr = env->ir[IR_A0];
6476 struct target_rt_sigframe *frame;
6477 sigset_t set;
6478
6479 trace_user_do_rt_sigreturn(env, frame_addr);
6480 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6481 goto badframe;
6482 }
6483 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6484 set_sigmask(&set);
6485
6486 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6487 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6488 uc.tuc_stack),
6489 0, env->ir[IR_SP]) == -EFAULT) {
6490 goto badframe;
6491 }
6492
6493 unlock_user_struct(frame, frame_addr, 0);
6494 return -TARGET_QEMU_ESIGRETURN;
6495
6496
6497 badframe:
6498 unlock_user_struct(frame, frame_addr, 0);
6499 force_sig(TARGET_SIGSEGV);
6500 return -TARGET_QEMU_ESIGRETURN;
6501 }
6502
6503 #elif defined(TARGET_TILEGX)
6504
/* TILE-Gx guest sigcontext; matches the kernel's struct sigcontext, with
 * named aliases for the tp/sp/lr slots of the register file.
 */
struct target_sigcontext {
    union {
        /* General-purpose registers.  */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp;           /* Aliases gregs[TREG_TP].  */
            abi_ulong sp;           /* Aliases gregs[TREG_SP].  */
            abi_ulong lr;           /* Aliases gregs[TREG_LR].  */
        };
    };
    abi_ulong pc;                   /* Program counter.  */
    abi_ulong ics;                  /* In Interrupt Critical Section?  */
    abi_ulong faultnum;             /* Fault number.  */
    abi_ulong pad[5];
};
6521
/* TILE-Gx ucontext as delivered to rt signal handlers.  */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;       /* sigaltstack state at delivery */
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;    /* mask last for extensibility */
};
6529
/* TILE-Gx rt signal frame pushed below the interrupted stack pointer.  */
struct target_rt_sigframe {
    unsigned char save_area[16];    /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];           /* moveli/swint1 sigreturn trampoline */
};
6536
6537 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6538 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
6539
6540
6541 static void setup_sigcontext(struct target_sigcontext *sc,
6542 CPUArchState *env, int signo)
6543 {
6544 int i;
6545
6546 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6547 __put_user(env->regs[i], &sc->gregs[i]);
6548 }
6549
6550 __put_user(env->pc, &sc->pc);
6551 __put_user(0, &sc->ics);
6552 __put_user(signo, &sc->faultnum);
6553 }
6554
6555 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6556 {
6557 int i;
6558
6559 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6560 __get_user(env->regs[i], &sc->gregs[i]);
6561 }
6562
6563 __get_user(env->pc, &sc->pc);
6564 }
6565
6566 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6567 size_t frame_size)
6568 {
6569 unsigned long sp = env->regs[TILEGX_R_SP];
6570
6571 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6572 return -1UL;
6573 }
6574
6575 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
6576 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6577 }
6578
6579 sp -= frame_size;
6580 sp &= -16UL;
6581 return sp;
6582 }
6583
6584 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6585 target_siginfo_t *info,
6586 target_sigset_t *set, CPUArchState *env)
6587 {
6588 abi_ulong frame_addr;
6589 struct target_rt_sigframe *frame;
6590 unsigned long restorer;
6591
6592 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6593 trace_user_setup_rt_frame(env, frame_addr);
6594 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6595 goto give_sigsegv;
6596 }
6597
6598 /* Always write at least the signal number for the stack backtracer. */
6599 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6600 /* At sigreturn time, restore the callee-save registers too. */
6601 tswap_siginfo(&frame->info, info);
6602 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
6603 } else {
6604 __put_user(info->si_signo, &frame->info.si_signo);
6605 }
6606
6607 /* Create the ucontext. */
6608 __put_user(0, &frame->uc.tuc_flags);
6609 __put_user(0, &frame->uc.tuc_link);
6610 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6611 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6612 &frame->uc.tuc_stack.ss_flags);
6613 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6614 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6615
6616 if (ka->sa_flags & TARGET_SA_RESTORER) {
6617 restorer = (unsigned long) ka->sa_restorer;
6618 } else {
6619 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6620 __put_user(INSN_SWINT1, &frame->retcode[1]);
6621 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6622 }
6623 env->pc = (unsigned long) ka->_sa_handler;
6624 env->regs[TILEGX_R_SP] = (unsigned long) frame;
6625 env->regs[TILEGX_R_LR] = restorer;
6626 env->regs[0] = (unsigned long) sig;
6627 env->regs[1] = (unsigned long) &frame->info;
6628 env->regs[2] = (unsigned long) &frame->uc;
6629 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
6630
6631 unlock_user_struct(frame, frame_addr, 1);
6632 return;
6633
6634 give_sigsegv:
6635 force_sigsegv(sig);
6636 }
6637
6638 long do_rt_sigreturn(CPUTLGState *env)
6639 {
6640 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6641 struct target_rt_sigframe *frame;
6642 sigset_t set;
6643
6644 trace_user_do_rt_sigreturn(env, frame_addr);
6645 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6646 goto badframe;
6647 }
6648 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6649 set_sigmask(&set);
6650
6651 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6652 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6653 uc.tuc_stack),
6654 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6655 goto badframe;
6656 }
6657
6658 unlock_user_struct(frame, frame_addr, 0);
6659 return -TARGET_QEMU_ESIGRETURN;
6660
6661
6662 badframe:
6663 unlock_user_struct(frame, frame_addr, 0);
6664 force_sig(TARGET_SIGSEGV);
6665 return -TARGET_QEMU_ESIGRETURN;
6666 }
6667
6668 #elif defined(TARGET_RISCV)
6669
6670 /* Signal handler invocation must be transparent for the code being
6671 interrupted. Complete CPU (hart) state is saved on entry and restored
6672 before returning from the handler. Process sigmask is also saved to block
6673 signals while the handler is running. The handler gets its own stack,
6674 which also doubles as storage for the CPU state and sigmask.
6675
6676 The code below is qemu re-implementation of arch/riscv/kernel/signal.c */
6677
/* RISC-V guest sigcontext: full integer and FP state plus fcsr.  */
struct target_sigcontext {
    abi_long pc;
    abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
    uint64_t fpr[32];
    uint32_t fcsr;
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
6684
/* RISC-V ucontext as delivered to rt signal handlers.  */
struct target_ucontext {
    unsigned long uc_flags;
    struct target_ucontext *uc_link;
    target_stack_t uc_stack;        /* sigaltstack state at delivery */
    struct target_sigcontext uc_mcontext;
    target_sigset_t uc_sigmask;
};
6692
/* RISC-V rt signal frame.  */
struct target_rt_sigframe {
    uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
    struct target_siginfo info;
    struct target_ucontext uc;
};
6698
6699 static abi_ulong get_sigframe(struct target_sigaction *ka,
6700 CPURISCVState *regs, size_t framesize)
6701 {
6702 abi_ulong sp = regs->gpr[xSP];
6703 int onsigstack = on_sig_stack(sp);
6704
6705 /* redzone */
6706 /* This is the X/Open sanctioned signal stack switching. */
6707 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
6708 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6709 }
6710
6711 sp -= framesize;
6712 sp &= ~3UL; /* align sp on 4-byte boundary */
6713
6714 /* If we are on the alternate signal stack and would overflow it, don't.
6715 Return an always-bogus address instead so we will die with SIGSEGV. */
6716 if (onsigstack && !likely(on_sig_stack(sp))) {
6717 return -1L;
6718 }
6719
6720 return sp;
6721 }
6722
6723 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
6724 {
6725 int i;
6726
6727 __put_user(env->pc, &sc->pc);
6728
6729 for (i = 1; i < 32; i++) {
6730 __put_user(env->gpr[i], &sc->gpr[i - 1]);
6731 }
6732 for (i = 0; i < 32; i++) {
6733 __put_user(env->fpr[i], &sc->fpr[i]);
6734 }
6735
6736 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
6737 __put_user(fcsr, &sc->fcsr);
6738 }
6739
6740 static void setup_ucontext(struct target_ucontext *uc,
6741 CPURISCVState *env, target_sigset_t *set)
6742 {
6743 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
6744 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
6745 abi_ulong ss_size = target_sigaltstack_used.ss_size;
6746
6747 __put_user(0, &(uc->uc_flags));
6748 __put_user(0, &(uc->uc_link));
6749
6750 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
6751 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
6752 __put_user(ss_size, &(uc->uc_stack.ss_size));
6753
6754 int i;
6755 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6756 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
6757 }
6758
6759 setup_sigcontext(&uc->uc_mcontext, env);
6760 }
6761
6762 static inline void install_sigtramp(uint32_t *tramp)
6763 {
6764 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
6765 __put_user(0x00000073, tramp + 1); /* ecall */
6766 }
6767
6768 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6769 target_siginfo_t *info,
6770 target_sigset_t *set, CPURISCVState *env)
6771 {
6772 abi_ulong frame_addr;
6773 struct target_rt_sigframe *frame;
6774
6775 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6776 trace_user_setup_rt_frame(env, frame_addr);
6777
6778 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6779 goto badframe;
6780 }
6781
6782 setup_ucontext(&frame->uc, env, set);
6783 tswap_siginfo(&frame->info, info);
6784 install_sigtramp(frame->tramp);
6785
6786 env->pc = ka->_sa_handler;
6787 env->gpr[xSP] = frame_addr;
6788 env->gpr[xA0] = sig;
6789 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6790 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6791 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6792
6793 return;
6794
6795 badframe:
6796 unlock_user_struct(frame, frame_addr, 1);
6797 if (sig == TARGET_SIGSEGV) {
6798 ka->_sa_handler = TARGET_SIG_DFL;
6799 }
6800 force_sig(TARGET_SIGSEGV);
6801 }
6802
6803 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
6804 {
6805 int i;
6806
6807 __get_user(env->pc, &sc->pc);
6808
6809 for (i = 1; i < 32; ++i) {
6810 __get_user(env->gpr[i], &sc->gpr[i - 1]);
6811 }
6812 for (i = 0; i < 32; ++i) {
6813 __get_user(env->fpr[i], &sc->fpr[i]);
6814 }
6815
6816 uint32_t fcsr;
6817 __get_user(fcsr, &sc->fcsr);
6818 csr_write_helper(env, fcsr, CSR_FCSR);
6819 }
6820
6821 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
6822 {
6823 sigset_t blocked;
6824 target_sigset_t target_set;
6825 int i;
6826
6827 target_sigemptyset(&target_set);
6828 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6829 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
6830 }
6831
6832 target_to_host_sigset_internal(&blocked, &target_set);
6833 set_sigmask(&blocked);
6834
6835 restore_sigcontext(env, &uc->uc_mcontext);
6836 }
6837
6838 long do_rt_sigreturn(CPURISCVState *env)
6839 {
6840 struct target_rt_sigframe *frame;
6841 abi_ulong frame_addr;
6842
6843 frame_addr = env->gpr[xSP];
6844 trace_user_do_sigreturn(env, frame_addr);
6845 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6846 goto badframe;
6847 }
6848
6849 restore_ucontext(env, &frame->uc);
6850
6851 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6852 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
6853 goto badframe;
6854 }
6855
6856 unlock_user_struct(frame, frame_addr, 0);
6857 return -TARGET_QEMU_ESIGRETURN;
6858
6859 badframe:
6860 unlock_user_struct(frame, frame_addr, 0);
6861 force_sig(TARGET_SIGSEGV);
6862 return 0;
6863 }
6864
6865 #elif defined(TARGET_HPPA)
6866
/* HPPA guest sigcontext; mirrors the parisc kernel layout.  */
struct target_sigcontext {
    abi_ulong sc_flags;     /* PARISC_SC_FLAG_* bits */
    abi_ulong sc_gr[32];    /* gr[0] slot carries the PSW */
    uint64_t sc_fr[32];
    abi_ulong sc_iasq[2];
    abi_ulong sc_iaoq[2];   /* front/back instruction address queue */
    abi_ulong sc_sar;
};
6875
/* HPPA ucontext as delivered to rt signal handlers.  */
struct target_ucontext {
    abi_uint tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;   /* sigaltstack state at delivery */
    abi_uint pad[1];
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};
6884
/* HPPA rt signal frame: trampoline first, then siginfo and ucontext.  */
struct target_rt_sigframe {
    abi_uint tramp[9];          /* sigreturn trampoline instructions */
    target_siginfo_t info;
    struct target_ucontext uc;
    /* hidden location of upper halves of pa2.0 64-bit gregs */
};
6891
/* Capture HPPA CPU state into a guest sigcontext.  The IAOQ handling
 * depends on whether we interrupted a syscall in the gateway page, so the
 * statement order here is deliberate.
 */
static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
{
    int flags = 0;
    int i;

    /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */

    if (env->iaoq_f < TARGET_PAGE_SIZE) {
        /* In the gateway page, executing a syscall.  The restart address
         * lives in gr[31] (the gateway return pointer), not the IAOQ.  */
        flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
        __put_user(env->gr[31], &sc->sc_iaoq[0]);
        __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
    } else {
        __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
        __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
    }
    __put_user(0, &sc->sc_iasq[0]);
    __put_user(0, &sc->sc_iasq[1]);
    __put_user(flags, &sc->sc_flags);

    /* gr[0] is not a real register; its slot carries the PSW.  */
    __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gr[i], &sc->sc_gr[i]);
    }

    /* fr[0] slot holds the shadowed FP status in its upper half.  */
    __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->fr[i], &sc->sc_fr[i]);
    }

    __put_user(env->cr[CR_SAR], &sc->sc_sar);
}
6924
6925 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6926 {
6927 target_ulong psw;
6928 int i;
6929
6930 __get_user(psw, &sc->sc_gr[0]);
6931 cpu_hppa_put_psw(env, psw);
6932
6933 for (i = 1; i < 32; ++i) {
6934 __get_user(env->gr[i], &sc->sc_gr[i]);
6935 }
6936 for (i = 0; i < 32; ++i) {
6937 __get_user(env->fr[i], &sc->sc_fr[i]);
6938 }
6939 cpu_hppa_loaded_fr0(env);
6940
6941 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6942 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6943 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6944 }
6945
6946 /* No, this doesn't look right, but it's copied straight from the kernel. */
6947 #define PARISC_RT_SIGFRAME_SIZE32 \
6948 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
6949
6950 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6951 target_siginfo_t *info,
6952 target_sigset_t *set, CPUArchState *env)
6953 {
6954 abi_ulong frame_addr, sp, haddr;
6955 struct target_rt_sigframe *frame;
6956 int i;
6957
6958 sp = env->gr[30];
6959 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6960 if (sas_ss_flags(sp) == 0) {
6961 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6962 }
6963 }
6964 frame_addr = QEMU_ALIGN_UP(sp, 64);
6965 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6966
6967 trace_user_setup_rt_frame(env, frame_addr);
6968
6969 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6970 goto give_sigsegv;
6971 }
6972
6973 tswap_siginfo(&frame->info, info);
6974 frame->uc.tuc_flags = 0;
6975 frame->uc.tuc_link = 0;
6976
6977 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6978 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6979 &frame->uc.tuc_stack.ss_flags);
6980 __put_user(target_sigaltstack_used.ss_size,
6981 &frame->uc.tuc_stack.ss_size);
6982
6983 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6984 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6985 }
6986
6987 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6988
6989 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6990 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6991 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6992 __put_user(0x08000240, frame->tramp + 3); /* nop */
6993
6994 unlock_user_struct(frame, frame_addr, 1);
6995
6996 env->gr[2] = h2g(frame->tramp);
6997 env->gr[30] = sp;
6998 env->gr[26] = sig;
6999 env->gr[25] = h2g(&frame->info);
7000 env->gr[24] = h2g(&frame->uc);
7001
7002 haddr = ka->_sa_handler;
7003 if (haddr & 2) {
7004 /* Function descriptor. */
7005 target_ulong *fdesc, dest;
7006
7007 haddr &= -4;
7008 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
7009 goto give_sigsegv;
7010 }
7011 __get_user(dest, fdesc);
7012 __get_user(env->gr[19], fdesc + 1);
7013 unlock_user_struct(fdesc, haddr, 1);
7014 haddr = dest;
7015 }
7016 env->iaoq_f = haddr;
7017 env->iaoq_b = haddr + 4;
7018 return;
7019
7020 give_sigsegv:
7021 force_sigsegv(sig);
7022 }
7023
7024 long do_rt_sigreturn(CPUArchState *env)
7025 {
7026 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
7027 struct target_rt_sigframe *frame;
7028 sigset_t set;
7029
7030 trace_user_do_rt_sigreturn(env, frame_addr);
7031 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
7032 goto badframe;
7033 }
7034 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
7035 set_sigmask(&set);
7036
7037 restore_sigcontext(env, &frame->uc.tuc_mcontext);
7038 unlock_user_struct(frame, frame_addr, 0);
7039
7040 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
7041 uc.tuc_stack),
7042 0, env->gr[30]) == -EFAULT) {
7043 goto badframe;
7044 }
7045
7046 unlock_user_struct(frame, frame_addr, 0);
7047 return -TARGET_QEMU_ESIGRETURN;
7048
7049 badframe:
7050 force_sig(TARGET_SIGSEGV);
7051 return -TARGET_QEMU_ESIGRETURN;
7052 }
7053
7054 #else
7055
/* Fallback for targets with no signal-frame implementation: report and
 * deliver nothing.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}
7061
/* Fallback for targets with no rt signal-frame implementation.  */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
7068
/* Fallback sigreturn: fail the syscall on unimplemented targets.  */
long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
7074
/* Fallback rt_sigreturn: fail the syscall on unimplemented targets.  */
long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
7080
7081 #endif
7082
/* Deliver one pending guest signal: apply the default/ignore semantics,
 * or compute the in-handler signal mask and push the target-specific
 * signal frame.  The mask bookkeeping below is order-sensitive — the old
 * mask must be captured before it is merged with sa_mask.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /* Give gdb a chance to intercept; it may suppress the signal.  */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signal. The other are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
    || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
    || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
    || defined(TARGET_RISCV)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        /* One-shot handlers revert to the default disposition.  */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
7171
/* Drain all deliverable pending guest signals for this thread.  Host
 * signals are blocked for the duration of each scan so the pending state
 * cannot change underneath us; the scan restarts whenever delivering one
 * signal may have queued another (e.g. a synchronous SIGSEGV).
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                /* Force delivery: unblock it and reset to the default
                 * (fatal) disposition.  */
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS must never be blocked on the host, or we could
         * not detect guest faults.  */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}