1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
33 };
34
35 static struct target_sigaction sigact_table[TARGET_NSIG];
36
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
39
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
82 };
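/* In practice this means the guest's lowest RT signal (which guest libc
 * normally keeps for its own thread implementation) is delivered via the
 * host's highest RT signal, leaving host __SIGRTMIN free for QEMU's own
 * libpthread; only a guest that really uses SIGRTMAX loses out.
 * signal_init() fills in an identity mapping for all the signals in
 * between.
 */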
83 static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
110
111 static inline void target_sigemptyset(target_sigset_t *set)
112 {
113 memset(set, 0, sizeof(*set));
114 }
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
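/* Layout these helpers assume: bit (signum - 1) of the set represents
 * signal signum, split into TARGET_NSIG_BPW-bit words.  For example, with
 * 32-bit words target signal 1 is bit 0 of sig[0], signal 32 is bit 31 of
 * sig[0] and signal 33 is bit 0 of sig[1]; this matches the kernel's own
 * sigset_t layout.
 */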
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
194 int block_signals(void)
195 {
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
198
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
202 */
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
205
206 return atomic_xchg(&ts->signal_pending, 1);
207 }
208
209 /* Wrapper for sigprocmask function
210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
211  * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
212 * a signal was already pending and the syscall must be restarted, or
213 * 0 on success.
214 * If set is NULL, this is guaranteed not to fail.
215 */
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
217 {
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
219
220 if (oldset) {
221 *oldset = ts->signal_mask;
222 }
223
224 if (set) {
225 int i;
226
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
229 }
230
231 switch (how) {
232 case SIG_BLOCK:
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
234 break;
235 case SIG_UNBLOCK:
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
239 }
240 }
241 break;
242 case SIG_SETMASK:
243 ts->signal_mask = *set;
244 break;
245 default:
246 g_assert_not_reached();
247 }
248
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
252 }
253 return 0;
254 }
255
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
257 !defined(TARGET_NIOS2)
258 /* Just set the guest's signal mask to the specified value; the
259 * caller is assumed to have called block_signals() already.
260 */
261 static void set_sigmask(const sigset_t *set)
262 {
263 TaskState *ts = (TaskState *)thread_cpu->opaque;
264
265 ts->signal_mask = *set;
266 }
267 #endif
268
269 /* siginfo conversion */
270
271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
272 const siginfo_t *info)
273 {
274 int sig = host_to_target_signal(info->si_signo);
275 int si_code = info->si_code;
276 int si_type;
277 tinfo->si_signo = sig;
278 tinfo->si_errno = 0;
279 tinfo->si_code = info->si_code;
280
281 /* This memset serves two purposes:
282 * (1) ensure we don't leak random junk to the guest later
283 * (2) placate false positives from gcc about fields
284 * being used uninitialized if it chooses to inline both this
285 * function and tswap_siginfo() into host_to_target_siginfo().
286 */
287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
288
289 /* This is awkward, because we have to use a combination of
290 * the si_code and si_signo to figure out which of the union's
291 * members are valid. (Within the host kernel it is always possible
292 * to tell, but the kernel carefully avoids giving userspace the
293 * high 16 bits of si_code, so we don't have the information to
294 * do this the easy way...) We therefore make our best guess,
295 * bearing in mind that a guest can spoof most of the si_codes
296 * via rt_sigqueueinfo() if it likes.
297 *
298 * Once we have made our guess, we record it in the top 16 bits of
299 * the si_code, so that tswap_siginfo() later can use it.
300 * tswap_siginfo() will strip these top bits out before writing
301 * si_code to the guest (sign-extending the lower bits).
302 */
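/* For example, a plain kill() arrives with si_code == SI_USER (0); the
 * deposit32() at the end of this function stashes the guess as
 * (QEMU_SI_KILL << 16) | 0, and tswap_siginfo() later recovers the type
 * with extract32(si_code, 16, 16) and the guest-visible code with
 * sextract32(si_code, 0, 16).
 */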
303
304 switch (si_code) {
305 case SI_USER:
306 case SI_TKILL:
307 case SI_KERNEL:
308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
309 * These are the only unspoofable si_code values.
310 */
311 tinfo->_sifields._kill._pid = info->si_pid;
312 tinfo->_sifields._kill._uid = info->si_uid;
313 si_type = QEMU_SI_KILL;
314 break;
315 default:
316 /* Everything else is spoofable. Make best guess based on signal */
317 switch (sig) {
318 case TARGET_SIGCHLD:
319 tinfo->_sifields._sigchld._pid = info->si_pid;
320 tinfo->_sifields._sigchld._uid = info->si_uid;
321 tinfo->_sifields._sigchld._status
322 = host_to_target_waitstatus(info->si_status);
323 tinfo->_sifields._sigchld._utime = info->si_utime;
324 tinfo->_sifields._sigchld._stime = info->si_stime;
325 si_type = QEMU_SI_CHLD;
326 break;
327 case TARGET_SIGIO:
328 tinfo->_sifields._sigpoll._band = info->si_band;
329 tinfo->_sifields._sigpoll._fd = info->si_fd;
330 si_type = QEMU_SI_POLL;
331 break;
332 default:
333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
334 tinfo->_sifields._rt._pid = info->si_pid;
335 tinfo->_sifields._rt._uid = info->si_uid;
336 /* XXX: potential problem if 64 bit */
337 tinfo->_sifields._rt._sigval.sival_ptr
338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
339 si_type = QEMU_SI_RT;
340 break;
341 }
342 break;
343 }
344
345 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
346 }
347
348 static void tswap_siginfo(target_siginfo_t *tinfo,
349 const target_siginfo_t *info)
350 {
351 int si_type = extract32(info->si_code, 16, 16);
352 int si_code = sextract32(info->si_code, 0, 16);
353
354 __put_user(info->si_signo, &tinfo->si_signo);
355 __put_user(info->si_errno, &tinfo->si_errno);
356 __put_user(si_code, &tinfo->si_code);
357
358 /* We can use our internal marker of which fields in the structure
359 * are valid, rather than duplicating the guesswork of
360 * host_to_target_siginfo_noswap() here.
361 */
362 switch (si_type) {
363 case QEMU_SI_KILL:
364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
366 break;
367 case QEMU_SI_TIMER:
368 __put_user(info->_sifields._timer._timer1,
369 &tinfo->_sifields._timer._timer1);
370 __put_user(info->_sifields._timer._timer2,
371 &tinfo->_sifields._timer._timer2);
372 break;
373 case QEMU_SI_POLL:
374 __put_user(info->_sifields._sigpoll._band,
375 &tinfo->_sifields._sigpoll._band);
376 __put_user(info->_sifields._sigpoll._fd,
377 &tinfo->_sifields._sigpoll._fd);
378 break;
379 case QEMU_SI_FAULT:
380 __put_user(info->_sifields._sigfault._addr,
381 &tinfo->_sifields._sigfault._addr);
382 break;
383 case QEMU_SI_CHLD:
384 __put_user(info->_sifields._sigchld._pid,
385 &tinfo->_sifields._sigchld._pid);
386 __put_user(info->_sifields._sigchld._uid,
387 &tinfo->_sifields._sigchld._uid);
388 __put_user(info->_sifields._sigchld._status,
389 &tinfo->_sifields._sigchld._status);
390 __put_user(info->_sifields._sigchld._utime,
391 &tinfo->_sifields._sigchld._utime);
392 __put_user(info->_sifields._sigchld._stime,
393 &tinfo->_sifields._sigchld._stime);
394 break;
395 case QEMU_SI_RT:
396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
398 __put_user(info->_sifields._rt._sigval.sival_ptr,
399 &tinfo->_sifields._rt._sigval.sival_ptr);
400 break;
401 default:
402 g_assert_not_reached();
403 }
404 }
405
406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
407 {
408 target_siginfo_t tgt_tmp;
409 host_to_target_siginfo_noswap(&tgt_tmp, info);
410 tswap_siginfo(tinfo, &tgt_tmp);
411 }
412
413 /* XXX: we assume only POSIX RT signals are used. */
414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
416 {
417 /* This conversion is used only for the rt_sigqueueinfo syscall,
418 * and so we know that the _rt fields are the valid ones.
419 */
420 abi_ulong sival_ptr;
421
422 __get_user(info->si_signo, &tinfo->si_signo);
423 __get_user(info->si_errno, &tinfo->si_errno);
424 __get_user(info->si_code, &tinfo->si_code);
425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
428 info->si_value.sival_ptr = (void *)(long)sival_ptr;
429 }
430
431 static int fatal_signal (int sig)
432 {
433 switch (sig) {
434 case TARGET_SIGCHLD:
435 case TARGET_SIGURG:
436 case TARGET_SIGWINCH:
437 /* Ignored by default. */
438 return 0;
439 case TARGET_SIGCONT:
440 case TARGET_SIGSTOP:
441 case TARGET_SIGTSTP:
442 case TARGET_SIGTTIN:
443 case TARGET_SIGTTOU:
444 /* Job control signals. */
445 return 0;
446 default:
447 return 1;
448 }
449 }
450
451 /* returns 1 if given signal should dump core if not handled */
452 static int core_dump_signal(int sig)
453 {
454 switch (sig) {
455 case TARGET_SIGABRT:
456 case TARGET_SIGFPE:
457 case TARGET_SIGILL:
458 case TARGET_SIGQUIT:
459 case TARGET_SIGSEGV:
460 case TARGET_SIGTRAP:
461 case TARGET_SIGBUS:
462 return (1);
463 default:
464 return (0);
465 }
466 }
467
468 void signal_init(void)
469 {
470 TaskState *ts = (TaskState *)thread_cpu->opaque;
471 struct sigaction act;
472 struct sigaction oact;
473 int i, j;
474 int host_sig;
475
476 /* generate signal conversion tables */
477 for(i = 1; i < _NSIG; i++) {
478 if (host_to_target_signal_table[i] == 0)
479 host_to_target_signal_table[i] = i;
480 }
481 for(i = 1; i < _NSIG; i++) {
482 j = host_to_target_signal_table[i];
483 target_to_host_signal_table[j] = i;
484 }
485
486 /* Set the signal mask from the host mask. */
487 sigprocmask(0, 0, &ts->signal_mask);
488
489 /* set all host signal handlers. ALL signals are blocked during
490 the handlers to serialize them. */
491 memset(sigact_table, 0, sizeof(sigact_table));
492
493 sigfillset(&act.sa_mask);
494 act.sa_flags = SA_SIGINFO;
495 act.sa_sigaction = host_signal_handler;
496 for(i = 1; i <= TARGET_NSIG; i++) {
497 host_sig = target_to_host_signal(i);
498 sigaction(host_sig, NULL, &oact);
499 if (oact.sa_sigaction == (void *)SIG_IGN) {
500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
501 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
503 }
504 /* If there's already a handler installed then something has
505 gone horribly wrong, so don't even try to handle that case. */
506 /* Install some handlers for our own use. We need at least
507        SIGSEGV and SIGBUS, to detect exceptions. We cannot just
508 trap all signals because it affects syscall interrupt
509 behavior. But do trap all default-fatal signals. */
510 if (fatal_signal (i))
511 sigaction(host_sig, &act, NULL);
512 }
513 }
514
515 #ifndef TARGET_UNICORE32
516 /* Force a synchronously taken signal. The kernel force_sig() function
517 * also forces the signal to "not blocked, not ignored", but for QEMU
518 * that work is done in process_pending_signals().
519 */
520 static void force_sig(int sig)
521 {
522 CPUState *cpu = thread_cpu;
523 CPUArchState *env = cpu->env_ptr;
524 target_siginfo_t info;
525
526 info.si_signo = sig;
527 info.si_errno = 0;
528 info.si_code = TARGET_SI_KERNEL;
529 info._sifields._kill._pid = 0;
530 info._sifields._kill._uid = 0;
531 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
532 }
533
534 /* Force a SIGSEGV if we couldn't write to memory trying to set
535 * up the signal frame. oldsig is the signal we were trying to handle
536 * at the point of failure.
537 */
538 #if !defined(TARGET_RISCV)
539 static void force_sigsegv(int oldsig)
540 {
541 if (oldsig == SIGSEGV) {
542 /* Make sure we don't try to deliver the signal again; this will
543 * end up with handle_pending_signal() calling dump_core_and_abort().
544 */
545 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
546 }
547 force_sig(TARGET_SIGSEGV);
548 }
549 #endif
550
551 #endif
552
553 /* abort execution with signal */
554 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
555 {
556 CPUState *cpu = thread_cpu;
557 CPUArchState *env = cpu->env_ptr;
558 TaskState *ts = (TaskState *)cpu->opaque;
559 int host_sig, core_dumped = 0;
560 struct sigaction act;
561
562 host_sig = target_to_host_signal(target_sig);
563 trace_user_force_sig(env, target_sig, host_sig);
564 gdb_signalled(env, target_sig);
565
566 /* dump core if supported by target binary format */
567 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
568 stop_all_tasks();
569 core_dumped =
570 ((*ts->bprm->core_dump)(target_sig, env) == 0);
571 }
572 if (core_dumped) {
573         /* we already dumped the core of the target process, we don't want
574 * a coredump of qemu itself */
575 struct rlimit nodump;
576 getrlimit(RLIMIT_CORE, &nodump);
577 nodump.rlim_cur=0;
578 setrlimit(RLIMIT_CORE, &nodump);
579 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
580 target_sig, strsignal(host_sig), "core dumped" );
581 }
582
583 /* The proper exit code for dying from an uncaught signal is
584 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
585 * a negative value. To get the proper exit code we need to
586      * actually die from an uncaught signal. Here we install the default
587      * signal handler, send ourselves the signal and wait for it to
588      * arrive. */
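/* (A parent that wait()s for us then sees WIFSIGNALED() with
 * WTERMSIG() == host_sig, which shells conventionally report as an
 * exit status of 128 + signal number.)
 */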
589 sigfillset(&act.sa_mask);
590 act.sa_handler = SIG_DFL;
591 act.sa_flags = 0;
592 sigaction(host_sig, &act, NULL);
593
594 /* For some reason raise(host_sig) doesn't send the signal when
595 * statically linked on x86-64. */
596 kill(getpid(), host_sig);
597
598 /* Make sure the signal isn't masked (just reuse the mask inside
599 of act) */
600 sigdelset(&act.sa_mask, host_sig);
601 sigsuspend(&act.sa_mask);
602
603 /* unreachable */
604 abort();
605 }
606
607 /* queue a signal so that it will be sent to the virtual CPU as soon
608 as possible */
609 int queue_signal(CPUArchState *env, int sig, int si_type,
610 target_siginfo_t *info)
611 {
612 CPUState *cpu = ENV_GET_CPU(env);
613 TaskState *ts = cpu->opaque;
614
615 trace_user_queue_signal(env, sig);
616
617 info->si_code = deposit32(info->si_code, 16, 16, si_type);
618
619 ts->sync_signal.info = *info;
620 ts->sync_signal.pending = sig;
621 /* signal that a new signal is pending */
622 atomic_set(&ts->signal_pending, 1);
623 return 1; /* indicates that the signal was queued */
624 }
625
626 #ifndef HAVE_SAFE_SYSCALL
627 static inline void rewind_if_in_safe_syscall(void *puc)
628 {
629 /* Default version: never rewind */
630 }
631 #endif
632
633 static void host_signal_handler(int host_signum, siginfo_t *info,
634 void *puc)
635 {
636 CPUArchState *env = thread_cpu->env_ptr;
637 CPUState *cpu = ENV_GET_CPU(env);
638 TaskState *ts = cpu->opaque;
639
640 int sig;
641 target_siginfo_t tinfo;
642 ucontext_t *uc = puc;
643 struct emulated_sigtable *k;
644
645     /* The CPU emulator uses some host signals to detect exceptions;
646        we forward those signals to it here. */
647 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
648 && info->si_code > 0) {
649 if (cpu_signal_handler(host_signum, info, puc))
650 return;
651 }
652
653 /* get target signal number */
654 sig = host_to_target_signal(host_signum);
655 if (sig < 1 || sig > TARGET_NSIG)
656 return;
657 trace_user_host_signal(env, host_signum, sig);
658
659 rewind_if_in_safe_syscall(puc);
660
661 host_to_target_siginfo_noswap(&tinfo, info);
662 k = &ts->sigtab[sig - 1];
663 k->info = tinfo;
664 k->pending = sig;
665 ts->signal_pending = 1;
666
667 /* Block host signals until target signal handler entered. We
668 * can't block SIGSEGV or SIGBUS while we're executing guest
669 * code in case the guest code provokes one in the window between
670 * now and it getting out to the main loop. Signals will be
671 * unblocked again in process_pending_signals().
672 *
673 * WARNING: we cannot use sigfillset() here because the uc_sigmask
674 * field is a kernel sigset_t, which is much smaller than the
675 * libc sigset_t which sigfillset() operates on. Using sigfillset()
676 * would write 0xff bytes off the end of the structure and trash
677 * data on the struct.
678 * We can't use sizeof(uc->uc_sigmask) either, because the libc
679 * headers define the struct field with the wrong (too large) type.
680 */
681 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
682 sigdelset(&uc->uc_sigmask, SIGSEGV);
683 sigdelset(&uc->uc_sigmask, SIGBUS);
684
685 /* interrupt the virtual CPU as soon as possible */
686 cpu_exit(thread_cpu);
687 }
688
689 /* do_sigaltstack() returns target values and errnos. */
690 /* compare linux/kernel/signal.c:do_sigaltstack() */
691 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
692 {
693 int ret;
694 struct target_sigaltstack oss;
695
696 /* XXX: test errors */
697 if(uoss_addr)
698 {
699 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
700 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
701 __put_user(sas_ss_flags(sp), &oss.ss_flags);
702 }
703
704 if(uss_addr)
705 {
706 struct target_sigaltstack *uss;
707 struct target_sigaltstack ss;
708 size_t minstacksize = TARGET_MINSIGSTKSZ;
709
710 #if defined(TARGET_PPC64)
711 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
712 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
713 if (get_ppc64_abi(image) > 1) {
714 minstacksize = 4096;
715 }
716 #endif
717
718 ret = -TARGET_EFAULT;
719 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
720 goto out;
721 }
722 __get_user(ss.ss_sp, &uss->ss_sp);
723 __get_user(ss.ss_size, &uss->ss_size);
724 __get_user(ss.ss_flags, &uss->ss_flags);
725 unlock_user_struct(uss, uss_addr, 0);
726
727 ret = -TARGET_EPERM;
728 if (on_sig_stack(sp))
729 goto out;
730
731 ret = -TARGET_EINVAL;
732 if (ss.ss_flags != TARGET_SS_DISABLE
733 && ss.ss_flags != TARGET_SS_ONSTACK
734 && ss.ss_flags != 0)
735 goto out;
736
737 if (ss.ss_flags == TARGET_SS_DISABLE) {
738 ss.ss_size = 0;
739 ss.ss_sp = 0;
740 } else {
741 ret = -TARGET_ENOMEM;
742 if (ss.ss_size < minstacksize) {
743 goto out;
744 }
745 }
746
747 target_sigaltstack_used.ss_sp = ss.ss_sp;
748 target_sigaltstack_used.ss_size = ss.ss_size;
749 }
750
751 if (uoss_addr) {
752 ret = -TARGET_EFAULT;
753 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
754 goto out;
755 }
756
757 ret = 0;
758 out:
759 return ret;
760 }
761
762 /* do_sigaction() returns target values and host errnos */
763 int do_sigaction(int sig, const struct target_sigaction *act,
764 struct target_sigaction *oact)
765 {
766 struct target_sigaction *k;
767 struct sigaction act1;
768 int host_sig;
769 int ret = 0;
770
771 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
772 return -TARGET_EINVAL;
773 }
774
775 if (block_signals()) {
776 return -TARGET_ERESTARTSYS;
777 }
778
779 k = &sigact_table[sig - 1];
780 if (oact) {
781 __put_user(k->_sa_handler, &oact->_sa_handler);
782 __put_user(k->sa_flags, &oact->sa_flags);
783 #ifdef TARGET_ARCH_HAS_SA_RESTORER
784 __put_user(k->sa_restorer, &oact->sa_restorer);
785 #endif
786 /* Not swapped. */
787 oact->sa_mask = k->sa_mask;
788 }
789 if (act) {
790 /* FIXME: This is not threadsafe. */
791 __get_user(k->_sa_handler, &act->_sa_handler);
792 __get_user(k->sa_flags, &act->sa_flags);
793 #ifdef TARGET_ARCH_HAS_SA_RESTORER
794 __get_user(k->sa_restorer, &act->sa_restorer);
795 #endif
796 /* To be swapped in target_to_host_sigset. */
797 k->sa_mask = act->sa_mask;
798
799 /* we update the host linux signal state */
800 host_sig = target_to_host_signal(sig);
801 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
802 sigfillset(&act1.sa_mask);
803 act1.sa_flags = SA_SIGINFO;
804 if (k->sa_flags & TARGET_SA_RESTART)
805 act1.sa_flags |= SA_RESTART;
806 /* NOTE: it is important to update the host kernel signal
807                ignore state to avoid getting unexpectedly interrupted
808 syscalls */
809 if (k->_sa_handler == TARGET_SIG_IGN) {
810 act1.sa_sigaction = (void *)SIG_IGN;
811 } else if (k->_sa_handler == TARGET_SIG_DFL) {
812 if (fatal_signal (sig))
813 act1.sa_sigaction = host_signal_handler;
814 else
815 act1.sa_sigaction = (void *)SIG_DFL;
816 } else {
817 act1.sa_sigaction = host_signal_handler;
818 }
819 ret = sigaction(host_sig, &act1, NULL);
820 }
821 }
822 return ret;
823 }
824
825 #if defined(TARGET_I386)
826 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
827
828 struct target_fpreg {
829 uint16_t significand[4];
830 uint16_t exponent;
831 };
832
833 struct target_fpxreg {
834 uint16_t significand[4];
835 uint16_t exponent;
836 uint16_t padding[3];
837 };
838
839 struct target_xmmreg {
840 uint32_t element[4];
841 };
842
843 struct target_fpstate_32 {
844 /* Regular FPU environment */
845 uint32_t cw;
846 uint32_t sw;
847 uint32_t tag;
848 uint32_t ipoff;
849 uint32_t cssel;
850 uint32_t dataoff;
851 uint32_t datasel;
852 struct target_fpreg st[8];
853 uint16_t status;
854 uint16_t magic; /* 0xffff = regular FPU data only */
855
856 /* FXSR FPU environment */
857 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
858 uint32_t mxcsr;
859 uint32_t reserved;
860 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
861 struct target_xmmreg xmm[8];
862 uint32_t padding[56];
863 };
864
865 struct target_fpstate_64 {
866 /* FXSAVE format */
867 uint16_t cw;
868 uint16_t sw;
869 uint16_t twd;
870 uint16_t fop;
871 uint64_t rip;
872 uint64_t rdp;
873 uint32_t mxcsr;
874 uint32_t mxcsr_mask;
875 uint32_t st_space[32];
876 uint32_t xmm_space[64];
877 uint32_t reserved[24];
878 };
879
880 #ifndef TARGET_X86_64
881 # define target_fpstate target_fpstate_32
882 #else
883 # define target_fpstate target_fpstate_64
884 #endif
885
886 struct target_sigcontext_32 {
887 uint16_t gs, __gsh;
888 uint16_t fs, __fsh;
889 uint16_t es, __esh;
890 uint16_t ds, __dsh;
891 uint32_t edi;
892 uint32_t esi;
893 uint32_t ebp;
894 uint32_t esp;
895 uint32_t ebx;
896 uint32_t edx;
897 uint32_t ecx;
898 uint32_t eax;
899 uint32_t trapno;
900 uint32_t err;
901 uint32_t eip;
902 uint16_t cs, __csh;
903 uint32_t eflags;
904 uint32_t esp_at_signal;
905 uint16_t ss, __ssh;
906 uint32_t fpstate; /* pointer */
907 uint32_t oldmask;
908 uint32_t cr2;
909 };
910
911 struct target_sigcontext_64 {
912 uint64_t r8;
913 uint64_t r9;
914 uint64_t r10;
915 uint64_t r11;
916 uint64_t r12;
917 uint64_t r13;
918 uint64_t r14;
919 uint64_t r15;
920
921 uint64_t rdi;
922 uint64_t rsi;
923 uint64_t rbp;
924 uint64_t rbx;
925 uint64_t rdx;
926 uint64_t rax;
927 uint64_t rcx;
928 uint64_t rsp;
929 uint64_t rip;
930
931 uint64_t eflags;
932
933 uint16_t cs;
934 uint16_t gs;
935 uint16_t fs;
936 uint16_t ss;
937
938 uint64_t err;
939 uint64_t trapno;
940 uint64_t oldmask;
941 uint64_t cr2;
942
943 uint64_t fpstate; /* pointer */
944 uint64_t padding[8];
945 };
946
947 #ifndef TARGET_X86_64
948 # define target_sigcontext target_sigcontext_32
949 #else
950 # define target_sigcontext target_sigcontext_64
951 #endif
952
953 /* see Linux/include/uapi/asm-generic/ucontext.h */
954 struct target_ucontext {
955 abi_ulong tuc_flags;
956 abi_ulong tuc_link;
957 target_stack_t tuc_stack;
958 struct target_sigcontext tuc_mcontext;
959 target_sigset_t tuc_sigmask; /* mask last for extensibility */
960 };
961
962 #ifndef TARGET_X86_64
963 struct sigframe {
964 abi_ulong pretcode;
965 int sig;
966 struct target_sigcontext sc;
967 struct target_fpstate fpstate;
968 abi_ulong extramask[TARGET_NSIG_WORDS-1];
969 char retcode[8];
970 };
971
972 struct rt_sigframe {
973 abi_ulong pretcode;
974 int sig;
975 abi_ulong pinfo;
976 abi_ulong puc;
977 struct target_siginfo info;
978 struct target_ucontext uc;
979 struct target_fpstate fpstate;
980 char retcode[8];
981 };
982
983 #else
984
985 struct rt_sigframe {
986 abi_ulong pretcode;
987 struct target_ucontext uc;
988 struct target_siginfo info;
989 struct target_fpstate fpstate;
990 };
991
992 #endif
993
994 /*
995 * Set up a signal frame.
996 */
997
998 /* XXX: save x87 state */
999 static void setup_sigcontext(struct target_sigcontext *sc,
1000 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
1001 abi_ulong fpstate_addr)
1002 {
1003 CPUState *cs = CPU(x86_env_get_cpu(env));
1004 #ifndef TARGET_X86_64
1005 uint16_t magic;
1006
1007 /* already locked in setup_frame() */
1008 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
1009 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
1010 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
1011 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
1012 __put_user(env->regs[R_EDI], &sc->edi);
1013 __put_user(env->regs[R_ESI], &sc->esi);
1014 __put_user(env->regs[R_EBP], &sc->ebp);
1015 __put_user(env->regs[R_ESP], &sc->esp);
1016 __put_user(env->regs[R_EBX], &sc->ebx);
1017 __put_user(env->regs[R_EDX], &sc->edx);
1018 __put_user(env->regs[R_ECX], &sc->ecx);
1019 __put_user(env->regs[R_EAX], &sc->eax);
1020 __put_user(cs->exception_index, &sc->trapno);
1021 __put_user(env->error_code, &sc->err);
1022 __put_user(env->eip, &sc->eip);
1023 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1024 __put_user(env->eflags, &sc->eflags);
1025 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1026 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1027
1028 cpu_x86_fsave(env, fpstate_addr, 1);
1029 fpstate->status = fpstate->sw;
1030 magic = 0xffff;
1031 __put_user(magic, &fpstate->magic);
1032 __put_user(fpstate_addr, &sc->fpstate);
1033
1034 /* non-iBCS2 extensions.. */
1035 __put_user(mask, &sc->oldmask);
1036 __put_user(env->cr[2], &sc->cr2);
1037 #else
1038 __put_user(env->regs[R_EDI], &sc->rdi);
1039 __put_user(env->regs[R_ESI], &sc->rsi);
1040 __put_user(env->regs[R_EBP], &sc->rbp);
1041 __put_user(env->regs[R_ESP], &sc->rsp);
1042 __put_user(env->regs[R_EBX], &sc->rbx);
1043 __put_user(env->regs[R_EDX], &sc->rdx);
1044 __put_user(env->regs[R_ECX], &sc->rcx);
1045 __put_user(env->regs[R_EAX], &sc->rax);
1046
1047 __put_user(env->regs[8], &sc->r8);
1048 __put_user(env->regs[9], &sc->r9);
1049 __put_user(env->regs[10], &sc->r10);
1050 __put_user(env->regs[11], &sc->r11);
1051 __put_user(env->regs[12], &sc->r12);
1052 __put_user(env->regs[13], &sc->r13);
1053 __put_user(env->regs[14], &sc->r14);
1054 __put_user(env->regs[15], &sc->r15);
1055
1056 __put_user(cs->exception_index, &sc->trapno);
1057 __put_user(env->error_code, &sc->err);
1058 __put_user(env->eip, &sc->rip);
1059
1060 __put_user(env->eflags, &sc->eflags);
1061 __put_user(env->segs[R_CS].selector, &sc->cs);
1062 __put_user((uint16_t)0, &sc->gs);
1063 __put_user((uint16_t)0, &sc->fs);
1064 __put_user(env->segs[R_SS].selector, &sc->ss);
1065
1066 __put_user(mask, &sc->oldmask);
1067 __put_user(env->cr[2], &sc->cr2);
1068
1069 /* fpstate_addr must be 16 byte aligned for fxsave */
1070 assert(!(fpstate_addr & 0xf));
1071
1072 cpu_x86_fxsave(env, fpstate_addr);
1073 __put_user(fpstate_addr, &sc->fpstate);
1074 #endif
1075 }
1076
1077 /*
1078 * Determine which stack to use..
1079 */
1080
1081 static inline abi_ulong
1082 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1083 {
1084 unsigned long esp;
1085
1086 /* Default to using normal stack */
1087 esp = env->regs[R_ESP];
1088 #ifdef TARGET_X86_64
1089 esp -= 128; /* this is the redzone */
1090 #endif
1091
1092 /* This is the X/Open sanctioned signal stack switching. */
1093 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1094 if (sas_ss_flags(esp) == 0) {
1095 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1096 }
1097 } else {
1098 #ifndef TARGET_X86_64
1099 /* This is the legacy signal stack switching. */
1100 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1101 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1102 ka->sa_restorer) {
1103 esp = (unsigned long) ka->sa_restorer;
1104 }
1105 #endif
1106 }
1107
1108 #ifndef TARGET_X86_64
1109 return (esp - frame_size) & -8ul;
1110 #else
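/* The x86-64 ABI expects rsp % 16 == 8 at function entry (as if a call
 * had just pushed the return address), so round down to 16 bytes and
 * subtract 8.
 */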
1111 return ((esp - frame_size) & (~15ul)) - 8;
1112 #endif
1113 }
1114
1115 #ifndef TARGET_X86_64
1116 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1117 static void setup_frame(int sig, struct target_sigaction *ka,
1118 target_sigset_t *set, CPUX86State *env)
1119 {
1120 abi_ulong frame_addr;
1121 struct sigframe *frame;
1122 int i;
1123
1124 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1125 trace_user_setup_frame(env, frame_addr);
1126
1127 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1128 goto give_sigsegv;
1129
1130 __put_user(sig, &frame->sig);
1131
1132 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1133 frame_addr + offsetof(struct sigframe, fpstate));
1134
1135 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1136 __put_user(set->sig[i], &frame->extramask[i - 1]);
1137 }
1138
1139 /* Set up to return from userspace. If provided, use a stub
1140 already in userspace. */
1141 if (ka->sa_flags & TARGET_SA_RESTORER) {
1142 __put_user(ka->sa_restorer, &frame->pretcode);
1143 } else {
1144 uint16_t val16;
1145 abi_ulong retcode_addr;
1146 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1147 __put_user(retcode_addr, &frame->pretcode);
1148 /* This is popl %eax ; movl $,%eax ; int $0x80 */
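/* Byte by byte that is 0x58 (popl %eax), 0xb8 imm32 (movl $imm,%eax)
 * and 0xcd 0x80 (int $0x80), hence the little-endian 16-bit stores of
 * 0xb858 and 0x80cd around the 32-bit syscall number.
 */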
1149 val16 = 0xb858;
1150 __put_user(val16, (uint16_t *)(frame->retcode+0));
1151 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1152 val16 = 0x80cd;
1153 __put_user(val16, (uint16_t *)(frame->retcode+6));
1154 }
1155
1156 /* Set up registers for signal handler */
1157 env->regs[R_ESP] = frame_addr;
1158 env->eip = ka->_sa_handler;
1159
1160 cpu_x86_load_seg(env, R_DS, __USER_DS);
1161 cpu_x86_load_seg(env, R_ES, __USER_DS);
1162 cpu_x86_load_seg(env, R_SS, __USER_DS);
1163 cpu_x86_load_seg(env, R_CS, __USER_CS);
1164 env->eflags &= ~TF_MASK;
1165
1166 unlock_user_struct(frame, frame_addr, 1);
1167
1168 return;
1169
1170 give_sigsegv:
1171 force_sigsegv(sig);
1172 }
1173 #endif
1174
1175 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1176 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1177 target_siginfo_t *info,
1178 target_sigset_t *set, CPUX86State *env)
1179 {
1180 abi_ulong frame_addr;
1181 #ifndef TARGET_X86_64
1182 abi_ulong addr;
1183 #endif
1184 struct rt_sigframe *frame;
1185 int i;
1186
1187 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1188 trace_user_setup_rt_frame(env, frame_addr);
1189
1190 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1191 goto give_sigsegv;
1192
1193 /* These fields are only in rt_sigframe on 32 bit */
1194 #ifndef TARGET_X86_64
1195 __put_user(sig, &frame->sig);
1196 addr = frame_addr + offsetof(struct rt_sigframe, info);
1197 __put_user(addr, &frame->pinfo);
1198 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1199 __put_user(addr, &frame->puc);
1200 #endif
1201 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1202 tswap_siginfo(&frame->info, info);
1203 }
1204
1205 /* Create the ucontext. */
1206 __put_user(0, &frame->uc.tuc_flags);
1207 __put_user(0, &frame->uc.tuc_link);
1208 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1209 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1210 &frame->uc.tuc_stack.ss_flags);
1211 __put_user(target_sigaltstack_used.ss_size,
1212 &frame->uc.tuc_stack.ss_size);
1213 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1214 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1215
1216 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1217 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1218 }
1219
1220 /* Set up to return from userspace. If provided, use a stub
1221 already in userspace. */
1222 #ifndef TARGET_X86_64
1223 if (ka->sa_flags & TARGET_SA_RESTORER) {
1224 __put_user(ka->sa_restorer, &frame->pretcode);
1225 } else {
1226 uint16_t val16;
1227 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1228 __put_user(addr, &frame->pretcode);
1229 /* This is movl $,%eax ; int $0x80 */
1230 __put_user(0xb8, (char *)(frame->retcode+0));
1231 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1232 val16 = 0x80cd;
1233 __put_user(val16, (uint16_t *)(frame->retcode+5));
1234 }
1235 #else
1236 /* XXX: Would be slightly better to return -EFAULT here if test fails
1237 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1238 __put_user(ka->sa_restorer, &frame->pretcode);
1239 #endif
1240
1241 /* Set up registers for signal handler */
1242 env->regs[R_ESP] = frame_addr;
1243 env->eip = ka->_sa_handler;
1244
1245 #ifndef TARGET_X86_64
1246 env->regs[R_EAX] = sig;
1247 env->regs[R_EDX] = (unsigned long)&frame->info;
1248 env->regs[R_ECX] = (unsigned long)&frame->uc;
1249 #else
1250 env->regs[R_EAX] = 0;
1251 env->regs[R_EDI] = sig;
1252 env->regs[R_ESI] = (unsigned long)&frame->info;
1253 env->regs[R_EDX] = (unsigned long)&frame->uc;
1254 #endif
1255
1256 cpu_x86_load_seg(env, R_DS, __USER_DS);
1257 cpu_x86_load_seg(env, R_ES, __USER_DS);
1258 cpu_x86_load_seg(env, R_CS, __USER_CS);
1259 cpu_x86_load_seg(env, R_SS, __USER_DS);
1260 env->eflags &= ~TF_MASK;
1261
1262 unlock_user_struct(frame, frame_addr, 1);
1263
1264 return;
1265
1266 give_sigsegv:
1267 force_sigsegv(sig);
1268 }
1269
1270 static int
1271 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1272 {
1273 unsigned int err = 0;
1274 abi_ulong fpstate_addr;
1275 unsigned int tmpflags;
1276
1277 #ifndef TARGET_X86_64
1278 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1279 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1280 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1281 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1282
1283 env->regs[R_EDI] = tswapl(sc->edi);
1284 env->regs[R_ESI] = tswapl(sc->esi);
1285 env->regs[R_EBP] = tswapl(sc->ebp);
1286 env->regs[R_ESP] = tswapl(sc->esp);
1287 env->regs[R_EBX] = tswapl(sc->ebx);
1288 env->regs[R_EDX] = tswapl(sc->edx);
1289 env->regs[R_ECX] = tswapl(sc->ecx);
1290 env->regs[R_EAX] = tswapl(sc->eax);
1291
1292 env->eip = tswapl(sc->eip);
1293 #else
1294 env->regs[8] = tswapl(sc->r8);
1295 env->regs[9] = tswapl(sc->r9);
1296 env->regs[10] = tswapl(sc->r10);
1297 env->regs[11] = tswapl(sc->r11);
1298 env->regs[12] = tswapl(sc->r12);
1299 env->regs[13] = tswapl(sc->r13);
1300 env->regs[14] = tswapl(sc->r14);
1301 env->regs[15] = tswapl(sc->r15);
1302
1303 env->regs[R_EDI] = tswapl(sc->rdi);
1304 env->regs[R_ESI] = tswapl(sc->rsi);
1305 env->regs[R_EBP] = tswapl(sc->rbp);
1306 env->regs[R_EBX] = tswapl(sc->rbx);
1307 env->regs[R_EDX] = tswapl(sc->rdx);
1308 env->regs[R_EAX] = tswapl(sc->rax);
1309 env->regs[R_ECX] = tswapl(sc->rcx);
1310 env->regs[R_ESP] = tswapl(sc->rsp);
1311
1312 env->eip = tswapl(sc->rip);
1313 #endif
1314
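/* Reload CS and SS with the RPL forced to 3 (user mode), as the kernel
 * does when restoring a sigcontext, so a forged frame cannot request
 * kernel-mode selectors.
 */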
1315 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1316 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1317
1318 tmpflags = tswapl(sc->eflags);
1319 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1320 // regs->orig_eax = -1; /* disable syscall checks */
1321
1322 fpstate_addr = tswapl(sc->fpstate);
1323 if (fpstate_addr != 0) {
1324 if (!access_ok(VERIFY_READ, fpstate_addr,
1325 sizeof(struct target_fpstate)))
1326 goto badframe;
1327 #ifndef TARGET_X86_64
1328 cpu_x86_frstor(env, fpstate_addr, 1);
1329 #else
1330 cpu_x86_fxrstor(env, fpstate_addr);
1331 #endif
1332 }
1333
1334 return err;
1335 badframe:
1336 return 1;
1337 }
1338
1339 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1340 #ifndef TARGET_X86_64
1341 long do_sigreturn(CPUX86State *env)
1342 {
1343 struct sigframe *frame;
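/* By the time the sigreturn trampoline runs, the handler's "ret" has
 * popped pretcode and the trampoline's "popl %eax" has popped sig, so
 * ESP points 8 bytes past the start of the frame.
 */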
1344 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1345 target_sigset_t target_set;
1346 sigset_t set;
1347 int i;
1348
1349 trace_user_do_sigreturn(env, frame_addr);
1350 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1351 goto badframe;
1352 /* set blocked signals */
1353 __get_user(target_set.sig[0], &frame->sc.oldmask);
1354 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1355 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1356 }
1357
1358 target_to_host_sigset_internal(&set, &target_set);
1359 set_sigmask(&set);
1360
1361 /* restore registers */
1362 if (restore_sigcontext(env, &frame->sc))
1363 goto badframe;
1364 unlock_user_struct(frame, frame_addr, 0);
1365 return -TARGET_QEMU_ESIGRETURN;
1366
1367 badframe:
1368 unlock_user_struct(frame, frame_addr, 0);
1369 force_sig(TARGET_SIGSEGV);
1370 return -TARGET_QEMU_ESIGRETURN;
1371 }
1372 #endif
1373
1374 long do_rt_sigreturn(CPUX86State *env)
1375 {
1376 abi_ulong frame_addr;
1377 struct rt_sigframe *frame;
1378 sigset_t set;
1379
1380 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1381 trace_user_do_rt_sigreturn(env, frame_addr);
1382 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1383 goto badframe;
1384 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1385 set_sigmask(&set);
1386
1387 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1388 goto badframe;
1389 }
1390
1391 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1392 get_sp_from_cpustate(env)) == -EFAULT) {
1393 goto badframe;
1394 }
1395
1396 unlock_user_struct(frame, frame_addr, 0);
1397 return -TARGET_QEMU_ESIGRETURN;
1398
1399 badframe:
1400 unlock_user_struct(frame, frame_addr, 0);
1401 force_sig(TARGET_SIGSEGV);
1402 return -TARGET_QEMU_ESIGRETURN;
1403 }
1404
1405 #elif defined(TARGET_AARCH64)
1406
1407 struct target_sigcontext {
1408 uint64_t fault_address;
1409 /* AArch64 registers */
1410 uint64_t regs[31];
1411 uint64_t sp;
1412 uint64_t pc;
1413 uint64_t pstate;
1414 /* 4K reserved for FP/SIMD state and future expansion */
1415 char __reserved[4096] __attribute__((__aligned__(16)));
1416 };
1417
1418 struct target_ucontext {
1419 abi_ulong tuc_flags;
1420 abi_ulong tuc_link;
1421 target_stack_t tuc_stack;
1422 target_sigset_t tuc_sigmask;
1423 /* glibc uses a 1024-bit sigset_t */
1424 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1425 /* last for future expansion */
1426 struct target_sigcontext tuc_mcontext;
1427 };
1428
1429 /*
1430 * Header to be used at the beginning of structures extending the user
1431 * context. Such structures must be placed after the rt_sigframe on the stack
1432 * and be 16-byte aligned. The last structure must be a dummy one with the
1433 * magic and size set to 0.
1434 */
1435 struct target_aarch64_ctx {
1436 uint32_t magic;
1437 uint32_t size;
1438 };
1439
1440 #define TARGET_FPSIMD_MAGIC 0x46508001
1441
1442 struct target_fpsimd_context {
1443 struct target_aarch64_ctx head;
1444 uint32_t fpsr;
1445 uint32_t fpcr;
1446 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1447 };
1448
1449 /*
1450 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1451 * user space as it will change with the addition of new context. User space
1452 * should check the magic/size information.
1453 */
1454 struct target_aux_context {
1455 struct target_fpsimd_context fpsimd;
1456 /* additional context to be added before "end" */
1457 struct target_aarch64_ctx end;
1458 };
1459
1460 struct target_rt_sigframe {
1461 struct target_siginfo info;
1462 struct target_ucontext uc;
1463 uint64_t fp;
1464 uint64_t lr;
1465 uint32_t tramp[2];
1466 };
1467
1468 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1469 CPUARMState *env, target_sigset_t *set)
1470 {
1471 int i;
1472 struct target_aux_context *aux =
1473 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1474
1475 /* set up the stack frame for unwinding */
1476 __put_user(env->xregs[29], &sf->fp);
1477 __put_user(env->xregs[30], &sf->lr);
1478
1479 for (i = 0; i < 31; i++) {
1480 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1481 }
1482 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1483 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1484 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1485
1486 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1487
1488 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1489 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1490 }
1491
1492 for (i = 0; i < 32; i++) {
1493 uint64_t *q = aa64_vfp_qreg(env, i);
1494 #ifdef TARGET_WORDS_BIGENDIAN
1495 __put_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
1496 __put_user(q[1], &aux->fpsimd.vregs[i * 2]);
1497 #else
1498 __put_user(q[0], &aux->fpsimd.vregs[i * 2]);
1499 __put_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
1500 #endif
1501 }
1502 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1503 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1504 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1505 __put_user(sizeof(struct target_fpsimd_context),
1506 &aux->fpsimd.head.size);
1507
1508 /* set the "end" magic */
1509 __put_user(0, &aux->end.magic);
1510 __put_user(0, &aux->end.size);
1511
1512 return 0;
1513 }
1514
1515 static int target_restore_sigframe(CPUARMState *env,
1516 struct target_rt_sigframe *sf)
1517 {
1518 sigset_t set;
1519 int i;
1520 struct target_aux_context *aux =
1521 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1522 uint32_t magic, size, fpsr, fpcr;
1523 uint64_t pstate;
1524
1525 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1526 set_sigmask(&set);
1527
1528 for (i = 0; i < 31; i++) {
1529 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1530 }
1531
1532 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1533 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1534 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1535 pstate_write(env, pstate);
1536
1537 __get_user(magic, &aux->fpsimd.head.magic);
1538 __get_user(size, &aux->fpsimd.head.size);
1539
1540 if (magic != TARGET_FPSIMD_MAGIC
1541 || size != sizeof(struct target_fpsimd_context)) {
1542 return 1;
1543 }
1544
1545 for (i = 0; i < 32; i++) {
1546 uint64_t *q = aa64_vfp_qreg(env, i);
1547 #ifdef TARGET_WORDS_BIGENDIAN
1548 __get_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
1549 __get_user(q[1], &aux->fpsimd.vregs[i * 2]);
1550 #else
1551 __get_user(q[0], &aux->fpsimd.vregs[i * 2]);
1552 __get_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
1553 #endif
1554 }
1555 __get_user(fpsr, &aux->fpsimd.fpsr);
1556 vfp_set_fpsr(env, fpsr);
1557 __get_user(fpcr, &aux->fpsimd.fpcr);
1558 vfp_set_fpcr(env, fpcr);
1559
1560 return 0;
1561 }
1562
1563 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1564 {
1565 abi_ulong sp;
1566
1567 sp = env->xregs[31];
1568
1569 /*
1570 * This is the X/Open sanctioned signal stack switching.
1571 */
1572 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1573 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1574 }
1575
1576 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1577
1578 return sp;
1579 }
1580
1581 static void target_setup_frame(int usig, struct target_sigaction *ka,
1582 target_siginfo_t *info, target_sigset_t *set,
1583 CPUARMState *env)
1584 {
1585 struct target_rt_sigframe *frame;
1586 abi_ulong frame_addr, return_addr;
1587
1588 frame_addr = get_sigframe(ka, env);
1589 trace_user_setup_frame(env, frame_addr);
1590 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1591 goto give_sigsegv;
1592 }
1593
1594 __put_user(0, &frame->uc.tuc_flags);
1595 __put_user(0, &frame->uc.tuc_link);
1596
1597 __put_user(target_sigaltstack_used.ss_sp,
1598 &frame->uc.tuc_stack.ss_sp);
1599 __put_user(sas_ss_flags(env->xregs[31]),
1600 &frame->uc.tuc_stack.ss_flags);
1601 __put_user(target_sigaltstack_used.ss_size,
1602 &frame->uc.tuc_stack.ss_size);
1603 target_setup_sigframe(frame, env, set);
1604 if (ka->sa_flags & TARGET_SA_RESTORER) {
1605 return_addr = ka->sa_restorer;
1606 } else {
1607 /*
1608 * mov x8,#__NR_rt_sigreturn; svc #0
1609 * Since these are instructions they need to be put as little-endian
1610 * regardless of target default or current CPU endianness.
1611 */
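/* 0xd2801168 is "movz x8, #0x8b" (139, __NR_rt_sigreturn on AArch64)
 * and 0xd4000001 is "svc #0".
 */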
1612 __put_user_e(0xd2801168, &frame->tramp[0], le);
1613 __put_user_e(0xd4000001, &frame->tramp[1], le);
1614 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1615 }
1616 env->xregs[0] = usig;
1617 env->xregs[31] = frame_addr;
1618 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1619 env->pc = ka->_sa_handler;
1620 env->xregs[30] = return_addr;
1621 if (info) {
1622 tswap_siginfo(&frame->info, info);
1623 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1624 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1625 }
1626
1627 unlock_user_struct(frame, frame_addr, 1);
1628 return;
1629
1630 give_sigsegv:
1631 unlock_user_struct(frame, frame_addr, 1);
1632 force_sigsegv(usig);
1633 }
1634
1635 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1636 target_siginfo_t *info, target_sigset_t *set,
1637 CPUARMState *env)
1638 {
1639 target_setup_frame(sig, ka, info, set, env);
1640 }
1641
1642 static void setup_frame(int sig, struct target_sigaction *ka,
1643 target_sigset_t *set, CPUARMState *env)
1644 {
1645 target_setup_frame(sig, ka, 0, set, env);
1646 }
1647
1648 long do_rt_sigreturn(CPUARMState *env)
1649 {
1650 struct target_rt_sigframe *frame = NULL;
1651 abi_ulong frame_addr = env->xregs[31];
1652
1653 trace_user_do_rt_sigreturn(env, frame_addr);
1654 if (frame_addr & 15) {
1655 goto badframe;
1656 }
1657
1658 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1659 goto badframe;
1660 }
1661
1662 if (target_restore_sigframe(env, frame)) {
1663 goto badframe;
1664 }
1665
1666 if (do_sigaltstack(frame_addr +
1667 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1668 0, get_sp_from_cpustate(env)) == -EFAULT) {
1669 goto badframe;
1670 }
1671
1672 unlock_user_struct(frame, frame_addr, 0);
1673 return -TARGET_QEMU_ESIGRETURN;
1674
1675 badframe:
1676 unlock_user_struct(frame, frame_addr, 0);
1677 force_sig(TARGET_SIGSEGV);
1678 return -TARGET_QEMU_ESIGRETURN;
1679 }
1680
1681 long do_sigreturn(CPUARMState *env)
1682 {
1683 return do_rt_sigreturn(env);
1684 }
1685
1686 #elif defined(TARGET_ARM)
1687
1688 struct target_sigcontext {
1689 abi_ulong trap_no;
1690 abi_ulong error_code;
1691 abi_ulong oldmask;
1692 abi_ulong arm_r0;
1693 abi_ulong arm_r1;
1694 abi_ulong arm_r2;
1695 abi_ulong arm_r3;
1696 abi_ulong arm_r4;
1697 abi_ulong arm_r5;
1698 abi_ulong arm_r6;
1699 abi_ulong arm_r7;
1700 abi_ulong arm_r8;
1701 abi_ulong arm_r9;
1702 abi_ulong arm_r10;
1703 abi_ulong arm_fp;
1704 abi_ulong arm_ip;
1705 abi_ulong arm_sp;
1706 abi_ulong arm_lr;
1707 abi_ulong arm_pc;
1708 abi_ulong arm_cpsr;
1709 abi_ulong fault_address;
1710 };
1711
1712 struct target_ucontext_v1 {
1713 abi_ulong tuc_flags;
1714 abi_ulong tuc_link;
1715 target_stack_t tuc_stack;
1716 struct target_sigcontext tuc_mcontext;
1717 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1718 };
1719
1720 struct target_ucontext_v2 {
1721 abi_ulong tuc_flags;
1722 abi_ulong tuc_link;
1723 target_stack_t tuc_stack;
1724 struct target_sigcontext tuc_mcontext;
1725 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1726 char __unused[128 - sizeof(target_sigset_t)];
1727 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1728 };
1729
1730 struct target_user_vfp {
1731 uint64_t fpregs[32];
1732 abi_ulong fpscr;
1733 };
1734
1735 struct target_user_vfp_exc {
1736 abi_ulong fpexc;
1737 abi_ulong fpinst;
1738 abi_ulong fpinst2;
1739 };
1740
1741 struct target_vfp_sigframe {
1742 abi_ulong magic;
1743 abi_ulong size;
1744 struct target_user_vfp ufp;
1745 struct target_user_vfp_exc ufp_exc;
1746 } __attribute__((__aligned__(8)));
1747
1748 struct target_iwmmxt_sigframe {
1749 abi_ulong magic;
1750 abi_ulong size;
1751 uint64_t regs[16];
1752 /* Note that not all the coprocessor control registers are stored here */
1753 uint32_t wcssf;
1754 uint32_t wcasf;
1755 uint32_t wcgr0;
1756 uint32_t wcgr1;
1757 uint32_t wcgr2;
1758 uint32_t wcgr3;
1759 } __attribute__((__aligned__(8)));
1760
1761 #define TARGET_VFP_MAGIC 0x56465001
1762 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1763
1764 struct sigframe_v1
1765 {
1766 struct target_sigcontext sc;
1767 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1768 abi_ulong retcode;
1769 };
1770
1771 struct sigframe_v2
1772 {
1773 struct target_ucontext_v2 uc;
1774 abi_ulong retcode;
1775 };
1776
1777 struct rt_sigframe_v1
1778 {
1779 abi_ulong pinfo;
1780 abi_ulong puc;
1781 struct target_siginfo info;
1782 struct target_ucontext_v1 uc;
1783 abi_ulong retcode;
1784 };
1785
1786 struct rt_sigframe_v2
1787 {
1788 struct target_siginfo info;
1789 struct target_ucontext_v2 uc;
1790 abi_ulong retcode;
1791 };
1792
1793 #define TARGET_CONFIG_CPU_32 1
1794
1795 /*
1796 * For ARM syscalls, we encode the syscall number into the instruction.
1797 */
1798 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1799 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1800
1801 /*
1802 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1803 * need two 16-bit instructions.
1804 */
1805 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1806 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
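/* In the Thumb encoding 0x27NN is "movs r7, #NN" and 0xdf00 is "svc 0",
 * so each retcode word packs the two 16-bit instructions that load the
 * syscall number and trap into the kernel.
 */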
1807
1808 static const abi_ulong retcodes[4] = {
1809 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1810 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1811 };
1812
1813
1814 static inline int valid_user_regs(CPUARMState *regs)
1815 {
1816 return 1;
1817 }
1818
1819 static void
1820 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1821 CPUARMState *env, abi_ulong mask)
1822 {
1823 __put_user(env->regs[0], &sc->arm_r0);
1824 __put_user(env->regs[1], &sc->arm_r1);
1825 __put_user(env->regs[2], &sc->arm_r2);
1826 __put_user(env->regs[3], &sc->arm_r3);
1827 __put_user(env->regs[4], &sc->arm_r4);
1828 __put_user(env->regs[5], &sc->arm_r5);
1829 __put_user(env->regs[6], &sc->arm_r6);
1830 __put_user(env->regs[7], &sc->arm_r7);
1831 __put_user(env->regs[8], &sc->arm_r8);
1832 __put_user(env->regs[9], &sc->arm_r9);
1833 __put_user(env->regs[10], &sc->arm_r10);
1834 __put_user(env->regs[11], &sc->arm_fp);
1835 __put_user(env->regs[12], &sc->arm_ip);
1836 __put_user(env->regs[13], &sc->arm_sp);
1837 __put_user(env->regs[14], &sc->arm_lr);
1838 __put_user(env->regs[15], &sc->arm_pc);
1839 #ifdef TARGET_CONFIG_CPU_32
1840 __put_user(cpsr_read(env), &sc->arm_cpsr);
1841 #endif
1842
1843 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1844 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1845 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1846 __put_user(mask, &sc->oldmask);
1847 }
1848
1849 static inline abi_ulong
1850 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1851 {
1852 unsigned long sp = regs->regs[13];
1853
1854 /*
1855 * This is the X/Open sanctioned signal stack switching.
1856 */
1857 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1858 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1859 }
1860 /*
1861 * ATPCS B01 mandates 8-byte alignment
1862 */
1863 return (sp - framesize) & ~7;
1864 }
1865
1866 static void
1867 setup_return(CPUARMState *env, struct target_sigaction *ka,
1868 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1869 {
1870 abi_ulong handler = ka->_sa_handler;
1871 abi_ulong retcode;
1872 int thumb = handler & 1;
1873 uint32_t cpsr = cpsr_read(env);
1874
1875 cpsr &= ~CPSR_IT;
1876 if (thumb) {
1877 cpsr |= CPSR_T;
1878 } else {
1879 cpsr &= ~CPSR_T;
1880 }
1881
1882 if (ka->sa_flags & TARGET_SA_RESTORER) {
1883 retcode = ka->sa_restorer;
1884 } else {
1885 unsigned int idx = thumb;
1886
1887 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1888 idx += 2;
1889 }
1890
1891 __put_user(retcodes[idx], rc);
1892
1893 retcode = rc_addr + thumb;
1894 }
1895
1896 env->regs[0] = usig;
1897 env->regs[13] = frame_addr;
1898 env->regs[14] = retcode;
1899 env->regs[15] = handler & (thumb ? ~1 : ~3);
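/* The low bits of the handler address only flag the instruction set;
 * strip them so the PC is properly aligned. The Thumb state itself is
 * carried in the CPSR.T bit set above.
 */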
1900 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1901 }
1902
1903 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1904 {
1905 int i;
1906 struct target_vfp_sigframe *vfpframe;
1907 vfpframe = (struct target_vfp_sigframe *)regspace;
1908 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1909 __put_user(sizeof(*vfpframe), &vfpframe->size);
1910 for (i = 0; i < 32; i++) {
1911 __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
1912 }
1913 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1914 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1915 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1916 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1917 return (abi_ulong*)(vfpframe+1);
1918 }
1919
1920 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1921 CPUARMState *env)
1922 {
1923 int i;
1924 struct target_iwmmxt_sigframe *iwmmxtframe;
1925 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1926 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1927 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1928 for (i = 0; i < 16; i++) {
1929 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1930 }
1931 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1932 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1933 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1934 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1935 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1936 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1937 return (abi_ulong*)(iwmmxtframe+1);
1938 }
1939
1940 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1941 target_sigset_t *set, CPUARMState *env)
1942 {
1943 struct target_sigaltstack stack;
1944 int i;
1945 abi_ulong *regspace;
1946
1947 /* Clear all the bits of the ucontext we don't use. */
1948 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1949
1950 memset(&stack, 0, sizeof(stack));
1951 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1952 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1953 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1954 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1955
1956 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1957 /* Save coprocessor signal frame. */
1958 regspace = uc->tuc_regspace;
1959 if (arm_feature(env, ARM_FEATURE_VFP)) {
1960 regspace = setup_sigframe_v2_vfp(regspace, env);
1961 }
1962 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1963 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1964 }
1965
1966 /* Write terminating magic word */
1967 __put_user(0, regspace);
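/* The sigreturn path (and userspace) walks tuc_regspace as a sequence of
 * (magic, size) records, stopping at this zero magic word.
 */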
1968
1969 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1970 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1971 }
1972 }
1973
1974 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1975 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1976 target_sigset_t *set, CPUARMState *regs)
1977 {
1978 struct sigframe_v1 *frame;
1979 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1980 int i;
1981
1982 trace_user_setup_frame(regs, frame_addr);
1983 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1984 goto sigsegv;
1985 }
1986
1987 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1988
1989 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1990 __put_user(set->sig[i], &frame->extramask[i - 1]);
1991 }
1992
1993 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1994 frame_addr + offsetof(struct sigframe_v1, retcode));
1995
1996 unlock_user_struct(frame, frame_addr, 1);
1997 return;
1998 sigsegv:
1999 force_sigsegv(usig);
2000 }
2001
2002 static void setup_frame_v2(int usig, struct target_sigaction *ka,
2003 target_sigset_t *set, CPUARMState *regs)
2004 {
2005 struct sigframe_v2 *frame;
2006 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2007
2008 trace_user_setup_frame(regs, frame_addr);
2009 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2010 goto sigsegv;
2011 }
2012
2013 setup_sigframe_v2(&frame->uc, set, regs);
2014
2015 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2016 frame_addr + offsetof(struct sigframe_v2, retcode));
2017
2018 unlock_user_struct(frame, frame_addr, 1);
2019 return;
2020 sigsegv:
2021 force_sigsegv(usig);
2022 }
2023
2024 static void setup_frame(int usig, struct target_sigaction *ka,
2025 target_sigset_t *set, CPUARMState *regs)
2026 {
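/* get_osversion() packs the emulated kernel version as 0xMMmmpp, so
 * 0x020612 is 2.6.18, roughly when the ARM kernel moved to the
 * ucontext-based "v2" sigframe layout.
 */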
2027 if (get_osversion() >= 0x020612) {
2028 setup_frame_v2(usig, ka, set, regs);
2029 } else {
2030 setup_frame_v1(usig, ka, set, regs);
2031 }
2032 }
2033
2034 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
2035 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
2036 target_siginfo_t *info,
2037 target_sigset_t *set, CPUARMState *env)
2038 {
2039 struct rt_sigframe_v1 *frame;
2040 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2041 struct target_sigaltstack stack;
2042 int i;
2043 abi_ulong info_addr, uc_addr;
2044
2045 trace_user_setup_rt_frame(env, frame_addr);
2046 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2047 goto sigsegv;
2048 }
2049
2050 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
2051 __put_user(info_addr, &frame->pinfo);
2052 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
2053 __put_user(uc_addr, &frame->puc);
2054 tswap_siginfo(&frame->info, info);
2055
2056 /* Clear all the bits of the ucontext we don't use. */
2057 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
2058
2059 memset(&stack, 0, sizeof(stack));
2060 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
2061 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
2062 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
2063 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
2064
2065 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
2066 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2067 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
2068 }
2069
2070 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2071 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
2072
2073 env->regs[1] = info_addr;
2074 env->regs[2] = uc_addr;
2075
2076 unlock_user_struct(frame, frame_addr, 1);
2077 return;
2078 sigsegv:
2079 force_sigsegv(usig);
2080 }
2081
2082 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2083 target_siginfo_t *info,
2084 target_sigset_t *set, CPUARMState *env)
2085 {
2086 struct rt_sigframe_v2 *frame;
2087 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2088 abi_ulong info_addr, uc_addr;
2089
2090 trace_user_setup_rt_frame(env, frame_addr);
2091 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2092 goto sigsegv;
2093 }
2094
2095 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2096 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2097 tswap_siginfo(&frame->info, info);
2098
2099 setup_sigframe_v2(&frame->uc, set, env);
2100
2101 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2102 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2103
2104 env->regs[1] = info_addr;
2105 env->regs[2] = uc_addr;
2106
2107 unlock_user_struct(frame, frame_addr, 1);
2108 return;
2109 sigsegv:
2110 force_sigsegv(usig);
2111 }
2112
2113 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2114 target_siginfo_t *info,
2115 target_sigset_t *set, CPUARMState *env)
2116 {
2117 if (get_osversion() >= 0x020612) {
2118 setup_rt_frame_v2(usig, ka, info, set, env);
2119 } else {
2120 setup_rt_frame_v1(usig, ka, info, set, env);
2121 }
2122 }
2123
2124 static int
2125 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
2126 {
2127 int err = 0;
2128 uint32_t cpsr;
2129
2130 __get_user(env->regs[0], &sc->arm_r0);
2131 __get_user(env->regs[1], &sc->arm_r1);
2132 __get_user(env->regs[2], &sc->arm_r2);
2133 __get_user(env->regs[3], &sc->arm_r3);
2134 __get_user(env->regs[4], &sc->arm_r4);
2135 __get_user(env->regs[5], &sc->arm_r5);
2136 __get_user(env->regs[6], &sc->arm_r6);
2137 __get_user(env->regs[7], &sc->arm_r7);
2138 __get_user(env->regs[8], &sc->arm_r8);
2139 __get_user(env->regs[9], &sc->arm_r9);
2140 __get_user(env->regs[10], &sc->arm_r10);
2141 __get_user(env->regs[11], &sc->arm_fp);
2142 __get_user(env->regs[12], &sc->arm_ip);
2143 __get_user(env->regs[13], &sc->arm_sp);
2144 __get_user(env->regs[14], &sc->arm_lr);
2145 __get_user(env->regs[15], &sc->arm_pc);
2146 #ifdef TARGET_CONFIG_CPU_32
2147 __get_user(cpsr, &sc->arm_cpsr);
2148 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
2149 #endif
2150
2151 err |= !valid_user_regs(env);
2152
2153 return err;
2154 }
2155
2156 static long do_sigreturn_v1(CPUARMState *env)
2157 {
2158 abi_ulong frame_addr;
2159 struct sigframe_v1 *frame = NULL;
2160 target_sigset_t set;
2161 sigset_t host_set;
2162 int i;
2163
2164 /*
2165 * Since we stacked the signal frame on an 8-byte boundary,
2166 * 'sp' should be 8-byte aligned here. If it's not, the
2167 * user is trying to mess with us.
2168 */
2169 frame_addr = env->regs[13];
2170 trace_user_do_sigreturn(env, frame_addr);
2171 if (frame_addr & 7) {
2172 goto badframe;
2173 }
2174
2175 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2176 goto badframe;
2177 }
2178
2179 __get_user(set.sig[0], &frame->sc.oldmask);
2180 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2181 __get_user(set.sig[i], &frame->extramask[i - 1]);
2182 }
2183
2184 target_to_host_sigset_internal(&host_set, &set);
2185 set_sigmask(&host_set);
2186
2187 if (restore_sigcontext(env, &frame->sc)) {
2188 goto badframe;
2189 }
2190
2191 #if 0
2192 /* Send SIGTRAP if we're single-stepping */
2193 if (ptrace_cancel_bpt(current))
2194 send_sig(SIGTRAP, current, 1);
2195 #endif
2196 unlock_user_struct(frame, frame_addr, 0);
2197 return -TARGET_QEMU_ESIGRETURN;
2198
2199 badframe:
2200 force_sig(TARGET_SIGSEGV);
2201 return -TARGET_QEMU_ESIGRETURN;
2202 }
2203
2204 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
2205 {
2206 int i;
2207 abi_ulong magic, sz;
2208 uint32_t fpscr, fpexc;
2209 struct target_vfp_sigframe *vfpframe;
2210 vfpframe = (struct target_vfp_sigframe *)regspace;
2211
2212 __get_user(magic, &vfpframe->magic);
2213 __get_user(sz, &vfpframe->size);
2214 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
2215 return 0;
2216 }
2217 for (i = 0; i < 32; i++) {
2218 __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
2219 }
2220 __get_user(fpscr, &vfpframe->ufp.fpscr);
2221 vfp_set_fpscr(env, fpscr);
2222 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2223 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2224 * and the exception flag is cleared
2225 */
2226 fpexc |= (1 << 30);
2227 fpexc &= ~((1 << 31) | (1 << 28));
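/* FPEXC bit 31 is EX (exception pending), bit 30 is EN (enable),
 * bit 28 is FP2V (FPINST2 valid).
 */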
2228 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2229 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2230 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2231 return (abi_ulong*)(vfpframe + 1);
2232 }
2233
2234 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2235 abi_ulong *regspace)
2236 {
2237 int i;
2238 abi_ulong magic, sz;
2239 struct target_iwmmxt_sigframe *iwmmxtframe;
2240 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2241
2242 __get_user(magic, &iwmmxtframe->magic);
2243 __get_user(sz, &iwmmxtframe->size);
2244 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2245 return 0;
2246 }
2247 for (i = 0; i < 16; i++) {
2248 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2249 }
2250 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2251 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2252 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2253 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2254 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2255 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2256 return (abi_ulong*)(iwmmxtframe + 1);
2257 }
2258
2259 static int do_sigframe_return_v2(CPUARMState *env,
2260 target_ulong context_addr,
2261 struct target_ucontext_v2 *uc)
2262 {
2263 sigset_t host_set;
2264 abi_ulong *regspace;
2265
2266 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2267 set_sigmask(&host_set);
2268
2269 if (restore_sigcontext(env, &uc->tuc_mcontext))
2270 return 1;
2271
2272 /* Restore coprocessor signal frame */
2273 regspace = uc->tuc_regspace;
2274 if (arm_feature(env, ARM_FEATURE_VFP)) {
2275 regspace = restore_sigframe_v2_vfp(env, regspace);
2276 if (!regspace) {
2277 return 1;
2278 }
2279 }
2280 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2281 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2282 if (!regspace) {
2283 return 1;
2284 }
2285 }
2286
2287 if (do_sigaltstack(context_addr
2288 + offsetof(struct target_ucontext_v2, tuc_stack),
2289 0, get_sp_from_cpustate(env)) == -EFAULT) {
2290 return 1;
2291 }
2292
2293 #if 0
2294 /* Send SIGTRAP if we're single-stepping */
2295 if (ptrace_cancel_bpt(current))
2296 send_sig(SIGTRAP, current, 1);
2297 #endif
2298
2299 return 0;
2300 }
2301
2302 static long do_sigreturn_v2(CPUARMState *env)
2303 {
2304 abi_ulong frame_addr;
2305 struct sigframe_v2 *frame = NULL;
2306
2307 /*
2308 * Since we stacked the signal frame on an 8-byte boundary,
2309 * 'sp' should be 8-byte aligned here. If it's not, the
2310 * user is trying to mess with us.
2311 */
2312 frame_addr = env->regs[13];
2313 trace_user_do_sigreturn(env, frame_addr);
2314 if (frame_addr & 7) {
2315 goto badframe;
2316 }
2317
2318 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2319 goto badframe;
2320 }
2321
2322 if (do_sigframe_return_v2(env,
2323 frame_addr
2324 + offsetof(struct sigframe_v2, uc),
2325 &frame->uc)) {
2326 goto badframe;
2327 }
2328
2329 unlock_user_struct(frame, frame_addr, 0);
2330 return -TARGET_QEMU_ESIGRETURN;
2331
2332 badframe:
2333 unlock_user_struct(frame, frame_addr, 0);
2334 force_sig(TARGET_SIGSEGV);
2335 return -TARGET_QEMU_ESIGRETURN;
2336 }
2337
2338 long do_sigreturn(CPUARMState *env)
2339 {
2340 if (get_osversion() >= 0x020612) {
2341 return do_sigreturn_v2(env);
2342 } else {
2343 return do_sigreturn_v1(env);
2344 }
2345 }
2346
2347 static long do_rt_sigreturn_v1(CPUARMState *env)
2348 {
2349 abi_ulong frame_addr;
2350 struct rt_sigframe_v1 *frame = NULL;
2351 sigset_t host_set;
2352
2353 /*
2354 * Since we stacked the signal frame on an 8-byte boundary,
2355 * 'sp' should be 8-byte aligned here. If it's not, the
2356 * user is trying to mess with us.
2357 */
2358 frame_addr = env->regs[13];
2359 trace_user_do_rt_sigreturn(env, frame_addr);
2360 if (frame_addr & 7) {
2361 goto badframe;
2362 }
2363
2364 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2365 goto badframe;
2366 }
2367
2368 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2369 set_sigmask(&host_set);
2370
2371 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2372 goto badframe;
2373 }
2374
2375 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2376 goto badframe;
2377
2378 #if 0
2379 /* Send SIGTRAP if we're single-stepping */
2380 if (ptrace_cancel_bpt(current))
2381 send_sig(SIGTRAP, current, 1);
2382 #endif
2383 unlock_user_struct(frame, frame_addr, 0);
2384 return -TARGET_QEMU_ESIGRETURN;
2385
2386 badframe:
2387 unlock_user_struct(frame, frame_addr, 0);
2388 force_sig(TARGET_SIGSEGV);
2389 return -TARGET_QEMU_ESIGRETURN;
2390 }
2391
2392 static long do_rt_sigreturn_v2(CPUARMState *env)
2393 {
2394 abi_ulong frame_addr;
2395 struct rt_sigframe_v2 *frame = NULL;
2396
2397 /*
2398 * Since we stacked the signal frame on an 8-byte boundary,
2399 * 'sp' should be 8-byte aligned here. If it's not, the
2400 * user is trying to mess with us.
2401 */
2402 frame_addr = env->regs[13];
2403 trace_user_do_rt_sigreturn(env, frame_addr);
2404 if (frame_addr & 7) {
2405 goto badframe;
2406 }
2407
2408 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2409 goto badframe;
2410 }
2411
2412 if (do_sigframe_return_v2(env,
2413 frame_addr
2414 + offsetof(struct rt_sigframe_v2, uc),
2415 &frame->uc)) {
2416 goto badframe;
2417 }
2418
2419 unlock_user_struct(frame, frame_addr, 0);
2420 return -TARGET_QEMU_ESIGRETURN;
2421
2422 badframe:
2423 unlock_user_struct(frame, frame_addr, 0);
2424 force_sig(TARGET_SIGSEGV);
2425 return -TARGET_QEMU_ESIGRETURN;
2426 }
2427
2428 long do_rt_sigreturn(CPUARMState *env)
2429 {
2430 if (get_osversion() >= 0x020612) {
2431 return do_rt_sigreturn_v2(env);
2432 } else {
2433 return do_rt_sigreturn_v1(env);
2434 }
2435 }
2436
2437 #elif defined(TARGET_SPARC)
2438
2439 #define __SUNOS_MAXWIN 31
2440
2441 /* This is what SunOS does, so shall I. */
2442 struct target_sigcontext {
2443 abi_ulong sigc_onstack; /* state to restore */
2444
2445 abi_ulong sigc_mask; /* sigmask to restore */
2446 abi_ulong sigc_sp; /* stack pointer */
2447 abi_ulong sigc_pc; /* program counter */
2448 abi_ulong sigc_npc; /* next program counter */
2449 abi_ulong sigc_psr; /* for condition codes etc */
2450 abi_ulong sigc_g1; /* User uses these two registers */
2451 abi_ulong sigc_o0; /* within the trampoline code. */
2452
2453 /* Now comes information regarding the user's window set
2454 * at the time of the signal.
2455 */
2456 abi_ulong sigc_oswins; /* outstanding windows */
2457
2458 /* stack ptrs for each regwin buf */
2459 char *sigc_spbuf[__SUNOS_MAXWIN];
2460
2461 /* Windows to restore after signal */
2462 struct {
2463 abi_ulong locals[8];
2464 abi_ulong ins[8];
2465 } sigc_wbuf[__SUNOS_MAXWIN];
2466 };
2467 /* A Sparc stack frame */
2468 struct sparc_stackf {
2469 abi_ulong locals[8];
2470 abi_ulong ins[8];
2471 /* It's simpler to treat fp and callers_pc as elements of ins[]
2472 * since we never need to access them ourselves.
2473 */
2474 char *structptr;
2475 abi_ulong xargs[6];
2476 abi_ulong xxargs[1];
2477 };
2478
2479 typedef struct {
2480 struct {
2481 abi_ulong psr;
2482 abi_ulong pc;
2483 abi_ulong npc;
2484 abi_ulong y;
2485 abi_ulong u_regs[16]; /* globals and ins */
2486 } si_regs;
2487 int si_mask;
2488 } __siginfo_t;
2489
2490 typedef struct {
2491 abi_ulong si_float_regs[32];
2492 unsigned long si_fsr;
2493 unsigned long si_fpqdepth;
2494 struct {
2495 unsigned long *insn_addr;
2496 unsigned long insn;
2497 } si_fpqueue [16];
2498 } qemu_siginfo_fpu_t;
2499
2500
2501 struct target_signal_frame {
2502 struct sparc_stackf ss;
2503 __siginfo_t info;
2504 abi_ulong fpu_save;
2505 abi_ulong insns[2] __attribute__ ((aligned (8)));
2506 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2507 abi_ulong extra_size; /* Should be 0 */
2508 qemu_siginfo_fpu_t fpu_state;
2509 };
2510 struct target_rt_signal_frame {
2511 struct sparc_stackf ss;
2512 siginfo_t info;
2513 abi_ulong regs[20];
2514 sigset_t mask;
2515 abi_ulong fpu_save;
2516 unsigned int insns[2];
2517 stack_t stack;
2518 unsigned int extra_size; /* Should be 0 */
2519 qemu_siginfo_fpu_t fpu_state;
2520 };
2521
2522 #define UREG_O0 16
2523 #define UREG_O6 22
2524 #define UREG_I0 0
2525 #define UREG_I1 1
2526 #define UREG_I2 2
2527 #define UREG_I3 3
2528 #define UREG_I4 4
2529 #define UREG_I5 5
2530 #define UREG_I6 6
2531 #define UREG_I7 7
2532 #define UREG_L0 8
2533 #define UREG_FP UREG_I6
2534 #define UREG_SP UREG_O6
2535
2536 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2537 CPUSPARCState *env,
2538 unsigned long framesize)
2539 {
2540 abi_ulong sp;
2541
2542 sp = env->regwptr[UREG_FP];
2543
2544 /* This is the X/Open sanctioned signal stack switching. */
2545 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2546 if (!on_sig_stack(sp)
2547 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2548 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2549 }
2550 }
2551 return sp - framesize;
2552 }
2553
2554 static int
2555 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2556 {
2557 int err = 0, i;
2558
2559 __put_user(env->psr, &si->si_regs.psr);
2560 __put_user(env->pc, &si->si_regs.pc);
2561 __put_user(env->npc, &si->si_regs.npc);
2562 __put_user(env->y, &si->si_regs.y);
2563 for (i=0; i < 8; i++) {
2564 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2565 }
2566 for (i=0; i < 8; i++) {
2567 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2568 }
2569 __put_user(mask, &si->si_mask);
2570 return err;
2571 }
2572
2573 #if 0
2574 static int
2575 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2576 CPUSPARCState *env, unsigned long mask)
2577 {
2578 int err = 0;
2579
2580 __put_user(mask, &sc->sigc_mask);
2581 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2582 __put_user(env->pc, &sc->sigc_pc);
2583 __put_user(env->npc, &sc->sigc_npc);
2584 __put_user(env->psr, &sc->sigc_psr);
2585 __put_user(env->gregs[1], &sc->sigc_g1);
2586 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2587
2588 return err;
2589 }
2590 #endif
2591 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2592
2593 static void setup_frame(int sig, struct target_sigaction *ka,
2594 target_sigset_t *set, CPUSPARCState *env)
2595 {
2596 abi_ulong sf_addr;
2597 struct target_signal_frame *sf;
2598 int sigframe_size, err, i;
2599
2600 /* 1. Make sure everything is clean */
2601 //synchronize_user_stack();
2602
2603 sigframe_size = NF_ALIGNEDSZ;
2604 sf_addr = get_sigframe(ka, env, sigframe_size);
2605 trace_user_setup_frame(env, sf_addr);
2606
2607 sf = lock_user(VERIFY_WRITE, sf_addr,
2608 sizeof(struct target_signal_frame), 0);
2609 if (!sf) {
2610 goto sigsegv;
2611 }
2612 #if 0
2613 if (invalid_frame_pointer(sf, sigframe_size))
2614 goto sigill_and_return;
2615 #endif
2616 /* 2. Save the current process state */
2617 err = setup___siginfo(&sf->info, env, set->sig[0]);
2618 __put_user(0, &sf->extra_size);
2619
2620 //save_fpu_state(regs, &sf->fpu_state);
2621 //__put_user(&sf->fpu_state, &sf->fpu_save);
2622
2623 __put_user(set->sig[0], &sf->info.si_mask);
2624 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2625 __put_user(set->sig[i + 1], &sf->extramask[i]);
2626 }
2627
2628 for (i = 0; i < 8; i++) {
2629 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2630 }
2631 for (i = 0; i < 8; i++) {
2632 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2633 }
2634 if (err)
2635 goto sigsegv;
2636
2637 /* 3. signal handler back-trampoline and parameters */
2638 env->regwptr[UREG_FP] = sf_addr;
2639 env->regwptr[UREG_I0] = sig;
2640 env->regwptr[UREG_I1] = sf_addr +
2641 offsetof(struct target_signal_frame, info);
2642 env->regwptr[UREG_I2] = sf_addr +
2643 offsetof(struct target_signal_frame, info);
2644
2645 /* 4. signal handler */
2646 env->pc = ka->_sa_handler;
2647 env->npc = (env->pc + 4);
2648 /* 5. return to kernel instructions */
2649 if (ka->sa_restorer) {
2650 env->regwptr[UREG_I7] = ka->sa_restorer;
2651 } else {
2652 uint32_t val32;
2653
2654 env->regwptr[UREG_I7] = sf_addr +
2655 offsetof(struct target_signal_frame, insns) - 2 * 4;
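/* %i7 points two instructions (8 bytes) before the stub because the
 * handler returns with "ret; restore", which jumps to %i7 + 8.
 */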
2656
2657 /* mov __NR_sigreturn, %g1 */
2658 val32 = 0x821020d8;
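/* 0x821020d8 encodes "or %g0, 0xd8, %g1"; the syscall number is
 * hard-coded here rather than derived from TARGET_NR_sigreturn.
 */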
2659 __put_user(val32, &sf->insns[0]);
2660
2661 /* t 0x10 */
2662 val32 = 0x91d02010;
2663 __put_user(val32, &sf->insns[1]);
2664 if (err)
2665 goto sigsegv;
2666
2667 /* Flush instruction space. */
2668 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2669 // tb_flush(env);
2670 }
2671 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2672 return;
2673 #if 0
2674 sigill_and_return:
2675 force_sig(TARGET_SIGILL);
2676 #endif
2677 sigsegv:
2678 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2679 force_sigsegv(sig);
2680 }
2681
2682 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2683 target_siginfo_t *info,
2684 target_sigset_t *set, CPUSPARCState *env)
2685 {
2686 fprintf(stderr, "setup_rt_frame: not implemented\n");
2687 }
2688
2689 long do_sigreturn(CPUSPARCState *env)
2690 {
2691 abi_ulong sf_addr;
2692 struct target_signal_frame *sf;
2693 uint32_t up_psr, pc, npc;
2694 target_sigset_t set;
2695 sigset_t host_set;
2696 int err=0, i;
2697
2698 sf_addr = env->regwptr[UREG_FP];
2699 trace_user_do_sigreturn(env, sf_addr);
2700 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2701 goto segv_and_exit;
2702 }
2703
2704 /* 1. Make sure we are not getting garbage from the user */
2705
2706 if (sf_addr & 3)
2707 goto segv_and_exit;
2708
2709 __get_user(pc, &sf->info.si_regs.pc);
2710 __get_user(npc, &sf->info.si_regs.npc);
2711
2712 if ((pc | npc) & 3) {
2713 goto segv_and_exit;
2714 }
2715
2716 /* 2. Restore the state */
2717 __get_user(up_psr, &sf->info.si_regs.psr);
2718
2719 /* User can only change condition codes and FPU enabling in %psr. */
2720 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2721 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2722
2723 env->pc = pc;
2724 env->npc = npc;
2725 __get_user(env->y, &sf->info.si_regs.y);
2726 for (i=0; i < 8; i++) {
2727 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2728 }
2729 for (i=0; i < 8; i++) {
2730 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2731 }
2732
2733 /* FIXME: implement FPU save/restore:
2734 * __get_user(fpu_save, &sf->fpu_save);
2735 * if (fpu_save)
2736 * err |= restore_fpu_state(env, fpu_save);
2737 */
2738
2739 /* This is pretty much atomic; no amount of locking would prevent
2740 * the races which exist anyway.
2741 */
2742 __get_user(set.sig[0], &sf->info.si_mask);
2743 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2744 __get_user(set.sig[i], &sf->extramask[i - 1]);
2745 }
2746
2747 target_to_host_sigset_internal(&host_set, &set);
2748 set_sigmask(&host_set);
2749
2750 if (err) {
2751 goto segv_and_exit;
2752 }
2753 unlock_user_struct(sf, sf_addr, 0);
2754 return -TARGET_QEMU_ESIGRETURN;
2755
2756 segv_and_exit:
2757 unlock_user_struct(sf, sf_addr, 0);
2758 force_sig(TARGET_SIGSEGV);
2759 return -TARGET_QEMU_ESIGRETURN;
2760 }
2761
2762 long do_rt_sigreturn(CPUSPARCState *env)
2763 {
2764 trace_user_do_rt_sigreturn(env, 0);
2765 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2766 return -TARGET_ENOSYS;
2767 }
2768
2769 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2770 #define SPARC_MC_TSTATE 0
2771 #define SPARC_MC_PC 1
2772 #define SPARC_MC_NPC 2
2773 #define SPARC_MC_Y 3
2774 #define SPARC_MC_G1 4
2775 #define SPARC_MC_G2 5
2776 #define SPARC_MC_G3 6
2777 #define SPARC_MC_G4 7
2778 #define SPARC_MC_G5 8
2779 #define SPARC_MC_G6 9
2780 #define SPARC_MC_G7 10
2781 #define SPARC_MC_O0 11
2782 #define SPARC_MC_O1 12
2783 #define SPARC_MC_O2 13
2784 #define SPARC_MC_O3 14
2785 #define SPARC_MC_O4 15
2786 #define SPARC_MC_O5 16
2787 #define SPARC_MC_O6 17
2788 #define SPARC_MC_O7 18
2789 #define SPARC_MC_NGREG 19
2790
2791 typedef abi_ulong target_mc_greg_t;
2792 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
2793
2794 struct target_mc_fq {
2795 abi_ulong *mcfq_addr;
2796 uint32_t mcfq_insn;
2797 };
2798
2799 struct target_mc_fpu {
2800 union {
2801 uint32_t sregs[32];
2802 uint64_t dregs[32];
2803 //uint128_t qregs[16];
2804 } mcfpu_fregs;
2805 abi_ulong mcfpu_fsr;
2806 abi_ulong mcfpu_fprs;
2807 abi_ulong mcfpu_gsr;
2808 struct target_mc_fq *mcfpu_fq;
2809 unsigned char mcfpu_qcnt;
2810 unsigned char mcfpu_qentsz;
2811 unsigned char mcfpu_enab;
2812 };
2813 typedef struct target_mc_fpu target_mc_fpu_t;
2814
2815 typedef struct {
2816 target_mc_gregset_t mc_gregs;
2817 target_mc_greg_t mc_fp;
2818 target_mc_greg_t mc_i7;
2819 target_mc_fpu_t mc_fpregs;
2820 } target_mcontext_t;
2821
2822 struct target_ucontext {
2823 struct target_ucontext *tuc_link;
2824 abi_ulong tuc_flags;
2825 target_sigset_t tuc_sigmask;
2826 target_mcontext_t tuc_mcontext;
2827 };
2828
2829 /* A V9 register window */
2830 struct target_reg_window {
2831 abi_ulong locals[8];
2832 abi_ulong ins[8];
2833 };
2834
2835 #define TARGET_STACK_BIAS 2047
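/* The SPARC V9 ABI biases the stack pointer by 2047 bytes; the register
 * window save area actually lives at %sp + TARGET_STACK_BIAS.
 */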
2836
2837 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2838 void sparc64_set_context(CPUSPARCState *env)
2839 {
2840 abi_ulong ucp_addr;
2841 struct target_ucontext *ucp;
2842 target_mc_gregset_t *grp;
2843 abi_ulong pc, npc, tstate;
2844 abi_ulong fp, i7, w_addr;
2845 unsigned int i;
2846
2847 ucp_addr = env->regwptr[UREG_I0];
2848 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2849 goto do_sigsegv;
2850 }
2851 grp = &ucp->tuc_mcontext.mc_gregs;
2852 __get_user(pc, &((*grp)[SPARC_MC_PC]));
2853 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
2854 if ((pc | npc) & 3) {
2855 goto do_sigsegv;
2856 }
2857 if (env->regwptr[UREG_I1]) {
2858 target_sigset_t target_set;
2859 sigset_t set;
2860
2861 if (TARGET_NSIG_WORDS == 1) {
2862 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2863 } else {
2864 abi_ulong *src, *dst;
2865 src = ucp->tuc_sigmask.sig;
2866 dst = target_set.sig;
2867 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2868 __get_user(*dst, src);
2869 }
2870 }
2871 target_to_host_sigset_internal(&set, &target_set);
2872 set_sigmask(&set);
2873 }
2874 env->pc = pc;
2875 env->npc = npc;
2876 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
2877 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
2878 env->asi = (tstate >> 24) & 0xff;
2879 cpu_put_ccr(env, tstate >> 32);
2880 cpu_put_cwp64(env, tstate & 0x1f);
2881 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
2882 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
2883 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
2884 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
2885 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
2886 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
2887 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
2888 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
2889 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
2890 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
2891 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
2892 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
2893 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
2894 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
2895 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
2896
2897 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2898 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2899
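/* The caller's frame pointer and return address live in the in-registers
 * of the register window spilled on the user stack, so write them back
 * at the biased stack address.
 */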
2900 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2901 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2902 abi_ulong) != 0) {
2903 goto do_sigsegv;
2904 }
2905 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2906 abi_ulong) != 0) {
2907 goto do_sigsegv;
2908 }
2909 /* FIXME this does not match how the kernel handles the FPU in
2910 * its sparc64_set_context implementation. In particular the FPU
2911 * is only restored if fenab is non-zero in:
2912 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2913 */
2914 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2915 {
2916 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2917 for (i = 0; i < 64; i++, src++) {
2918 if (i & 1) {
2919 __get_user(env->fpr[i/2].l.lower, src);
2920 } else {
2921 __get_user(env->fpr[i/2].l.upper, src);
2922 }
2923 }
2924 }
2925 __get_user(env->fsr,
2926 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2927 __get_user(env->gsr,
2928 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2929 unlock_user_struct(ucp, ucp_addr, 0);
2930 return;
2931 do_sigsegv:
2932 unlock_user_struct(ucp, ucp_addr, 0);
2933 force_sig(TARGET_SIGSEGV);
2934 }
2935
2936 void sparc64_get_context(CPUSPARCState *env)
2937 {
2938 abi_ulong ucp_addr;
2939 struct target_ucontext *ucp;
2940 target_mc_gregset_t *grp;
2941 target_mcontext_t *mcp;
2942 abi_ulong fp, i7, w_addr;
2943 int err;
2944 unsigned int i;
2945 target_sigset_t target_set;
2946 sigset_t set;
2947
2948 ucp_addr = env->regwptr[UREG_I0];
2949 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2950 goto do_sigsegv;
2951 }
2952
2953 mcp = &ucp->tuc_mcontext;
2954 grp = &mcp->mc_gregs;
2955
2956 /* Skip over the trap instruction, first. */
2957 env->pc = env->npc;
2958 env->npc += 4;
2959
2960 /* If we're only reading the signal mask then do_sigprocmask()
2961 * is guaranteed not to fail, which is important because we don't
2962 * have any way to signal a failure or restart this operation since
2963 * this is not a normal syscall.
2964 */
2965 err = do_sigprocmask(0, NULL, &set);
2966 assert(err == 0);
2967 host_to_target_sigset_internal(&target_set, &set);
2968 if (TARGET_NSIG_WORDS == 1) {
2969 __put_user(target_set.sig[0],
2970 (abi_ulong *)&ucp->tuc_sigmask);
2971 } else {
2972 abi_ulong *src, *dst;
2973 src = target_set.sig;
2974 dst = ucp->tuc_sigmask.sig;
2975 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2976 __put_user(*src, dst);
2977 }
2978 if (err)
2979 goto do_sigsegv;
2980 }
2981
2982 /* XXX: tstate must be saved properly */
2983 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
2984 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
2985 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
2986 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
2987 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
2988 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
2989 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
2990 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
2991 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
2992 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
2993 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
2994 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
2995 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
2996 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
2997 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
2998 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
2999 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
3000 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
3001 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
3002
3003 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
3004 fp = i7 = 0;
3005 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
3006 abi_ulong) != 0) {
3007 goto do_sigsegv;
3008 }
3009 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3010 abi_ulong) != 0) {
3011 goto do_sigsegv;
3012 }
3013 __put_user(fp, &(mcp->mc_fp));
3014 __put_user(i7, &(mcp->mc_i7));
3015
3016 {
3017 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3018 for (i = 0; i < 64; i++, dst++) {
3019 if (i & 1) {
3020 __put_user(env->fpr[i/2].l.lower, dst);
3021 } else {
3022 __put_user(env->fpr[i/2].l.upper, dst);
3023 }
3024 }
3025 }
3026 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3027 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3028 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3029
3030 if (err)
3031 goto do_sigsegv;
3032 unlock_user_struct(ucp, ucp_addr, 1);
3033 return;
3034 do_sigsegv:
3035 unlock_user_struct(ucp, ucp_addr, 1);
3036 force_sig(TARGET_SIGSEGV);
3037 }
3038 #endif
3039 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3040
3041 # if defined(TARGET_ABI_MIPSO32)
3042 struct target_sigcontext {
3043 uint32_t sc_regmask; /* Unused */
3044 uint32_t sc_status;
3045 uint64_t sc_pc;
3046 uint64_t sc_regs[32];
3047 uint64_t sc_fpregs[32];
3048 uint32_t sc_ownedfp; /* Unused */
3049 uint32_t sc_fpc_csr;
3050 uint32_t sc_fpc_eir; /* Unused */
3051 uint32_t sc_used_math;
3052 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
3053 uint32_t pad0;
3054 uint64_t sc_mdhi;
3055 uint64_t sc_mdlo;
3056 target_ulong sc_hi1; /* Was sc_cause */
3057 target_ulong sc_lo1; /* Was sc_badvaddr */
3058 target_ulong sc_hi2; /* Was sc_sigset[4] */
3059 target_ulong sc_lo2;
3060 target_ulong sc_hi3;
3061 target_ulong sc_lo3;
3062 };
3063 # else /* N32 || N64 */
3064 struct target_sigcontext {
3065 uint64_t sc_regs[32];
3066 uint64_t sc_fpregs[32];
3067 uint64_t sc_mdhi;
3068 uint64_t sc_hi1;
3069 uint64_t sc_hi2;
3070 uint64_t sc_hi3;
3071 uint64_t sc_mdlo;
3072 uint64_t sc_lo1;
3073 uint64_t sc_lo2;
3074 uint64_t sc_lo3;
3075 uint64_t sc_pc;
3076 uint32_t sc_fpc_csr;
3077 uint32_t sc_used_math;
3078 uint32_t sc_dsp;
3079 uint32_t sc_reserved;
3080 };
3081 # endif /* O32 */
3082
3083 struct sigframe {
3084 uint32_t sf_ass[4]; /* argument save space for o32 */
3085 uint32_t sf_code[2]; /* signal trampoline */
3086 struct target_sigcontext sf_sc;
3087 target_sigset_t sf_mask;
3088 };
3089
3090 struct target_ucontext {
3091 target_ulong tuc_flags;
3092 target_ulong tuc_link;
3093 target_stack_t tuc_stack;
3094 target_ulong pad0;
3095 struct target_sigcontext tuc_mcontext;
3096 target_sigset_t tuc_sigmask;
3097 };
3098
3099 struct target_rt_sigframe {
3100 uint32_t rs_ass[4]; /* argument save space for o32 */
3101 uint32_t rs_code[2]; /* signal trampoline */
3102 struct target_siginfo rs_info;
3103 struct target_ucontext rs_uc;
3104 };
3105
3106 /* Install trampoline to jump back from signal handler */
3107 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
3108 {
3109 int err = 0;
3110
3111 /*
3112 * Set up the return code ...
3113 *
3114 * li v0, __NR__foo_sigreturn
3115 * syscall
3116 */
3117
3118 __put_user(0x24020000 + syscall, tramp + 0);
3119 __put_user(0x0000000c , tramp + 1);
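/* 0x24020000 is "addiu $v0, $zero, <imm16>" (the "li v0, NR" above) and
 * 0x0000000c is the SYSCALL instruction.
 */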
3120 return err;
3121 }
3122
3123 static inline void setup_sigcontext(CPUMIPSState *regs,
3124 struct target_sigcontext *sc)
3125 {
3126 int i;
3127
3128 __put_user(exception_resume_pc(regs), &sc->sc_pc);
3129 regs->hflags &= ~MIPS_HFLAG_BMASK;
3130
3131 __put_user(0, &sc->sc_regs[0]);
3132 for (i = 1; i < 32; ++i) {
3133 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3134 }
3135
3136 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3137 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3138
3139 /* Rather than checking for dsp existence, always copy. The storage
3140 would just be garbage otherwise. */
3141 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
3142 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
3143 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
3144 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
3145 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
3146 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
3147 {
3148 uint32_t dsp = cpu_rddsp(0x3ff, regs);
3149 __put_user(dsp, &sc->sc_dsp);
3150 }
3151
3152 __put_user(1, &sc->sc_used_math);
3153
3154 for (i = 0; i < 32; ++i) {
3155 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3156 }
3157 }
3158
3159 static inline void
3160 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
3161 {
3162 int i;
3163
3164 __get_user(regs->CP0_EPC, &sc->sc_pc);
3165
3166 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3167 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3168
3169 for (i = 1; i < 32; ++i) {
3170 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3171 }
3172
3173 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
3174 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
3175 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
3176 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
3177 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
3178 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
3179 {
3180 uint32_t dsp;
3181 __get_user(dsp, &sc->sc_dsp);
3182 cpu_wrdsp(dsp, 0x3ff, regs);
3183 }
3184
3185 for (i = 0; i < 32; ++i) {
3186 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3187 }
3188 }
3189
3190 /*
3191 * Determine which stack to use..
3192 */
3193 static inline abi_ulong
3194 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3195 {
3196 unsigned long sp;
3197
3198 /* Default to using normal stack */
3199 sp = regs->active_tc.gpr[29];
3200
3201 /*
3202 * FPU emulator may have its own trampoline active just
3203 * above the user stack, 16 bytes before the next lowest
3204 * 16-byte boundary. Try to avoid trashing it.
3205 */
3206 sp -= 32;
3207
3208 /* This is the X/Open sanctioned signal stack switching. */
3209 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3210 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3211 }
3212
3213 return (sp - frame_size) & ~7;
3214 }
3215
3216 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3217 {
3218 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3219 env->hflags &= ~MIPS_HFLAG_M16;
3220 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3221 env->active_tc.PC &= ~(target_ulong) 1;
3222 }
3223 }
3224
3225 # if defined(TARGET_ABI_MIPSO32)
3226 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
3227 static void setup_frame(int sig, struct target_sigaction * ka,
3228 target_sigset_t *set, CPUMIPSState *regs)
3229 {
3230 struct sigframe *frame;
3231 abi_ulong frame_addr;
3232 int i;
3233
3234 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3235 trace_user_setup_frame(regs, frame_addr);
3236 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3237 goto give_sigsegv;
3238 }
3239
3240 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3241
3242 setup_sigcontext(regs, &frame->sf_sc);
3243
3244 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3245 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3246 }
3247
3248 /*
3249 * Arguments to signal handler:
3250 *
3251 * a0 = signal number
3252 * a1 = 0 (should be cause)
3253 * a2 = pointer to struct sigcontext
3254 *
3255 * $25 and PC point to the signal handler, $29 points to the
3256 * struct sigframe.
3257 */
3258 regs->active_tc.gpr[ 4] = sig;
3259 regs->active_tc.gpr[ 5] = 0;
3260 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3261 regs->active_tc.gpr[29] = frame_addr;
3262 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3263 /* The original kernel code sets CP0_EPC to the handler
3264 * since it returns to userland using eret
3265 * we cannot do this here, and we must set PC directly */
3266 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3267 mips_set_hflags_isa_mode_from_pc(regs);
3268 unlock_user_struct(frame, frame_addr, 1);
3269 return;
3270
3271 give_sigsegv:
3272 force_sigsegv(sig);
3273 }
3274
3275 long do_sigreturn(CPUMIPSState *regs)
3276 {
3277 struct sigframe *frame;
3278 abi_ulong frame_addr;
3279 sigset_t blocked;
3280 target_sigset_t target_set;
3281 int i;
3282
3283 frame_addr = regs->active_tc.gpr[29];
3284 trace_user_do_sigreturn(regs, frame_addr);
3285 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3286 goto badframe;
3287
3288 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3289 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3290 }
3291
3292 target_to_host_sigset_internal(&blocked, &target_set);
3293 set_sigmask(&blocked);
3294
3295 restore_sigcontext(regs, &frame->sf_sc);
3296
3297 #if 0
3298 /*
3299 * Don't let your children do this ...
3300 */
3301 __asm__ __volatile__(
3302 "move\t$29, %0\n\t"
3303 "j\tsyscall_exit"
3304 :/* no outputs */
3305 :"r" (&regs));
3306 /* Unreached */
3307 #endif
3308
3309 regs->active_tc.PC = regs->CP0_EPC;
3310 mips_set_hflags_isa_mode_from_pc(regs);
3311 /* I am not sure this is right, but it seems to work;
3312 * maybe a problem with nested signals? */
3313 regs->CP0_EPC = 0;
3314 return -TARGET_QEMU_ESIGRETURN;
3315
3316 badframe:
3317 force_sig(TARGET_SIGSEGV);
3318 return -TARGET_QEMU_ESIGRETURN;
3319 }
3320 # endif /* O32 */
3321
3322 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3323 target_siginfo_t *info,
3324 target_sigset_t *set, CPUMIPSState *env)
3325 {
3326 struct target_rt_sigframe *frame;
3327 abi_ulong frame_addr;
3328 int i;
3329
3330 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3331 trace_user_setup_rt_frame(env, frame_addr);
3332 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3333 goto give_sigsegv;
3334 }
3335
3336 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3337
3338 tswap_siginfo(&frame->rs_info, info);
3339
3340 __put_user(0, &frame->rs_uc.tuc_flags);
3341 __put_user(0, &frame->rs_uc.tuc_link);
3342 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3343 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3344 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3345 &frame->rs_uc.tuc_stack.ss_flags);
3346
3347 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3348
3349 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3350 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3351 }
3352
3353 /*
3354 * Arguments to signal handler:
3355 *
3356 * a0 = signal number
3357 * a1 = pointer to siginfo_t
3358 * a2 = pointer to ucontext_t
3359 *
3360 * $25 and PC point to the signal handler, $29 points to the
3361 * struct sigframe.
3362 */
3363 env->active_tc.gpr[ 4] = sig;
3364 env->active_tc.gpr[ 5] = frame_addr
3365 + offsetof(struct target_rt_sigframe, rs_info);
3366 env->active_tc.gpr[ 6] = frame_addr
3367 + offsetof(struct target_rt_sigframe, rs_uc);
3368 env->active_tc.gpr[29] = frame_addr;
3369 env->active_tc.gpr[31] = frame_addr
3370 + offsetof(struct target_rt_sigframe, rs_code);
3371 /* The original kernel code sets CP0_EPC to the handler
3372 * since it returns to userland using eret
3373 * we cannot do this here, and we must set PC directly */
3374 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3375 mips_set_hflags_isa_mode_from_pc(env);
3376 unlock_user_struct(frame, frame_addr, 1);
3377 return;
3378
3379 give_sigsegv:
3380 unlock_user_struct(frame, frame_addr, 1);
3381 force_sigsegv(sig);
3382 }
3383
3384 long do_rt_sigreturn(CPUMIPSState *env)
3385 {
3386 struct target_rt_sigframe *frame;
3387 abi_ulong frame_addr;
3388 sigset_t blocked;
3389
3390 frame_addr = env->active_tc.gpr[29];
3391 trace_user_do_rt_sigreturn(env, frame_addr);
3392 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3393 goto badframe;
3394 }
3395
3396 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3397 set_sigmask(&blocked);
3398
3399 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3400
3401 if (do_sigaltstack(frame_addr +
3402 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3403 0, get_sp_from_cpustate(env)) == -EFAULT)
3404 goto badframe;
3405
3406 env->active_tc.PC = env->CP0_EPC;
3407 mips_set_hflags_isa_mode_from_pc(env);
3408 /* I am not sure this is right, but it seems to work;
3409 * maybe a problem with nested signals? */
3410 env->CP0_EPC = 0;
3411 return -TARGET_QEMU_ESIGRETURN;
3412
3413 badframe:
3414 force_sig(TARGET_SIGSEGV);
3415 return -TARGET_QEMU_ESIGRETURN;
3416 }
3417
3418 #elif defined(TARGET_SH4)
3419
3420 /*
3421 * code and data structures from linux kernel:
3422 * include/asm-sh/sigcontext.h
3423 * arch/sh/kernel/signal.c
3424 */
3425
3426 struct target_sigcontext {
3427 target_ulong oldmask;
3428
3429 /* CPU registers */
3430 target_ulong sc_gregs[16];
3431 target_ulong sc_pc;
3432 target_ulong sc_pr;
3433 target_ulong sc_sr;
3434 target_ulong sc_gbr;
3435 target_ulong sc_mach;
3436 target_ulong sc_macl;
3437
3438 /* FPU registers */
3439 target_ulong sc_fpregs[16];
3440 target_ulong sc_xfpregs[16];
3441 unsigned int sc_fpscr;
3442 unsigned int sc_fpul;
3443 unsigned int sc_ownedfp;
3444 };
3445
3446 struct target_sigframe
3447 {
3448 struct target_sigcontext sc;
3449 target_ulong extramask[TARGET_NSIG_WORDS-1];
3450 uint16_t retcode[3];
3451 };
3452
3453
3454 struct target_ucontext {
3455 target_ulong tuc_flags;
3456 struct target_ucontext *tuc_link;
3457 target_stack_t tuc_stack;
3458 struct target_sigcontext tuc_mcontext;
3459 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3460 };
3461
3462 struct target_rt_sigframe
3463 {
3464 struct target_siginfo info;
3465 struct target_ucontext uc;
3466 uint16_t retcode[3];
3467 };
3468
3469
3470 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3471 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
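/* 0x9300 is the "mov.w @(disp,PC),r3" opcode (a PC-relative 16-bit load)
 * and 0xc310 is "trapa #0x10".
 */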
3472
3473 static abi_ulong get_sigframe(struct target_sigaction *ka,
3474 unsigned long sp, size_t frame_size)
3475 {
3476 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3477 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3478 }
3479
3480 return (sp - frame_size) & -8ul;
3481 }
3482
3483 /* Notice when we're in the middle of a gUSA region and reset.
3484 Note that this will only occur for !parallel_cpus, as we will
3485 translate such sequences differently in a parallel context. */
3486 static void unwind_gusa(CPUSH4State *regs)
3487 {
3488 /* If the stack pointer is sufficiently negative, and we haven't
3489 completed the sequence, then reset to the entry to the region. */
3490 /* ??? The SH4 kernel checks for an address above 0xC0000000.
3491 However, the page mappings in qemu linux-user aren't as restricted
3492 and we wind up with the normal stack mapped above 0xF0000000.
3493 That said, there is no reason why the kernel should be allowing
3494 a gUSA region that spans 1GB. Use a tighter check here, for what
3495 can actually be enabled by the immediate move. */
3496 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
3497 /* Reset the PC to before the gUSA region, as computed from
3498 R0 = region end, SP = -(region size), plus one more for the
3499 insn that actually initializes SP to the region size. */
3500 regs->pc = regs->gregs[0] + regs->gregs[15] - 2;
3501
3502 /* Reset the SP to the saved version in R1. */
3503 regs->gregs[15] = regs->gregs[1];
3504 }
3505 }
3506
3507 static void setup_sigcontext(struct target_sigcontext *sc,
3508 CPUSH4State *regs, unsigned long mask)
3509 {
3510 int i;
3511
3512 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3513 COPY(gregs[0]); COPY(gregs[1]);
3514 COPY(gregs[2]); COPY(gregs[3]);
3515 COPY(gregs[4]); COPY(gregs[5]);
3516 COPY(gregs[6]); COPY(gregs[7]);
3517 COPY(gregs[8]); COPY(gregs[9]);
3518 COPY(gregs[10]); COPY(gregs[11]);
3519 COPY(gregs[12]); COPY(gregs[13]);
3520 COPY(gregs[14]); COPY(gregs[15]);
3521 COPY(gbr); COPY(mach);
3522 COPY(macl); COPY(pr);
3523 COPY(sr); COPY(pc);
3524 #undef COPY
3525
3526 for (i=0; i<16; i++) {
3527 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3528 }
3529 __put_user(regs->fpscr, &sc->sc_fpscr);
3530 __put_user(regs->fpul, &sc->sc_fpul);
3531
3532 /* non-iBCS2 extensions.. */
3533 __put_user(mask, &sc->oldmask);
3534 }
3535
3536 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3537 {
3538 int i;
3539
3540 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3541 COPY(gregs[0]); COPY(gregs[1]);
3542 COPY(gregs[2]); COPY(gregs[3]);
3543 COPY(gregs[4]); COPY(gregs[5]);
3544 COPY(gregs[6]); COPY(gregs[7]);
3545 COPY(gregs[8]); COPY(gregs[9]);
3546 COPY(gregs[10]); COPY(gregs[11]);
3547 COPY(gregs[12]); COPY(gregs[13]);
3548 COPY(gregs[14]); COPY(gregs[15]);
3549 COPY(gbr); COPY(mach);
3550 COPY(macl); COPY(pr);
3551 COPY(sr); COPY(pc);
3552 #undef COPY
3553
3554 for (i=0; i<16; i++) {
3555 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3556 }
3557 __get_user(regs->fpscr, &sc->sc_fpscr);
3558 __get_user(regs->fpul, &sc->sc_fpul);
3559
3560 regs->tra = -1; /* disable syscall checks */
3561 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3562 }
3563
3564 static void setup_frame(int sig, struct target_sigaction *ka,
3565 target_sigset_t *set, CPUSH4State *regs)
3566 {
3567 struct target_sigframe *frame;
3568 abi_ulong frame_addr;
3569 int i;
3570
3571 unwind_gusa(regs);
3572
3573 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3574 trace_user_setup_frame(regs, frame_addr);
3575 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3576 goto give_sigsegv;
3577 }
3578
3579 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3580
3581 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3582 __put_user(set->sig[i + 1], &frame->extramask[i]);
3583 }
3584
3585 /* Set up to return from userspace. If provided, use a stub
3586 already in userspace. */
3587 if (ka->sa_flags & TARGET_SA_RESTORER) {
3588 regs->pr = (unsigned long) ka->sa_restorer;
3589 } else {
3590 /* Generate return code (system call to sigreturn) */
3591 abi_ulong retcode_addr = frame_addr +
3592 offsetof(struct target_sigframe, retcode);
3593 __put_user(MOVW(2), &frame->retcode[0]);
3594 __put_user(TRAP_NOARG, &frame->retcode[1]);
3595 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
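/* retcode[0] (MOVW(2)) loads the 16-bit syscall number stored in
 * retcode[2] into r3; retcode[1] then traps into the kernel.
 */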
3596 regs->pr = (unsigned long) retcode_addr;
3597 }
3598
3599 /* Set up registers for signal handler */
3600 regs->gregs[15] = frame_addr;
3601 regs->gregs[4] = sig; /* Arg for signal handler */
3602 regs->gregs[5] = 0;
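/* Note: keep frame_addr unchanged here; unlock_user_struct() below must
 * be passed the same address that was locked.
 */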
3603 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3604 regs->pc = (unsigned long) ka->_sa_handler;
3605 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3606
3607 unlock_user_struct(frame, frame_addr, 1);
3608 return;
3609
3610 give_sigsegv:
3611 unlock_user_struct(frame, frame_addr, 1);
3612 force_sigsegv(sig);
3613 }
3614
3615 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3616 target_siginfo_t *info,
3617 target_sigset_t *set, CPUSH4State *regs)
3618 {
3619 struct target_rt_sigframe *frame;
3620 abi_ulong frame_addr;
3621 int i;
3622
3623 unwind_gusa(regs);
3624
3625 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3626 trace_user_setup_rt_frame(regs, frame_addr);
3627 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3628 goto give_sigsegv;
3629 }
3630
3631 tswap_siginfo(&frame->info, info);
3632
3633 /* Create the ucontext. */
3634 __put_user(0, &frame->uc.tuc_flags);
3635 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3636 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3637 &frame->uc.tuc_stack.ss_sp);
3638 __put_user(sas_ss_flags(regs->gregs[15]),
3639 &frame->uc.tuc_stack.ss_flags);
3640 __put_user(target_sigaltstack_used.ss_size,
3641 &frame->uc.tuc_stack.ss_size);
3642 setup_sigcontext(&frame->uc.tuc_mcontext,
3643 regs, set->sig[0]);
3644 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3645 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3646 }
3647
3648 /* Set up to return from userspace. If provided, use a stub
3649 already in userspace. */
3650 if (ka->sa_flags & TARGET_SA_RESTORER) {
3651 regs->pr = (unsigned long) ka->sa_restorer;
3652 } else {
3653 /* Generate return code (system call to sigreturn) */
3654 abi_ulong retcode_addr = frame_addr +
3655 offsetof(struct target_rt_sigframe, retcode);
3656 __put_user(MOVW(2), &frame->retcode[0]);
3657 __put_user(TRAP_NOARG, &frame->retcode[1]);
3658 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3659 regs->pr = (unsigned long) retcode_addr;
3660 }
3661
3662 /* Set up registers for signal handler */
3663 regs->gregs[15] = frame_addr;
3664 regs->gregs[4] = sig; /* Arg for signal handler */
3665 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3666 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3667 regs->pc = (unsigned long) ka->_sa_handler;
3668 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3669
3670 unlock_user_struct(frame, frame_addr, 1);
3671 return;
3672
3673 give_sigsegv:
3674 unlock_user_struct(frame, frame_addr, 1);
3675 force_sigsegv(sig);
3676 }
3677
3678 long do_sigreturn(CPUSH4State *regs)
3679 {
3680 struct target_sigframe *frame;
3681 abi_ulong frame_addr;
3682 sigset_t blocked;
3683 target_sigset_t target_set;
3684 int i;
3685 int err = 0;
3686
3687 frame_addr = regs->gregs[15];
3688 trace_user_do_sigreturn(regs, frame_addr);
3689 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3690 goto badframe;
3691 }
3692
3693 __get_user(target_set.sig[0], &frame->sc.oldmask);
3694 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3695 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3696 }
3697
3698 if (err)
3699 goto badframe;
3700
3701 target_to_host_sigset_internal(&blocked, &target_set);
3702 set_sigmask(&blocked);
3703
3704 restore_sigcontext(regs, &frame->sc);
3705
3706 unlock_user_struct(frame, frame_addr, 0);
3707 return -TARGET_QEMU_ESIGRETURN;
3708
3709 badframe:
3710 unlock_user_struct(frame, frame_addr, 0);
3711 force_sig(TARGET_SIGSEGV);
3712 return -TARGET_QEMU_ESIGRETURN;
3713 }
3714
3715 long do_rt_sigreturn(CPUSH4State *regs)
3716 {
3717 struct target_rt_sigframe *frame;
3718 abi_ulong frame_addr;
3719 sigset_t blocked;
3720
3721 frame_addr = regs->gregs[15];
3722 trace_user_do_rt_sigreturn(regs, frame_addr);
3723 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3724 goto badframe;
3725 }
3726
3727 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3728 set_sigmask(&blocked);
3729
3730 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3731
3732 if (do_sigaltstack(frame_addr +
3733 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3734 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3735 goto badframe;
3736 }
3737
3738 unlock_user_struct(frame, frame_addr, 0);
3739 return -TARGET_QEMU_ESIGRETURN;
3740
3741 badframe:
3742 unlock_user_struct(frame, frame_addr, 0);
3743 force_sig(TARGET_SIGSEGV);
3744 return -TARGET_QEMU_ESIGRETURN;
3745 }
3746 #elif defined(TARGET_MICROBLAZE)
3747
3748 struct target_sigcontext {
3749 struct target_pt_regs regs; /* needs to be first */
3750 uint32_t oldmask;
3751 };
3752
3753 struct target_stack_t {
3754 abi_ulong ss_sp;
3755 int ss_flags;
3756 unsigned int ss_size;
3757 };
3758
3759 struct target_ucontext {
3760 abi_ulong tuc_flags;
3761 abi_ulong tuc_link;
3762 struct target_stack_t tuc_stack;
3763 struct target_sigcontext tuc_mcontext;
3764 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3765 };
3766
3767 /* Signal frames. */
3768 struct target_signal_frame {
3769 struct target_ucontext uc;
3770 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3771 uint32_t tramp[2];
3772 };
3773
3774 struct rt_signal_frame {
3775 siginfo_t info;
3776 ucontext_t uc;
3777 uint32_t tramp[2];
3778 };
3779
3780 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3781 {
3782 __put_user(env->regs[0], &sc->regs.r0);
3783 __put_user(env->regs[1], &sc->regs.r1);
3784 __put_user(env->regs[2], &sc->regs.r2);
3785 __put_user(env->regs[3], &sc->regs.r3);
3786 __put_user(env->regs[4], &sc->regs.r4);
3787 __put_user(env->regs[5], &sc->regs.r5);
3788 __put_user(env->regs[6], &sc->regs.r6);
3789 __put_user(env->regs[7], &sc->regs.r7);
3790 __put_user(env->regs[8], &sc->regs.r8);
3791 __put_user(env->regs[9], &sc->regs.r9);
3792 __put_user(env->regs[10], &sc->regs.r10);
3793 __put_user(env->regs[11], &sc->regs.r11);
3794 __put_user(env->regs[12], &sc->regs.r12);
3795 __put_user(env->regs[13], &sc->regs.r13);
3796 __put_user(env->regs[14], &sc->regs.r14);
3797 __put_user(env->regs[15], &sc->regs.r15);
3798 __put_user(env->regs[16], &sc->regs.r16);
3799 __put_user(env->regs[17], &sc->regs.r17);
3800 __put_user(env->regs[18], &sc->regs.r18);
3801 __put_user(env->regs[19], &sc->regs.r19);
3802 __put_user(env->regs[20], &sc->regs.r20);
3803 __put_user(env->regs[21], &sc->regs.r21);
3804 __put_user(env->regs[22], &sc->regs.r22);
3805 __put_user(env->regs[23], &sc->regs.r23);
3806 __put_user(env->regs[24], &sc->regs.r24);
3807 __put_user(env->regs[25], &sc->regs.r25);
3808 __put_user(env->regs[26], &sc->regs.r26);
3809 __put_user(env->regs[27], &sc->regs.r27);
3810 __put_user(env->regs[28], &sc->regs.r28);
3811 __put_user(env->regs[29], &sc->regs.r29);
3812 __put_user(env->regs[30], &sc->regs.r30);
3813 __put_user(env->regs[31], &sc->regs.r31);
3814 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3815 }
3816
3817 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3818 {
3819 __get_user(env->regs[0], &sc->regs.r0);
3820 __get_user(env->regs[1], &sc->regs.r1);
3821 __get_user(env->regs[2], &sc->regs.r2);
3822 __get_user(env->regs[3], &sc->regs.r3);
3823 __get_user(env->regs[4], &sc->regs.r4);
3824 __get_user(env->regs[5], &sc->regs.r5);
3825 __get_user(env->regs[6], &sc->regs.r6);
3826 __get_user(env->regs[7], &sc->regs.r7);
3827 __get_user(env->regs[8], &sc->regs.r8);
3828 __get_user(env->regs[9], &sc->regs.r9);
3829 __get_user(env->regs[10], &sc->regs.r10);
3830 __get_user(env->regs[11], &sc->regs.r11);
3831 __get_user(env->regs[12], &sc->regs.r12);
3832 __get_user(env->regs[13], &sc->regs.r13);
3833 __get_user(env->regs[14], &sc->regs.r14);
3834 __get_user(env->regs[15], &sc->regs.r15);
3835 __get_user(env->regs[16], &sc->regs.r16);
3836 __get_user(env->regs[17], &sc->regs.r17);
3837 __get_user(env->regs[18], &sc->regs.r18);
3838 __get_user(env->regs[19], &sc->regs.r19);
3839 __get_user(env->regs[20], &sc->regs.r20);
3840 __get_user(env->regs[21], &sc->regs.r21);
3841 __get_user(env->regs[22], &sc->regs.r22);
3842 __get_user(env->regs[23], &sc->regs.r23);
3843 __get_user(env->regs[24], &sc->regs.r24);
3844 __get_user(env->regs[25], &sc->regs.r25);
3845 __get_user(env->regs[26], &sc->regs.r26);
3846 __get_user(env->regs[27], &sc->regs.r27);
3847 __get_user(env->regs[28], &sc->regs.r28);
3848 __get_user(env->regs[29], &sc->regs.r29);
3849 __get_user(env->regs[30], &sc->regs.r30);
3850 __get_user(env->regs[31], &sc->regs.r31);
3851 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3852 }
3853
3854 static abi_ulong get_sigframe(struct target_sigaction *ka,
3855 CPUMBState *env, int frame_size)
3856 {
3857 abi_ulong sp = env->regs[1];
3858
3859 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3860 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3861 }
3862
3863 return ((sp - frame_size) & -8UL);
3864 }
3865
3866 static void setup_frame(int sig, struct target_sigaction *ka,
3867 target_sigset_t *set, CPUMBState *env)
3868 {
3869 struct target_signal_frame *frame;
3870 abi_ulong frame_addr;
3871 int i;
3872
3873 frame_addr = get_sigframe(ka, env, sizeof *frame);
3874 trace_user_setup_frame(env, frame_addr);
3875 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3876 goto badframe;
3877
3878 /* Save the mask. */
3879 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3880
3881 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3882 __put_user(set->sig[i], &frame->extramask[i - 1]);
3883 }
3884
3885 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3886
3887 /* Set up to return from userspace. If provided, use a stub
3888 already in userspace. */
3889 /* Subtract 8 to cancel the +8 added by the handler's "rtsd r15, 8" return */
3890 if (ka->sa_flags & TARGET_SA_RESTORER) {
3891 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3892 } else {
3893 uint32_t t;
3894 /* Note, these encodings are _big endian_! */
3895 /* addi r12, r0, __NR_sigreturn */
3896 t = 0x31800000UL | TARGET_NR_sigreturn;
3897 __put_user(t, frame->tramp + 0);
3898 /* brki r14, 0x8 */
3899 t = 0xb9cc0008UL;
3900 __put_user(t, frame->tramp + 1);
3901
3902 /* Return from sighandler will jump to the tramp.
3903 Negative 8 offset because return is rtsd r15, 8 */
3904 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3905 - 8;
3906 }
3907
3908 /* Set up registers for signal handler */
3909 env->regs[1] = frame_addr;
3910 /* Signal handler args: */
3911 env->regs[5] = sig; /* Arg 0: signum */
3912 env->regs[6] = 0;
3913 /* arg 1: sigcontext */
3914 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3915
3916 /* Offset of 4 to handle microblaze rtid r14, 0 */
3917 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3918
3919 unlock_user_struct(frame, frame_addr, 1);
3920 return;
3921 badframe:
3922 force_sigsegv(sig);
3923 }
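/*
 * Illustrative sketch (not part of the original code): the two trampoline
 * words written by setup_frame() above, using only the encodings already
 * stated there (addi r12, r0, imm16 is 0x31800000 | imm16; brki r14, 0x8
 * is 0xb9cc0008).  The -8 applied to r15 cancels the +8 the handler's
 * closing "rtsd r15, 8" adds.  The syscall number 119 below is only an
 * example, not the real TARGET_NR_sigreturn.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Build the two-word MicroBlaze sigreturn trampoline. */
static void encode_mb_tramp(uint32_t sigreturn_nr, uint32_t tramp[2])
{
    tramp[0] = 0x31800000u | (sigreturn_nr & 0xffffu); /* addi r12, r0, nr */
    tramp[1] = 0xb9cc0008u;                            /* brki r14, 0x8    */
}

int main(void)
{
    uint32_t t[2];

    encode_mb_tramp(119, t);
    assert(t[0] == (0x31800000u | 119u));
    assert(t[1] == 0xb9cc0008u);
    return 0;
}
#endif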
3924
3925 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3926 target_siginfo_t *info,
3927 target_sigset_t *set, CPUMBState *env)
3928 {
3929 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3930 }
3931
3932 long do_sigreturn(CPUMBState *env)
3933 {
3934 struct target_signal_frame *frame;
3935 abi_ulong frame_addr;
3936 target_sigset_t target_set;
3937 sigset_t set;
3938 int i;
3939
3940 frame_addr = env->regs[R_SP];
3941 trace_user_do_sigreturn(env, frame_addr);
3942 /* Make sure the guest isn't playing games. */
3943 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3944 goto badframe;
3945
3946 /* Restore blocked signals */
3947 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3948 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3949 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3950 }
3951 target_to_host_sigset_internal(&set, &target_set);
3952 set_sigmask(&set);
3953
3954 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3955 /* We got here through a sigreturn syscall; our path back is via an
3956 rtb insn, so set up r14 for that. */
3957 env->regs[14] = env->sregs[SR_PC];
3958
3959 unlock_user_struct(frame, frame_addr, 0);
3960 return -TARGET_QEMU_ESIGRETURN;
3961 badframe:
3962 force_sig(TARGET_SIGSEGV);
3963 return -TARGET_QEMU_ESIGRETURN;
3964 }
3965
3966 long do_rt_sigreturn(CPUMBState *env)
3967 {
3968 trace_user_do_rt_sigreturn(env, 0);
3969 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3970 return -TARGET_ENOSYS;
3971 }
3972
3973 #elif defined(TARGET_CRIS)
3974
3975 struct target_sigcontext {
3976 struct target_pt_regs regs; /* needs to be first */
3977 uint32_t oldmask;
3978 uint32_t usp; /* usp before stacking this gunk on it */
3979 };
3980
3981 /* Signal frames. */
3982 struct target_signal_frame {
3983 struct target_sigcontext sc;
3984 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3985 uint16_t retcode[4]; /* Trampoline code. */
3986 };
3987
3988 struct rt_signal_frame {
3989 siginfo_t *pinfo;
3990 void *puc;
3991 siginfo_t info;
3992 ucontext_t uc;
3993 uint16_t retcode[4]; /* Trampoline code. */
3994 };
3995
3996 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3997 {
3998 __put_user(env->regs[0], &sc->regs.r0);
3999 __put_user(env->regs[1], &sc->regs.r1);
4000 __put_user(env->regs[2], &sc->regs.r2);
4001 __put_user(env->regs[3], &sc->regs.r3);
4002 __put_user(env->regs[4], &sc->regs.r4);
4003 __put_user(env->regs[5], &sc->regs.r5);
4004 __put_user(env->regs[6], &sc->regs.r6);
4005 __put_user(env->regs[7], &sc->regs.r7);
4006 __put_user(env->regs[8], &sc->regs.r8);
4007 __put_user(env->regs[9], &sc->regs.r9);
4008 __put_user(env->regs[10], &sc->regs.r10);
4009 __put_user(env->regs[11], &sc->regs.r11);
4010 __put_user(env->regs[12], &sc->regs.r12);
4011 __put_user(env->regs[13], &sc->regs.r13);
4012 __put_user(env->regs[14], &sc->usp);
4013 __put_user(env->regs[15], &sc->regs.acr);
4014 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
4015 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
4016 __put_user(env->pc, &sc->regs.erp);
4017 }
4018
4019 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
4020 {
4021 __get_user(env->regs[0], &sc->regs.r0);
4022 __get_user(env->regs[1], &sc->regs.r1);
4023 __get_user(env->regs[2], &sc->regs.r2);
4024 __get_user(env->regs[3], &sc->regs.r3);
4025 __get_user(env->regs[4], &sc->regs.r4);
4026 __get_user(env->regs[5], &sc->regs.r5);
4027 __get_user(env->regs[6], &sc->regs.r6);
4028 __get_user(env->regs[7], &sc->regs.r7);
4029 __get_user(env->regs[8], &sc->regs.r8);
4030 __get_user(env->regs[9], &sc->regs.r9);
4031 __get_user(env->regs[10], &sc->regs.r10);
4032 __get_user(env->regs[11], &sc->regs.r11);
4033 __get_user(env->regs[12], &sc->regs.r12);
4034 __get_user(env->regs[13], &sc->regs.r13);
4035 __get_user(env->regs[14], &sc->usp);
4036 __get_user(env->regs[15], &sc->regs.acr);
4037 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
4038 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
4039 __get_user(env->pc, &sc->regs.erp);
4040 }
4041
4042 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4043 {
4044 abi_ulong sp;
4045 /* Align the stack downwards to 4. */
4046 sp = (env->regs[R_SP] & ~3);
4047 return sp - framesize;
4048 }
4049
4050 static void setup_frame(int sig, struct target_sigaction *ka,
4051 target_sigset_t *set, CPUCRISState *env)
4052 {
4053 struct target_signal_frame *frame;
4054 abi_ulong frame_addr;
4055 int i;
4056
4057 frame_addr = get_sigframe(env, sizeof *frame);
4058 trace_user_setup_frame(env, frame_addr);
4059 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4060 goto badframe;
4061
4062 /*
4063 * The CRIS signal return trampoline. A real Linux/CRIS kernel no longer
4064 * returns through this trampoline, but it still sets it up for GDB's benefit.
4065 * In QEMU, using the trampoline keeps the return path simple, so we use it.
4066 *
4067 * This is movu.w __NR_sigreturn, r9; break 13;
4068 */
4069 __put_user(0x9c5f, frame->retcode+0);
4070 __put_user(TARGET_NR_sigreturn,
4071 frame->retcode + 1);
4072 __put_user(0xe93d, frame->retcode + 2);
4073
4074 /* Save the mask. */
4075 __put_user(set->sig[0], &frame->sc.oldmask);
4076
4077 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4078 __put_user(set->sig[i], &frame->extramask[i - 1]);
4079 }
4080
4081 setup_sigcontext(&frame->sc, env);
4082
4083 /* Move the stack and setup the arguments for the handler. */
4084 env->regs[R_SP] = frame_addr;
4085 env->regs[10] = sig;
4086 env->pc = (unsigned long) ka->_sa_handler;
4087 /* Link SRP so the guest returns through the trampoline. */
4088 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
4089
4090 unlock_user_struct(frame, frame_addr, 1);
4091 return;
4092 badframe:
4093 force_sigsegv(sig);
4094 }
4095
4096 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4097 target_siginfo_t *info,
4098 target_sigset_t *set, CPUCRISState *env)
4099 {
4100 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
4101 }
4102
4103 long do_sigreturn(CPUCRISState *env)
4104 {
4105 struct target_signal_frame *frame;
4106 abi_ulong frame_addr;
4107 target_sigset_t target_set;
4108 sigset_t set;
4109 int i;
4110
4111 frame_addr = env->regs[R_SP];
4112 trace_user_do_sigreturn(env, frame_addr);
4113 /* Make sure the guest isn't playing games. */
4114 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
4115 goto badframe;
4116 }
4117
4118 /* Restore blocked signals */
4119 __get_user(target_set.sig[0], &frame->sc.oldmask);
4120 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4121 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4122 }
4123 target_to_host_sigset_internal(&set, &target_set);
4124 set_sigmask(&set);
4125
4126 restore_sigcontext(&frame->sc, env);
4127 unlock_user_struct(frame, frame_addr, 0);
4128 return -TARGET_QEMU_ESIGRETURN;
4129 badframe:
4130 force_sig(TARGET_SIGSEGV);
4131 return -TARGET_QEMU_ESIGRETURN;
4132 }
4133
4134 long do_rt_sigreturn(CPUCRISState *env)
4135 {
4136 trace_user_do_rt_sigreturn(env, 0);
4137 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
4138 return -TARGET_ENOSYS;
4139 }
4140
4141 #elif defined(TARGET_NIOS2)
4142
4143 #define MCONTEXT_VERSION 2
4144
4145 struct target_sigcontext {
4146 int version;
4147 unsigned long gregs[32];
4148 };
4149
4150 struct target_ucontext {
4151 abi_ulong tuc_flags;
4152 abi_ulong tuc_link;
4153 target_stack_t tuc_stack;
4154 struct target_sigcontext tuc_mcontext;
4155 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4156 };
4157
4158 struct target_rt_sigframe {
4159 struct target_siginfo info;
4160 struct target_ucontext uc;
4161 };
4162
4163 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4164 {
4165 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4166 #ifdef CONFIG_STACK_GROWSUP
4167 return target_sigaltstack_used.ss_sp;
4168 #else
4169 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4170 #endif
4171 }
4172 return sp;
4173 }
4174
4175 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
4176 {
4177 unsigned long *gregs = uc->tuc_mcontext.gregs;
4178
4179 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4180 __put_user(env->regs[1], &gregs[0]);
4181 __put_user(env->regs[2], &gregs[1]);
4182 __put_user(env->regs[3], &gregs[2]);
4183 __put_user(env->regs[4], &gregs[3]);
4184 __put_user(env->regs[5], &gregs[4]);
4185 __put_user(env->regs[6], &gregs[5]);
4186 __put_user(env->regs[7], &gregs[6]);
4187 __put_user(env->regs[8], &gregs[7]);
4188 __put_user(env->regs[9], &gregs[8]);
4189 __put_user(env->regs[10], &gregs[9]);
4190 __put_user(env->regs[11], &gregs[10]);
4191 __put_user(env->regs[12], &gregs[11]);
4192 __put_user(env->regs[13], &gregs[12]);
4193 __put_user(env->regs[14], &gregs[13]);
4194 __put_user(env->regs[15], &gregs[14]);
4195 __put_user(env->regs[16], &gregs[15]);
4196 __put_user(env->regs[17], &gregs[16]);
4197 __put_user(env->regs[18], &gregs[17]);
4198 __put_user(env->regs[19], &gregs[18]);
4199 __put_user(env->regs[20], &gregs[19]);
4200 __put_user(env->regs[21], &gregs[20]);
4201 __put_user(env->regs[22], &gregs[21]);
4202 __put_user(env->regs[23], &gregs[22]);
4203 __put_user(env->regs[R_RA], &gregs[23]);
4204 __put_user(env->regs[R_FP], &gregs[24]);
4205 __put_user(env->regs[R_GP], &gregs[25]);
4206 __put_user(env->regs[R_EA], &gregs[27]);
4207 __put_user(env->regs[R_SP], &gregs[28]);
4208
4209 return 0;
4210 }
4211
4212 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
4213 int *pr2)
4214 {
4215 int temp;
4216 abi_ulong off, frame_addr = env->regs[R_SP];
4217 unsigned long *gregs = uc->tuc_mcontext.gregs;
4218 int err;
4219
4220 /* Always make any pending restarted system calls return -EINTR */
4221 /* current->restart_block.fn = do_no_restart_syscall; */
4222
4223 __get_user(temp, &uc->tuc_mcontext.version);
4224 if (temp != MCONTEXT_VERSION) {
4225 return 1;
4226 }
4227
4228 /* restore passed registers */
4229 __get_user(env->regs[1], &gregs[0]);
4230 __get_user(env->regs[2], &gregs[1]);
4231 __get_user(env->regs[3], &gregs[2]);
4232 __get_user(env->regs[4], &gregs[3]);
4233 __get_user(env->regs[5], &gregs[4]);
4234 __get_user(env->regs[6], &gregs[5]);
4235 __get_user(env->regs[7], &gregs[6]);
4236 __get_user(env->regs[8], &gregs[7]);
4237 __get_user(env->regs[9], &gregs[8]);
4238 __get_user(env->regs[10], &gregs[9]);
4239 __get_user(env->regs[11], &gregs[10]);
4240 __get_user(env->regs[12], &gregs[11]);
4241 __get_user(env->regs[13], &gregs[12]);
4242 __get_user(env->regs[14], &gregs[13]);
4243 __get_user(env->regs[15], &gregs[14]);
4244 __get_user(env->regs[16], &gregs[15]);
4245 __get_user(env->regs[17], &gregs[16]);
4246 __get_user(env->regs[18], &gregs[17]);
4247 __get_user(env->regs[19], &gregs[18]);
4248 __get_user(env->regs[20], &gregs[19]);
4249 __get_user(env->regs[21], &gregs[20]);
4250 __get_user(env->regs[22], &gregs[21]);
4251 __get_user(env->regs[23], &gregs[22]);
4252 /* gregs[23] is handled below */
4253 /* Verify, should this be settable */
4254 __get_user(env->regs[R_FP], &gregs[24]);
4255 /* Verify, should this be settable */
4256 __get_user(env->regs[R_GP], &gregs[25]);
4257 /* Not really necessary: no user-settable bits */
4258 __get_user(temp, &gregs[26]);
4259 __get_user(env->regs[R_EA], &gregs[27]);
4260
4261 __get_user(env->regs[R_RA], &gregs[23]);
4262 __get_user(env->regs[R_SP], &gregs[28]);
4263
4264 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
4265 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
4266 if (err == -EFAULT) {
4267 return 1;
4268 }
4269
4270 *pr2 = env->regs[2];
4271 return 0;
4272 }
4273
4274 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
4275 size_t frame_size)
4276 {
4277 unsigned long usp;
4278
4279 /* Default to using normal stack. */
4280 usp = env->regs[R_SP];
4281
4282 /* This is the X/Open sanctioned signal stack switching. */
4283 usp = sigsp(usp, ka);
4284
4285 /* Verify, is it 32 or 64 bit aligned */
4286 return (void *)((usp - frame_size) & -8UL);
4287 }
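/*
 * Illustrative sketch (not part of the original code): the frame placement
 * done by sigsp() and get_sigframe() above.  When SA_ONSTACK applies and
 * the task is not already on the alternate stack, the search starts from
 * the top of that stack (ss_sp + ss_size); the frame size is subtracted
 * and the result rounded down to an 8-byte boundary with "& -8".  All
 * numbers below are made up.
 */
#if 0
#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned long alt_top  = 0x40001000ul;  /* ss_sp + ss_size, i.e. top of the altstack */
    unsigned long frame_sz = 0x2cul;        /* stand-in for sizeof(struct target_rt_sigframe) */

    /* Subtract the frame, then mask with -8 to round down to 8 bytes. */
    unsigned long frame = (alt_top - frame_sz) & -8ul;

    assert(frame == 0x40000fd0ul);
    printf("frame placed at 0x%lx\n", frame);
    return 0;
}
#endif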
4288
4289 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4290 target_siginfo_t *info,
4291 target_sigset_t *set,
4292 CPUNios2State *env)
4293 {
4294 struct target_rt_sigframe *frame;
4295 int i, err = 0;
4296
4297 frame = get_sigframe(ka, env, sizeof(*frame));
4298
4299 if (ka->sa_flags & SA_SIGINFO) {
4300 tswap_siginfo(&frame->info, info);
4301 }
4302
4303 /* Create the ucontext. */
4304 __put_user(0, &frame->uc.tuc_flags);
4305 __put_user(0, &frame->uc.tuc_link);
4306 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4307 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4308 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4309 err |= rt_setup_ucontext(&frame->uc, env);
4310 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4311 __put_user((abi_ulong)set->sig[i],
4312 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4313 }
4314
4315 if (err) {
4316 goto give_sigsegv;
4317 }
4318
4319 /* Set up to return from userspace; jump to fixed address sigreturn
4320 trampoline on kuser page. */
4321 env->regs[R_RA] = (unsigned long) (0x1044);
4322
4323 /* Set up registers for signal handler */
4324 env->regs[R_SP] = (unsigned long) frame;
4325 env->regs[4] = (unsigned long) sig;
4326 env->regs[5] = (unsigned long) &frame->info;
4327 env->regs[6] = (unsigned long) &frame->uc;
4328 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4329 return;
4330
4331 give_sigsegv:
4332 if (sig == TARGET_SIGSEGV) {
4333 ka->_sa_handler = TARGET_SIG_DFL;
4334 }
4335 force_sigsegv(sig);
4336 return;
4337 }
4338
4339 long do_sigreturn(CPUNios2State *env)
4340 {
4341 trace_user_do_sigreturn(env, 0);
4342 fprintf(stderr, "do_sigreturn: not implemented\n");
4343 return -TARGET_ENOSYS;
4344 }
4345
4346 long do_rt_sigreturn(CPUNios2State *env)
4347 {
4348 /* Verify, can we follow the stack back */
4349 abi_ulong frame_addr = env->regs[R_SP];
4350 struct target_rt_sigframe *frame;
4351 sigset_t set;
4352 int rval;
4353
4354 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4355 goto badframe;
4356 }
4357
4358 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4359 do_sigprocmask(SIG_SETMASK, &set, NULL);
4360
4361 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4362 goto badframe;
4363 }
4364
4365 unlock_user_struct(frame, frame_addr, 0);
4366 return rval;
4367
4368 badframe:
4369 unlock_user_struct(frame, frame_addr, 0);
4370 force_sig(TARGET_SIGSEGV);
4371 return 0;
4372 }
4373 /* TARGET_NIOS2 */
4374
4375 #elif defined(TARGET_OPENRISC)
4376
4377 struct target_sigcontext {
4378 struct target_pt_regs regs;
4379 abi_ulong oldmask;
4380 abi_ulong usp;
4381 };
4382
4383 struct target_ucontext {
4384 abi_ulong tuc_flags;
4385 abi_ulong tuc_link;
4386 target_stack_t tuc_stack;
4387 struct target_sigcontext tuc_mcontext;
4388 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4389 };
4390
4391 struct target_rt_sigframe {
4392 abi_ulong pinfo;
4393 uint64_t puc;
4394 struct target_siginfo info;
4395 struct target_sigcontext sc;
4396 struct target_ucontext uc;
4397 unsigned char retcode[16]; /* trampoline code */
4398 };
4399
4400 /* This is the asm-generic/ucontext.h version */
4401 #if 0
4402 static int restore_sigcontext(CPUOpenRISCState *regs,
4403 struct target_sigcontext *sc)
4404 {
4405 unsigned int err = 0;
4406 unsigned long old_usp;
4407
4408 /* Always make any pending restarted system calls return -EINTR */
4409 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4410
4411 /* restore the regs from &sc->regs (same as sc, since regs is first)
4412 * (sc is already checked for VERIFY_READ since the sigframe was
4413 * checked in sys_sigreturn previously)
4414 */
4415
4416 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4417 goto badframe;
4418 }
4419
4420 /* make sure the U-flag is set so user-mode cannot fool us */
4421
4422 regs->sr &= ~SR_SM;
4423
4424 /* restore the old USP as it was before we stacked the sc etc.
4425 * (we cannot just pop the sigcontext since we aligned the sp and
4426 * stuff after pushing it)
4427 */
4428
4429 __get_user(old_usp, &sc->usp);
4430 phx_signal("old_usp 0x%lx", old_usp);
4431
4432 __PHX__ REALLY /* ??? */
4433 wrusp(old_usp);
4434 regs->gpr[1] = old_usp;
4435
4436 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4437 * after this completes, but we don't use that mechanism. maybe we can
4438 * use it now ?
4439 */
4440
4441 return err;
4442
4443 badframe:
4444 return 1;
4445 }
4446 #endif
4447
4448 /* Set up a signal frame. */
4449
4450 static void setup_sigcontext(struct target_sigcontext *sc,
4451 CPUOpenRISCState *regs,
4452 unsigned long mask)
4453 {
4454 unsigned long usp = cpu_get_gpr(regs, 1);
4455
4456 /* Copy the regs; they are first in sc so we can use sc directly. */
4457
4458 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4459
4460 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4461 the signal handler. The frametype will be restored to its previous
4462 value in restore_sigcontext. */
4463 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4464
4465 /* then some other stuff */
4466 __put_user(mask, &sc->oldmask);
4467 __put_user(usp, &sc->usp);
4468 }
4469
4470 static inline unsigned long align_sigframe(unsigned long sp)
4471 {
4472 return sp & ~3UL;
4473 }
4474
4475 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4476 CPUOpenRISCState *regs,
4477 size_t frame_size)
4478 {
4479 unsigned long sp = cpu_get_gpr(regs, 1);
4480 int onsigstack = on_sig_stack(sp);
4481
4482 /* redzone */
4483 /* This is the X/Open sanctioned signal stack switching. */
4484 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4485 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4486 }
4487
4488 sp = align_sigframe(sp - frame_size);
4489
4490 /*
4491 * If we are on the alternate signal stack and would overflow it, don't.
4492 * Return an always-bogus address instead so we will die with SIGSEGV.
4493 */
4494
4495 if (onsigstack && !likely(on_sig_stack(sp))) {
4496 return -1L;
4497 }
4498
4499 return sp;
4500 }
4501
4502 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4503 target_siginfo_t *info,
4504 target_sigset_t *set, CPUOpenRISCState *env)
4505 {
4506 int err = 0;
4507 abi_ulong frame_addr;
4508 unsigned long return_ip;
4509 struct target_rt_sigframe *frame;
4510 abi_ulong info_addr, uc_addr;
4511
4512 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4513 trace_user_setup_rt_frame(env, frame_addr);
4514 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4515 goto give_sigsegv;
4516 }
4517
4518 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4519 __put_user(info_addr, &frame->pinfo);
4520 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4521 __put_user(uc_addr, &frame->puc);
4522
4523 if (ka->sa_flags & SA_SIGINFO) {
4524 tswap_siginfo(&frame->info, info);
4525 }
4526
4527 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4528 __put_user(0, &frame->uc.tuc_flags);
4529 __put_user(0, &frame->uc.tuc_link);
4530 __put_user(target_sigaltstack_used.ss_sp,
4531 &frame->uc.tuc_stack.ss_sp);
4532 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4533 &frame->uc.tuc_stack.ss_flags);
4534 __put_user(target_sigaltstack_used.ss_size,
4535 &frame->uc.tuc_stack.ss_size);
4536 setup_sigcontext(&frame->sc, env, set->sig[0]);
4537
4538 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4539
4540 /* trampoline - the desired return ip is the retcode itself */
4541 return_ip = (unsigned long)&frame->retcode;
4542 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4543 __put_user(0xa960, (short *)(frame->retcode + 0));
4544 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4545 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4546 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4547
4548 if (err) {
4549 goto give_sigsegv;
4550 }
4551
4552 /* TODO what is the current->exec_domain stuff and invmap ? */
4553
4554 /* Set up registers for signal handler */
4555 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4556 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4557 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4558 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4559 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
4560
4561 /* actually move the usp to reflect the stacked frame */
4562 cpu_set_gpr(env, 1, (unsigned long)frame);
4563
4564 return;
4565
4566 give_sigsegv:
4567 unlock_user_struct(frame, frame_addr, 1);
4568 force_sigsegv(sig);
4569 }
4570
4571 long do_sigreturn(CPUOpenRISCState *env)
4572 {
4573 trace_user_do_sigreturn(env, 0);
4574 fprintf(stderr, "do_sigreturn: not implemented\n");
4575 return -TARGET_ENOSYS;
4576 }
4577
4578 long do_rt_sigreturn(CPUOpenRISCState *env)
4579 {
4580 trace_user_do_rt_sigreturn(env, 0);
4581 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4582 return -TARGET_ENOSYS;
4583 }
4584 /* TARGET_OPENRISC */
4585
4586 #elif defined(TARGET_S390X)
4587
4588 #define __NUM_GPRS 16
4589 #define __NUM_FPRS 16
4590 #define __NUM_ACRS 16
4591
4592 #define S390_SYSCALL_SIZE 2
4593 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4594
4595 #define _SIGCONTEXT_NSIG 64
4596 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4597 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4598 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4599 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4600 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4601
4602 typedef struct {
4603 target_psw_t psw;
4604 target_ulong gprs[__NUM_GPRS];
4605 unsigned int acrs[__NUM_ACRS];
4606 } target_s390_regs_common;
4607
4608 typedef struct {
4609 unsigned int fpc;
4610 double fprs[__NUM_FPRS];
4611 } target_s390_fp_regs;
4612
4613 typedef struct {
4614 target_s390_regs_common regs;
4615 target_s390_fp_regs fpregs;
4616 } target_sigregs;
4617
4618 struct target_sigcontext {
4619 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4620 target_sigregs *sregs;
4621 };
4622
4623 typedef struct {
4624 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4625 struct target_sigcontext sc;
4626 target_sigregs sregs;
4627 int signo;
4628 uint8_t retcode[S390_SYSCALL_SIZE];
4629 } sigframe;
4630
4631 struct target_ucontext {
4632 target_ulong tuc_flags;
4633 struct target_ucontext *tuc_link;
4634 target_stack_t tuc_stack;
4635 target_sigregs tuc_mcontext;
4636 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4637 };
4638
4639 typedef struct {
4640 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4641 uint8_t retcode[S390_SYSCALL_SIZE];
4642 struct target_siginfo info;
4643 struct target_ucontext uc;
4644 } rt_sigframe;
4645
4646 static inline abi_ulong
4647 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4648 {
4649 abi_ulong sp;
4650
4651 /* Default to using normal stack */
4652 sp = env->regs[15];
4653
4654 /* This is the X/Open sanctioned signal stack switching. */
4655 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4656 if (!sas_ss_flags(sp)) {
4657 sp = target_sigaltstack_used.ss_sp +
4658 target_sigaltstack_used.ss_size;
4659 }
4660 }
4661
4662 /* This is the legacy signal stack switching. */
4663 else if (/* FIXME !user_mode(regs) */ 0 &&
4664 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4665 ka->sa_restorer) {
4666 sp = (abi_ulong) ka->sa_restorer;
4667 }
4668
4669 return (sp - frame_size) & -8ul;
4670 }
4671
4672 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4673 {
4674 int i;
4675 //save_access_regs(current->thread.acrs); FIXME
4676
4677 /* Copy a 'clean' PSW mask to the user to avoid leaking
4678 information about whether PER is currently on. */
4679 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4680 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4681 for (i = 0; i < 16; i++) {
4682 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4683 }
4684 for (i = 0; i < 16; i++) {
4685 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4686 }
4687 /*
4688 * We have to store the fp registers to current->thread.fp_regs
4689 * to merge them with the emulated registers.
4690 */
4691 //save_fp_regs(&current->thread.fp_regs); FIXME
4692 for (i = 0; i < 16; i++) {
4693 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4694 }
4695 }
4696
4697 static void setup_frame(int sig, struct target_sigaction *ka,
4698 target_sigset_t *set, CPUS390XState *env)
4699 {
4700 sigframe *frame;
4701 abi_ulong frame_addr;
4702
4703 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4704 trace_user_setup_frame(env, frame_addr);
4705 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4706 goto give_sigsegv;
4707 }
4708
4709 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4710
4711 save_sigregs(env, &frame->sregs);
4712
4713 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4714 (abi_ulong *)&frame->sc.sregs);
4715
4716 /* Set up to return from userspace. If provided, use a stub
4717 already in userspace. */
4718 if (ka->sa_flags & TARGET_SA_RESTORER) {
4719 env->regs[14] = (unsigned long)
4720 ka->sa_restorer | PSW_ADDR_AMODE;
4721 } else {
4722 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4723 | PSW_ADDR_AMODE;
4724 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4725 (uint16_t *)(frame->retcode));
4726 }
4727
4728 /* Set up backchain. */
4729 __put_user(env->regs[15], (abi_ulong *) frame);
4730
4731 /* Set up registers for signal handler */
4732 env->regs[15] = frame_addr;
4733 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4734
4735 env->regs[2] = sig; //map_signal(sig);
4736 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4737
4738 /* We forgot to include these in the sigcontext.
4739 To avoid breaking binary compatibility, they are passed as args. */
4740 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4741 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4742
4743 /* Place signal number on stack to allow backtrace from handler. */
4744 __put_user(env->regs[2], &frame->signo);
4745 unlock_user_struct(frame, frame_addr, 1);
4746 return;
4747
4748 give_sigsegv:
4749 force_sigsegv(sig);
4750 }
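/*
 * Illustrative sketch (not part of the original code): two details from
 * setup_frame() above.  The sigreturn stub is a single two-byte s390 "svc"
 * instruction, built as S390_SYSCALL_OPCODE | syscall number, and the
 * "backchain" is simply the caller's stack pointer stored in the first
 * word of the new frame so the stack can be walked.  The syscall number
 * and addresses below are examples only.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* svc encoding: opcode 0x0a in the high byte, syscall number in the low byte. */
    uint16_t svc_sigreturn = 0x0a00u | 119u;
    assert((svc_sigreturn >> 8) == 0x0a);

    /* Backchain: the first slot of the new frame points at the old frame. */
    uint64_t old_sp = 0x3ffffff000ull;       /* made-up guest r15 */
    uint64_t frame[64] = { 0 };
    frame[0] = old_sp;                       /* what __put_user(env->regs[15], frame) achieves */
    assert(frame[0] == old_sp);
    return 0;
}
#endif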
4751
4752 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4753 target_siginfo_t *info,
4754 target_sigset_t *set, CPUS390XState *env)
4755 {
4756 int i;
4757 rt_sigframe *frame;
4758 abi_ulong frame_addr;
4759
4760 frame_addr = get_sigframe(ka, env, sizeof *frame);
4761 trace_user_setup_rt_frame(env, frame_addr);
4762 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4763 goto give_sigsegv;
4764 }
4765
4766 tswap_siginfo(&frame->info, info);
4767
4768 /* Create the ucontext. */
4769 __put_user(0, &frame->uc.tuc_flags);
4770 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4771 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4772 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4773 &frame->uc.tuc_stack.ss_flags);
4774 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4775 save_sigregs(env, &frame->uc.tuc_mcontext);
4776 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4777 __put_user((abi_ulong)set->sig[i],
4778 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4779 }
4780
4781 /* Set up to return from userspace. If provided, use a stub
4782 already in userspace. */
4783 if (ka->sa_flags & TARGET_SA_RESTORER) {
4784 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4785 } else {
4786 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4787 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4788 (uint16_t *)(frame->retcode));
4789 }
4790
4791 /* Set up backchain. */
4792 __put_user(env->regs[15], (abi_ulong *) frame);
4793
4794 /* Set up registers for signal handler */
4795 env->regs[15] = frame_addr;
4796 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4797
4798 env->regs[2] = sig; //map_signal(sig);
4799 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4800 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4801 return;
4802
4803 give_sigsegv:
4804 force_sigsegv(sig);
4805 }
4806
4807 static int
4808 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4809 {
4810 int err = 0;
4811 int i;
4812
4813 for (i = 0; i < 16; i++) {
4814 __get_user(env->regs[i], &sc->regs.gprs[i]);
4815 }
4816
4817 __get_user(env->psw.mask, &sc->regs.psw.mask);
4818 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4819 (unsigned long long)env->psw.addr);
4820 __get_user(env->psw.addr, &sc->regs.psw.addr);
4821 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4822
4823 for (i = 0; i < 16; i++) {
4824 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4825 }
4826 for (i = 0; i < 16; i++) {
4827 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4828 }
4829
4830 return err;
4831 }
4832
4833 long do_sigreturn(CPUS390XState *env)
4834 {
4835 sigframe *frame;
4836 abi_ulong frame_addr = env->regs[15];
4837 target_sigset_t target_set;
4838 sigset_t set;
4839
4840 trace_user_do_sigreturn(env, frame_addr);
4841 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4842 goto badframe;
4843 }
4844 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4845
4846 target_to_host_sigset_internal(&set, &target_set);
4847 set_sigmask(&set); /* ~_BLOCKABLE? */
4848
4849 if (restore_sigregs(env, &frame->sregs)) {
4850 goto badframe;
4851 }
4852
4853 unlock_user_struct(frame, frame_addr, 0);
4854 return -TARGET_QEMU_ESIGRETURN;
4855
4856 badframe:
4857 force_sig(TARGET_SIGSEGV);
4858 return -TARGET_QEMU_ESIGRETURN;
4859 }
4860
4861 long do_rt_sigreturn(CPUS390XState *env)
4862 {
4863 rt_sigframe *frame;
4864 abi_ulong frame_addr = env->regs[15];
4865 sigset_t set;
4866
4867 trace_user_do_rt_sigreturn(env, frame_addr);
4868 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4869 goto badframe;
4870 }
4871 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4872
4873 set_sigmask(&set); /* ~_BLOCKABLE? */
4874
4875 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4876 goto badframe;
4877 }
4878
4879 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4880 get_sp_from_cpustate(env)) == -EFAULT) {
4881 goto badframe;
4882 }
4883 unlock_user_struct(frame, frame_addr, 0);
4884 return -TARGET_QEMU_ESIGRETURN;
4885
4886 badframe:
4887 unlock_user_struct(frame, frame_addr, 0);
4888 force_sig(TARGET_SIGSEGV);
4889 return -TARGET_QEMU_ESIGRETURN;
4890 }
4891
4892 #elif defined(TARGET_PPC)
4893
4894 /* Size of dummy stack frame allocated when calling signal handler.
4895 See arch/powerpc/include/asm/ptrace.h. */
4896 #if defined(TARGET_PPC64)
4897 #define SIGNAL_FRAMESIZE 128
4898 #else
4899 #define SIGNAL_FRAMESIZE 64
4900 #endif
4901
4902 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4903 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4904 struct target_mcontext {
4905 target_ulong mc_gregs[48];
4906 /* Includes fpscr. */
4907 uint64_t mc_fregs[33];
4908 #if defined(TARGET_PPC64)
4909 /* Pointer to the vector regs */
4910 target_ulong v_regs;
4911 #else
4912 target_ulong mc_pad[2];
4913 #endif
4914 /* We need to handle Altivec and SPE at the same time, which no
4915 kernel needs to do. Fortunately, the kernel always defines this
4916 area to be large enough for the Altivec registers, rather than
4917 sizing it per platform. */
4918 union {
4919 /* SPE vector registers. One extra for SPEFSCR. */
4920 uint32_t spe[33];
4921 /* Altivec vector registers. The packing of VSCR and VRSAVE
4922 varies depending on whether we're PPC64 or not: PPC64 splits
4923 them apart; PPC32 stuffs them together.
4924 We also need to account for the VSX registers on PPC64
4925 */
4926 #if defined(TARGET_PPC64)
4927 #define QEMU_NVRREG (34 + 16)
4928 /* On ppc64, this mcontext structure is naturally *unaligned*,
4929 * or rather it is aligned on an 8-byte boundary but not on
4930 * a 16-byte one. This pad fixes it up. This is also why the
4931 * vector regs are referenced by the v_regs pointer above, so
4932 * any amount of padding can be added here.
4933 */
4934 target_ulong pad;
4935 #else
4936 /* On ppc32, we are already aligned to 16 bytes */
4937 #define QEMU_NVRREG 33
4938 #endif
4939 /* We cannot use ppc_avr_t here as we do *not* want the implied
4940 * 16-byte alignment that would result from it. This would have
4941 * the effect of making the whole struct target_mcontext aligned
4942 * which breaks the layout of struct target_ucontext on ppc64.
4943 */
4944 uint64_t altivec[QEMU_NVRREG][2];
4945 #undef QEMU_NVRREG
4946 } mc_vregs;
4947 };
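/*
 * Illustrative sketch (not part of the original code): the comments in the
 * union above rely on a general C rule -- a struct inherits the strictest
 * alignment of its members, so a single 16-byte-aligned member would force
 * 16-byte alignment (and different padding) on the whole mcontext and on
 * everything that embeds it.  The types below are generic stand-ins using
 * a GCC-style aligned attribute, not the actual QEMU types.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pair64_t[2];                                       /* what the union uses */
typedef uint64_t aligned_pair64_t[2] __attribute__((aligned(16)));  /* what ppc_avr_t would imply */

struct ctx_loose  { uint32_t hdr; pair64_t         v[4]; };
struct ctx_strict { uint32_t hdr; aligned_pair64_t v[4]; };

int main(void)
{
    /* The strict variant grows and becomes 16-byte aligned, shifting the
     * offsets of everything that embeds it -- exactly what the layout of
     * struct target_ucontext on ppc64 must avoid. */
    printf("loose : size %zu align %zu\n",
           sizeof(struct ctx_loose), _Alignof(struct ctx_loose));
    printf("strict: size %zu align %zu\n",
           sizeof(struct ctx_strict), _Alignof(struct ctx_strict));
    return 0;
}
#endif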
4948
4949 /* See arch/powerpc/include/asm/sigcontext.h. */
4950 struct target_sigcontext {
4951 target_ulong _unused[4];
4952 int32_t signal;
4953 #if defined(TARGET_PPC64)
4954 int32_t pad0;
4955 #endif
4956 target_ulong handler;
4957 target_ulong oldmask;
4958 target_ulong regs; /* struct pt_regs __user * */
4959 #if defined(TARGET_PPC64)
4960 struct target_mcontext mcontext;
4961 #endif
4962 };
4963
4964 /* Indices for target_mcontext.mc_gregs, below.
4965 See arch/powerpc/include/asm/ptrace.h for details. */
4966 enum {
4967 TARGET_PT_R0 = 0,
4968 TARGET_PT_R1 = 1,
4969 TARGET_PT_R2 = 2,
4970 TARGET_PT_R3 = 3,
4971 TARGET_PT_R4 = 4,
4972 TARGET_PT_R5 = 5,
4973 TARGET_PT_R6 = 6,
4974 TARGET_PT_R7 = 7,
4975 TARGET_PT_R8 = 8,
4976 TARGET_PT_R9 = 9,
4977 TARGET_PT_R10 = 10,
4978 TARGET_PT_R11 = 11,
4979 TARGET_PT_R12 = 12,
4980 TARGET_PT_R13 = 13,
4981 TARGET_PT_R14 = 14,
4982 TARGET_PT_R15 = 15,
4983 TARGET_PT_R16 = 16,
4984 TARGET_PT_R17 = 17,
4985 TARGET_PT_R18 = 18,
4986 TARGET_PT_R19 = 19,
4987 TARGET_PT_R20 = 20,
4988 TARGET_PT_R21 = 21,
4989 TARGET_PT_R22 = 22,
4990 TARGET_PT_R23 = 23,
4991 TARGET_PT_R24 = 24,
4992 TARGET_PT_R25 = 25,
4993 TARGET_PT_R26 = 26,
4994 TARGET_PT_R27 = 27,
4995 TARGET_PT_R28 = 28,
4996 TARGET_PT_R29 = 29,
4997 TARGET_PT_R30 = 30,
4998 TARGET_PT_R31 = 31,
4999 TARGET_PT_NIP = 32,
5000 TARGET_PT_MSR = 33,
5001 TARGET_PT_ORIG_R3 = 34,
5002 TARGET_PT_CTR = 35,
5003 TARGET_PT_LNK = 36,
5004 TARGET_PT_XER = 37,
5005 TARGET_PT_CCR = 38,
5006 /* Yes, there are two registers with #39. One is 64-bit only. */
5007 TARGET_PT_MQ = 39,
5008 TARGET_PT_SOFTE = 39,
5009 TARGET_PT_TRAP = 40,
5010 TARGET_PT_DAR = 41,
5011 TARGET_PT_DSISR = 42,
5012 TARGET_PT_RESULT = 43,
5013 TARGET_PT_REGS_COUNT = 44
5014 };
5015
5016
5017 struct target_ucontext {
5018 target_ulong tuc_flags;
5019 target_ulong tuc_link; /* ucontext_t __user * */
5020 struct target_sigaltstack tuc_stack;
5021 #if !defined(TARGET_PPC64)
5022 int32_t tuc_pad[7];
5023 target_ulong tuc_regs; /* struct mcontext __user *
5024 points to uc_mcontext field */
5025 #endif
5026 target_sigset_t tuc_sigmask;
5027 #if defined(TARGET_PPC64)
5028 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
5029 struct target_sigcontext tuc_sigcontext;
5030 #else
5031 int32_t tuc_maskext[30];
5032 int32_t tuc_pad2[3];
5033 struct target_mcontext tuc_mcontext;
5034 #endif
5035 };
5036
5037 /* See arch/powerpc/kernel/signal_32.c. */
5038 struct target_sigframe {
5039 struct target_sigcontext sctx;
5040 struct target_mcontext mctx;
5041 int32_t abigap[56];
5042 };
5043
5044 #if defined(TARGET_PPC64)
5045
5046 #define TARGET_TRAMP_SIZE 6
5047
5048 struct target_rt_sigframe {
5049 /* sys_rt_sigreturn requires the ucontext be the first field */
5050 struct target_ucontext uc;
5051 target_ulong _unused[2];
5052 uint32_t trampoline[TARGET_TRAMP_SIZE];
5053 target_ulong pinfo; /* struct siginfo __user * */
5054 target_ulong puc; /* void __user * */
5055 struct target_siginfo info;
5056 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
5057 char abigap[288];
5058 } __attribute__((aligned(16)));
5059
5060 #else
5061
5062 struct target_rt_sigframe {
5063 struct target_siginfo info;
5064 struct target_ucontext uc;
5065 int32_t abigap[56];
5066 };
5067
5068 #endif
5069
5070 #if defined(TARGET_PPC64)
5071
5072 struct target_func_ptr {
5073 target_ulong entry;
5074 target_ulong toc;
5075 };
5076
5077 #endif
5078
5079 /* We use the mc_pad field for the signal return trampoline. */
5080 #define tramp mc_pad
5081
5082 /* See arch/powerpc/kernel/signal.c. */
5083 static target_ulong get_sigframe(struct target_sigaction *ka,
5084 CPUPPCState *env,
5085 int frame_size)
5086 {
5087 target_ulong oldsp;
5088
5089 oldsp = env->gpr[1];
5090
5091 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5092 (sas_ss_flags(oldsp) == 0)) {
5093 oldsp = (target_sigaltstack_used.ss_sp
5094 + target_sigaltstack_used.ss_size);
5095 }
5096
5097 return (oldsp - frame_size) & ~0xFUL;
5098 }
5099
5100 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5101 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5102 #define PPC_VEC_HI 0
5103 #define PPC_VEC_LO 1
5104 #else
5105 #define PPC_VEC_HI 1
5106 #define PPC_VEC_LO 0
5107 #endif
5108
5109
5110 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
5111 {
5112 target_ulong msr = env->msr;
5113 int i;
5114 target_ulong ccr = 0;
5115
5116 /* In general, the kernel attempts to be intelligent about what it
5117 needs to save for Altivec/FP/SPE registers. We don't care that
5118 much, so we just go ahead and save everything. */
5119
5120 /* Save general registers. */
5121 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5122 __put_user(env->gpr[i], &frame->mc_gregs[i]);
5123 }
5124 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5125 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5126 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5127 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5128
5129 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5130 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
5131 }
5132 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5133
5134 /* Save Altivec registers if necessary. */
5135 if (env->insns_flags & PPC_ALTIVEC) {
5136 uint32_t *vrsave;
5137 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5138 ppc_avr_t *avr = &env->avr[i];
5139 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
5140
5141 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5142 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5143 }
5144 /* Set MSR_VR in the saved MSR value to indicate that
5145 frame->mc_vregs contains valid data. */
5146 msr |= MSR_VR;
5147 #if defined(TARGET_PPC64)
5148 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
5149 /* 64-bit needs to put a pointer to the vectors in the frame */
5150 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
5151 #else
5152 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
5153 #endif
5154 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
5155 }
5156
5157 /* Save VSX second halves */
5158 if (env->insns_flags2 & PPC2_VSX) {
5159 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5160 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5161 __put_user(env->vsr[i], &vsregs[i]);
5162 }
5163 }
5164
5165 /* Save floating point registers. */
5166 if (env->insns_flags & PPC_FLOAT) {
5167 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5168 __put_user(env->fpr[i], &frame->mc_fregs[i]);
5169 }
5170 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
5171 }
5172
5173 /* Save SPE registers. The kernel only saves the high half. */
5174 if (env->insns_flags & PPC_SPE) {
5175 #if defined(TARGET_PPC64)
5176 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5177 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
5178 }
5179 #else
5180 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5181 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5182 }
5183 #endif
5184 /* Set MSR_SPE in the saved MSR value to indicate that
5185 frame->mc_vregs contains valid data. */
5186 msr |= MSR_SPE;
5187 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5188 }
5189
5190 /* Store MSR. */
5191 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5192 }
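/*
 * Illustrative sketch (not part of the original code): save_user_regs()
 * above packs the eight 4-bit condition-register fields into one 32-bit
 * CCR word with CR0 in the most significant nibble, and restore_user_regs()
 * below inverts the operation.  The field values here are made up.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t crf[8] = { 0x8, 0x4, 0x2, 0x1, 0x0, 0xf, 0x3, 0x5 };
    uint32_t ccr = 0;
    int i;

    /* Pack: CR0 lands in bits 31..28, CR7 in bits 3..0. */
    for (i = 0; i < 8; i++) {
        ccr |= crf[i] << (32 - ((i + 1) * 4));
    }
    assert(ccr == 0x84210f35u);

    /* Unpack: the inverse loop used on the restore path. */
    for (i = 0; i < 8; i++) {
        assert(((ccr >> (32 - ((i + 1) * 4))) & 0xf) == crf[i]);
    }
    return 0;
}
#endif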
5193
5194 static void encode_trampoline(int sigret, uint32_t *tramp)
5195 {
5196 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5197 if (sigret) {
5198 __put_user(0x38000000 | sigret, &tramp[0]);
5199 __put_user(0x44000002, &tramp[1]);
5200 }
5201 }
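/*
 * Illustrative sketch (not part of the original code): the two trampoline
 * words built above follow the usual Linux/PPC syscall convention -- load
 * the syscall number into r0 ("li r0, n" is addi r0, r0, n, opcode
 * 0x38000000), then execute "sc" (0x44000002).  setup_frame() and
 * setup_rt_frame() point LR at these words so the handler's return runs
 * them.  The number 172 below is an example only.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t sigret = 172;
    uint32_t tramp[2];

    tramp[0] = 0x38000000u | sigret;  /* li r0, sigret */
    tramp[1] = 0x44000002u;           /* sc            */

    assert(tramp[0] == (0x38000000u | 172u));
    assert(tramp[1] == 0x44000002u);
    return 0;
}
#endif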
5202
5203 static void restore_user_regs(CPUPPCState *env,
5204 struct target_mcontext *frame, int sig)
5205 {
5206 target_ulong save_r2 = 0;
5207 target_ulong msr;
5208 target_ulong ccr;
5209
5210 int i;
5211
5212 if (!sig) {
5213 save_r2 = env->gpr[2];
5214 }
5215
5216 /* Restore general registers. */
5217 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5218 __get_user(env->gpr[i], &frame->mc_gregs[i]);
5219 }
5220 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5221 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5222 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5223 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5224 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5225
5226 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5227 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
5228 }
5229
5230 if (!sig) {
5231 env->gpr[2] = save_r2;
5232 }
5233 /* Restore MSR. */
5234 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5235
5236 /* If doing signal return, restore the previous little-endian mode. */
5237 if (sig)
5238 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
5239
5240 /* Restore Altivec registers if necessary. */
5241 if (env->insns_flags & PPC_ALTIVEC) {
5242 ppc_avr_t *v_regs;
5243 uint32_t *vrsave;
5244 #if defined(TARGET_PPC64)
5245 uint64_t v_addr;
5246 /* 64-bit needs to recover the pointer to the vectors from the frame */
5247 __get_user(v_addr, &frame->v_regs);
5248 v_regs = g2h(v_addr);
5249 #else
5250 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
5251 #endif
5252 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5253 ppc_avr_t *avr = &env->avr[i];
5254 ppc_avr_t *vreg = &v_regs[i];
5255
5256 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5257 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5258 }
5259 /* The save path sets MSR_VR in the saved MSR when frame->mc_vregs
5260 contains valid data; VRSAVE is stored just after the vector regs. */
5261 #if defined(TARGET_PPC64)
5262 vrsave = (uint32_t *)&v_regs[33];
5263 #else
5264 vrsave = (uint32_t *)&v_regs[32];
5265 #endif
5266 __get_user(env->spr[SPR_VRSAVE], vrsave);
5267 }
5268
5269 /* Restore VSX second halves */
5270 if (env->insns_flags2 & PPC2_VSX) {
5271 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5272 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5273 __get_user(env->vsr[i], &vsregs[i]);
5274 }
5275 }
5276
5277 /* Restore floating point registers. */
5278 if (env->insns_flags & PPC_FLOAT) {
5279 uint64_t fpscr;
5280 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5281 __get_user(env->fpr[i], &frame->mc_fregs[i]);
5282 }
5283 __get_user(fpscr, &frame->mc_fregs[32]);
5284 env->fpscr = (uint32_t) fpscr;
5285 }
5286
5287 /* Restore SPE registers. The kernel only saves the high half. */
5288 if (env->insns_flags & PPC_SPE) {
5289 #if defined(TARGET_PPC64)
5290 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5291 uint32_t hi;
5292
5293 __get_user(hi, &frame->mc_vregs.spe[i]);
5294 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
5295 }
5296 #else
5297 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5298 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5299 }
5300 #endif
5301 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5302 }
5303 }
5304
5305 #if !defined(TARGET_PPC64)
5306 static void setup_frame(int sig, struct target_sigaction *ka,
5307 target_sigset_t *set, CPUPPCState *env)
5308 {
5309 struct target_sigframe *frame;
5310 struct target_sigcontext *sc;
5311 target_ulong frame_addr, newsp;
5312 int err = 0;
5313
5314 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5315 trace_user_setup_frame(env, frame_addr);
5316 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5317 goto sigsegv;
5318 sc = &frame->sctx;
5319
5320 __put_user(ka->_sa_handler, &sc->handler);
5321 __put_user(set->sig[0], &sc->oldmask);
5322 __put_user(set->sig[1], &sc->_unused[3]);
5323 __put_user(h2g(&frame->mctx), &sc->regs);
5324 __put_user(sig, &sc->signal);
5325
5326 /* Save user regs. */
5327 save_user_regs(env, &frame->mctx);
5328
5329 /* Construct the trampoline code on the stack. */
5330 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5331
5332 /* The kernel checks for the presence of a VDSO here. We don't
5333 emulate a vdso, so use a sigreturn system call. */
5334 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5335
5336 /* Turn off all fp exceptions. */
5337 env->fpscr = 0;
5338
5339 /* Create a stack frame for the caller of the handler. */
5340 newsp = frame_addr - SIGNAL_FRAMESIZE;
5341 err |= put_user(env->gpr[1], newsp, target_ulong);
5342
5343 if (err)
5344 goto sigsegv;
5345
5346 /* Set up registers for signal handler. */
5347 env->gpr[1] = newsp;
5348 env->gpr[3] = sig;
5349 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5350
5351 env->nip = (target_ulong) ka->_sa_handler;
5352
5353 /* Signal handlers are entered in big-endian mode. */
5354 env->msr &= ~(1ull << MSR_LE);
5355
5356 unlock_user_struct(frame, frame_addr, 1);
5357 return;
5358
5359 sigsegv:
5360 unlock_user_struct(frame, frame_addr, 1);
5361 force_sigsegv(sig);
5362 }
5363 #endif /* !defined(TARGET_PPC64) */
5364
5365 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5366 target_siginfo_t *info,
5367 target_sigset_t *set, CPUPPCState *env)
5368 {
5369 struct target_rt_sigframe *rt_sf;
5370 uint32_t *trampptr = 0;
5371 struct target_mcontext *mctx = 0;
5372 target_ulong rt_sf_addr, newsp = 0;
5373 int i, err = 0;
5374 #if defined(TARGET_PPC64)
5375 struct target_sigcontext *sc = 0;
5376 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5377 #endif
5378
5379 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5380 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5381 goto sigsegv;
5382
5383 tswap_siginfo(&rt_sf->info, info);
5384
5385 __put_user(0, &rt_sf->uc.tuc_flags);
5386 __put_user(0, &rt_sf->uc.tuc_link);
5387 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5388 &rt_sf->uc.tuc_stack.ss_sp);
5389 __put_user(sas_ss_flags(env->gpr[1]),
5390 &rt_sf->uc.tuc_stack.ss_flags);
5391 __put_user(target_sigaltstack_used.ss_size,
5392 &rt_sf->uc.tuc_stack.ss_size);
5393 #if !defined(TARGET_PPC64)
5394 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5395 &rt_sf->uc.tuc_regs);
5396 #endif
5397 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5398 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5399 }
5400
5401 #if defined(TARGET_PPC64)
5402 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
5403 trampptr = &rt_sf->trampoline[0];
5404
5405 sc = &rt_sf->uc.tuc_sigcontext;
5406 __put_user(h2g(mctx), &sc->regs);
5407 __put_user(sig, &sc->signal);
5408 #else
5409 mctx = &rt_sf->uc.tuc_mcontext;
5410 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5411 #endif
5412
5413 save_user_regs(env, mctx);
5414 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5415
5416 /* The kernel checks for the presence of a VDSO here. We don't
5417 emulate a vdso, so use a sigreturn system call. */
5418 env->lr = (target_ulong) h2g(trampptr);
5419
5420 /* Turn off all fp exceptions. */
5421 env->fpscr = 0;
5422
5423 /* Create a stack frame for the caller of the handler. */
5424 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5425 err |= put_user(env->gpr[1], newsp, target_ulong);
5426
5427 if (err)
5428 goto sigsegv;
5429
5430 /* Set up registers for signal handler. */
5431 env->gpr[1] = newsp;
5432 env->gpr[3] = (target_ulong) sig;
5433 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5434 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5435 env->gpr[6] = (target_ulong) h2g(rt_sf);
5436
5437 #if defined(TARGET_PPC64)
5438 if (get_ppc64_abi(image) < 2) {
5439 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5440 struct target_func_ptr *handler =
5441 (struct target_func_ptr *)g2h(ka->_sa_handler);
5442 env->nip = tswapl(handler->entry);
5443 env->gpr[2] = tswapl(handler->toc);
5444 } else {
5445 /* ELFv2 PPC64 function pointers are entry points, but R12
5446 * must also be set */
5447 env->nip = tswapl((target_ulong) ka->_sa_handler);
5448 env->gpr[12] = env->nip;
5449 }
5450 #else
5451 env->nip = (target_ulong) ka->_sa_handler;
5452 #endif
5453
5454 /* Signal handlers are entered in big-endian mode. */
5455 env->msr &= ~(1ull << MSR_LE);
5456
5457 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5458 return;
5459
5460 sigsegv:
5461 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5462 force_sigsegv(sig);
5463
5464 }
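/*
 * Illustrative sketch (not part of the original code): the ELFv1 branch in
 * setup_rt_frame() above.  A 64-bit ELFv1 "function pointer" is the address
 * of a descriptor that holds the real entry point and the TOC base (plus a
 * rarely used environment word in the full .opd layout), so both NIP and r2
 * are loaded from it; ELFv2 dropped the descriptor and instead expects r12
 * to carry the entry address.  The struct name and TOC value below are made
 * up; only the two-field layout mirrors target_func_ptr.
 */
#if 0
#include <inttypes.h>
#include <stdio.h>

struct func_desc {
    uint64_t entry;   /* real code address */
    uint64_t toc;     /* TOC base to load into r2 */
};

static void fake_handler(void) { }

int main(void)
{
    struct func_desc desc = {
        (uint64_t)(uintptr_t)&fake_handler,
        0x10020000ull                       /* made-up TOC base */
    };

    /* ELFv1: NIP <- desc.entry, r2 <- desc.toc.
     * ELFv2: NIP and r12 <- the handler address itself. */
    printf("entry 0x%" PRIx64 "  toc 0x%" PRIx64 "\n", desc.entry, desc.toc);
    return 0;
}
#endif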
5465
5466 #if !defined(TARGET_PPC64)
5467 long do_sigreturn(CPUPPCState *env)
5468 {
5469 struct target_sigcontext *sc = NULL;
5470 struct target_mcontext *sr = NULL;
5471 target_ulong sr_addr = 0, sc_addr;
5472 sigset_t blocked;
5473 target_sigset_t set;
5474
5475 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5476 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5477 goto sigsegv;
5478
5479 #if defined(TARGET_PPC64)
5480 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5481 #else
5482 __get_user(set.sig[0], &sc->oldmask);
5483 __get_user(set.sig[1], &sc->_unused[3]);
5484 #endif
5485 target_to_host_sigset_internal(&blocked, &set);
5486 set_sigmask(&blocked);
5487
5488 __get_user(sr_addr, &sc->regs);
5489 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5490 goto sigsegv;
5491 restore_user_regs(env, sr, 1);
5492
5493 unlock_user_struct(sr, sr_addr, 1);
5494 unlock_user_struct(sc, sc_addr, 1);
5495 return -TARGET_QEMU_ESIGRETURN;
5496
5497 sigsegv:
5498 unlock_user_struct(sr, sr_addr, 1);
5499 unlock_user_struct(sc, sc_addr, 1);
5500 force_sig(TARGET_SIGSEGV);
5501 return -TARGET_QEMU_ESIGRETURN;
5502 }
5503 #endif /* !defined(TARGET_PPC64) */
5504
5505 /* See arch/powerpc/kernel/signal_32.c. */
5506 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5507 {
5508 struct target_mcontext *mcp;
5509 target_ulong mcp_addr;
5510 sigset_t blocked;
5511 target_sigset_t set;
5512
5513 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5514 sizeof (set)))
5515 return 1;
5516
5517 #if defined(TARGET_PPC64)
5518 mcp_addr = h2g(ucp) +
5519 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5520 #else
5521 __get_user(mcp_addr, &ucp->tuc_regs);
5522 #endif
5523
5524 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5525 return 1;
5526
5527 target_to_host_sigset_internal(&blocked, &set);
5528 set_sigmask(&blocked);
5529 restore_user_regs(env, mcp, sig);
5530
5531 unlock_user_struct(mcp, mcp_addr, 1);
5532 return 0;
5533 }
5534
5535 long do_rt_sigreturn(CPUPPCState *env)
5536 {
5537 struct target_rt_sigframe *rt_sf = NULL;
5538 target_ulong rt_sf_addr;
5539
5540 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5541 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5542 goto sigsegv;
5543
5544 if (do_setcontext(&rt_sf->uc, env, 1))
5545 goto sigsegv;
5546
5547 do_sigaltstack(rt_sf_addr
5548 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5549 0, env->gpr[1]);
5550
5551 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5552 return -TARGET_QEMU_ESIGRETURN;
5553
5554 sigsegv:
5555 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5556 force_sig(TARGET_SIGSEGV);
5557 return -TARGET_QEMU_ESIGRETURN;
5558 }
5559
5560 #elif defined(TARGET_M68K)
5561
5562 struct target_sigcontext {
5563 abi_ulong sc_mask;
5564 abi_ulong sc_usp;
5565 abi_ulong sc_d0;
5566 abi_ulong sc_d1;
5567 abi_ulong sc_a0;
5568 abi_ulong sc_a1;
5569 unsigned short sc_sr;
5570 abi_ulong sc_pc;
5571 };
5572
5573 struct target_sigframe
5574 {
5575 abi_ulong pretcode;
5576 int sig;
5577 int code;
5578 abi_ulong psc;
5579 char retcode[8];
5580 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5581 struct target_sigcontext sc;
5582 };
5583
5584 typedef int target_greg_t;
5585 #define TARGET_NGREG 18
5586 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5587
5588 typedef struct target_fpregset {
5589 int f_fpcntl[3];
5590 int f_fpregs[8*3];
5591 } target_fpregset_t;
5592
5593 struct target_mcontext {
5594 int version;
5595 target_gregset_t gregs;
5596 target_fpregset_t fpregs;
5597 };
5598
5599 #define TARGET_MCONTEXT_VERSION 2
5600
5601 struct target_ucontext {
5602 abi_ulong tuc_flags;
5603 abi_ulong tuc_link;
5604 target_stack_t tuc_stack;
5605 struct target_mcontext tuc_mcontext;
5606 abi_long tuc_filler[80];
5607 target_sigset_t tuc_sigmask;
5608 };
5609
5610 struct target_rt_sigframe
5611 {
5612 abi_ulong pretcode;
5613 int sig;
5614 abi_ulong pinfo;
5615 abi_ulong puc;
5616 char retcode[8];
5617 struct target_siginfo info;
5618 struct target_ucontext uc;
5619 };
5620
5621 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5622 abi_ulong mask)
5623 {
5624 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5625 __put_user(mask, &sc->sc_mask);
5626 __put_user(env->aregs[7], &sc->sc_usp);
5627 __put_user(env->dregs[0], &sc->sc_d0);
5628 __put_user(env->dregs[1], &sc->sc_d1);
5629 __put_user(env->aregs[0], &sc->sc_a0);
5630 __put_user(env->aregs[1], &sc->sc_a1);
5631 __put_user(sr, &sc->sc_sr);
5632 __put_user(env->pc, &sc->sc_pc);
5633 }
5634
5635 static void
5636 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5637 {
5638 int temp;
5639
5640 __get_user(env->aregs[7], &sc->sc_usp);
5641 __get_user(env->dregs[0], &sc->sc_d0);
5642 __get_user(env->dregs[1], &sc->sc_d1);
5643 __get_user(env->aregs[0], &sc->sc_a0);
5644 __get_user(env->aregs[1], &sc->sc_a1);
5645 __get_user(env->pc, &sc->sc_pc);
5646 __get_user(temp, &sc->sc_sr);
5647 cpu_m68k_set_ccr(env, temp);
5648 }
5649
5650 /*
5651 * Determine which stack to use.
5652 */
5653 static inline abi_ulong
5654 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5655 size_t frame_size)
5656 {
5657 unsigned long sp;
5658
5659 sp = regs->aregs[7];
5660
5661 /* This is the X/Open sanctioned signal stack switching. */
5662 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5663 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5664 }
5665
5666 return ((sp - frame_size) & -8UL);
5667 }
5668
5669 static void setup_frame(int sig, struct target_sigaction *ka,
5670 target_sigset_t *set, CPUM68KState *env)
5671 {
5672 struct target_sigframe *frame;
5673 abi_ulong frame_addr;
5674 abi_ulong retcode_addr;
5675 abi_ulong sc_addr;
5676 int i;
5677
5678 frame_addr = get_sigframe(ka, env, sizeof *frame);
5679 trace_user_setup_frame(env, frame_addr);
5680 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5681 goto give_sigsegv;
5682 }
5683
5684 __put_user(sig, &frame->sig);
5685
5686 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5687 __put_user(sc_addr, &frame->psc);
5688
5689 setup_sigcontext(&frame->sc, env, set->sig[0]);
5690
5691 for (i = 1; i < TARGET_NSIG_WORDS; i++) {
5692 __put_user(set->sig[i], &frame->extramask[i - 1]);
5693 }
5694
5695 /* Set up to return from userspace. */
5696
5697 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5698 __put_user(retcode_addr, &frame->pretcode);
5699
5700 /* moveq #,d0; trap #0 */
5701
5702 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5703 (uint32_t *)(frame->retcode));
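/* The 32-bit constant above packs two big-endian m68k opcodes: the high
* halfword, 0x7000 + TARGET_NR_sigreturn, is moveq #TARGET_NR_sigreturn,%d0
* (the number fits in moveq's 8-bit immediate), and the low halfword
* 0x4e40 is trap #0. */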
5704
5705 /* Set up to return from userspace */
5706
5707 env->aregs[7] = frame_addr;
5708 env->pc = ka->_sa_handler;
5709
5710 unlock_user_struct(frame, frame_addr, 1);
5711 return;
5712
5713 give_sigsegv:
5714 force_sigsegv(sig);
5715 }
5716
5717 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
5718 CPUM68KState *env)
5719 {
5720 int i;
5721 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5722
5723 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
5724 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
5725 /* fpiar is not emulated */
5726
5727 for (i = 0; i < 8; i++) {
5728 uint32_t high = env->fregs[i].d.high << 16;
5729 __put_user(high, &fpregs->f_fpregs[i * 3]);
5730 __put_user(env->fregs[i].d.low,
5731 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5732 }
5733 }
5734
5735 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5736 CPUM68KState *env)
5737 {
5738 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5739 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5740
5741 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5742 __put_user(env->dregs[0], &gregs[0]);
5743 __put_user(env->dregs[1], &gregs[1]);
5744 __put_user(env->dregs[2], &gregs[2]);
5745 __put_user(env->dregs[3], &gregs[3]);
5746 __put_user(env->dregs[4], &gregs[4]);
5747 __put_user(env->dregs[5], &gregs[5]);
5748 __put_user(env->dregs[6], &gregs[6]);
5749 __put_user(env->dregs[7], &gregs[7]);
5750 __put_user(env->aregs[0], &gregs[8]);
5751 __put_user(env->aregs[1], &gregs[9]);
5752 __put_user(env->aregs[2], &gregs[10]);
5753 __put_user(env->aregs[3], &gregs[11]);
5754 __put_user(env->aregs[4], &gregs[12]);
5755 __put_user(env->aregs[5], &gregs[13]);
5756 __put_user(env->aregs[6], &gregs[14]);
5757 __put_user(env->aregs[7], &gregs[15]);
5758 __put_user(env->pc, &gregs[16]);
5759 __put_user(sr, &gregs[17]);
5760
5761 target_rt_save_fpu_state(uc, env);
5762
5763 return 0;
5764 }
5765
5766 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
5767 struct target_ucontext *uc)
5768 {
5769 int i;
5770 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5771 uint32_t fpcr;
5772
5773 __get_user(fpcr, &fpregs->f_fpcntl[0]);
5774 cpu_m68k_set_fpcr(env, fpcr);
5775 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
5776 /* fpiar is not emulated */
5777
5778 for (i = 0; i < 8; i++) {
5779 uint32_t high;
5780 __get_user(high, &fpregs->f_fpregs[i * 3]);
5781 env->fregs[i].d.high = high >> 16;
5782 __get_user(env->fregs[i].d.low,
5783 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5784 }
5785 }
5786
5787 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5788 struct target_ucontext *uc)
5789 {
5790 int temp;
5791 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5792
5793 __get_user(temp, &uc->tuc_mcontext.version);
5794 if (temp != TARGET_MCONTEXT_VERSION)
5795 goto badframe;
5796
5797 /* restore passed registers */
5798 __get_user(env->dregs[0], &gregs[0]);
5799 __get_user(env->dregs[1], &gregs[1]);
5800 __get_user(env->dregs[2], &gregs[2]);
5801 __get_user(env->dregs[3], &gregs[3]);
5802 __get_user(env->dregs[4], &gregs[4]);
5803 __get_user(env->dregs[5], &gregs[5]);
5804 __get_user(env->dregs[6], &gregs[6]);
5805 __get_user(env->dregs[7], &gregs[7]);
5806 __get_user(env->aregs[0], &gregs[8]);
5807 __get_user(env->aregs[1], &gregs[9]);
5808 __get_user(env->aregs[2], &gregs[10]);
5809 __get_user(env->aregs[3], &gregs[11]);
5810 __get_user(env->aregs[4], &gregs[12]);
5811 __get_user(env->aregs[5], &gregs[13]);
5812 __get_user(env->aregs[6], &gregs[14]);
5813 __get_user(env->aregs[7], &gregs[15]);
5814 __get_user(env->pc, &gregs[16]);
5815 __get_user(temp, &gregs[17]);
5816 cpu_m68k_set_ccr(env, temp);
5817
5818 target_rt_restore_fpu_state(env, uc);
5819
5820 return 0;
5821
5822 badframe:
5823 return 1;
5824 }
5825
5826 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5827 target_siginfo_t *info,
5828 target_sigset_t *set, CPUM68KState *env)
5829 {
5830 struct target_rt_sigframe *frame;
5831 abi_ulong frame_addr;
5832 abi_ulong retcode_addr;
5833 abi_ulong info_addr;
5834 abi_ulong uc_addr;
5835 int err = 0;
5836 int i;
5837
5838 frame_addr = get_sigframe(ka, env, sizeof *frame);
5839 trace_user_setup_rt_frame(env, frame_addr);
5840 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5841 goto give_sigsegv;
5842 }
5843
5844 __put_user(sig, &frame->sig);
5845
5846 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5847 __put_user(info_addr, &frame->pinfo);
5848
5849 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5850 __put_user(uc_addr, &frame->puc);
5851
5852 tswap_siginfo(&frame->info, info);
5853
5854 /* Create the ucontext */
5855
5856 __put_user(0, &frame->uc.tuc_flags);
5857 __put_user(0, &frame->uc.tuc_link);
5858 __put_user(target_sigaltstack_used.ss_sp,
5859 &frame->uc.tuc_stack.ss_sp);
5860 __put_user(sas_ss_flags(env->aregs[7]),
5861 &frame->uc.tuc_stack.ss_flags);
5862 __put_user(target_sigaltstack_used.ss_size,
5863 &frame->uc.tuc_stack.ss_size);
5864 err |= target_rt_setup_ucontext(&frame->uc, env);
5865
5866 if (err)
5867 goto give_sigsegv;
5868
5869 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5870 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5871 }
5872
5873 /* Set up to return from userspace. */
5874
5875 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5876 __put_user(retcode_addr, &frame->pretcode);
5877
5878 /* moveq #,d0; notb d0; trap #0 */
5879
5880 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5881 (uint32_t *)(frame->retcode + 0));
5882 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
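/* TARGET_NR_rt_sigreturn does not fit in moveq's signed 8-bit immediate,
* so the trampoline loads its byte-wise complement (the ^ 0xff above) into
* %d0 and flips it back with not.b %d0 (opcode 0x4600) before trap #0
* (0x4e40). */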
5883
5884 if (err)
5885 goto give_sigsegv;
5886
5887 /* Set up to return from userspace */
5888
5889 env->aregs[7] = frame_addr;
5890 env->pc = ka->_sa_handler;
5891
5892 unlock_user_struct(frame, frame_addr, 1);
5893 return;
5894
5895 give_sigsegv:
5896 unlock_user_struct(frame, frame_addr, 1);
5897 force_sigsegv(sig);
5898 }
5899
5900 long do_sigreturn(CPUM68KState *env)
5901 {
5902 struct target_sigframe *frame;
5903 abi_ulong frame_addr = env->aregs[7] - 4;
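/* The handler has just returned with rts through frame->pretcode, popping
* that first longword of the frame, so the frame starts 4 bytes below the
* current user stack pointer. */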
5904 target_sigset_t target_set;
5905 sigset_t set;
5906 int i;
5907
5908 trace_user_do_sigreturn(env, frame_addr);
5909 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5910 goto badframe;
5911
5912 /* set blocked signals */
5913
5914 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5915
5916 for (i = 1; i < TARGET_NSIG_WORDS; i++) {
5917 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5918 }
5919
5920 target_to_host_sigset_internal(&set, &target_set);
5921 set_sigmask(&set);
5922
5923 /* restore registers */
5924
5925 restore_sigcontext(env, &frame->sc);
5926
5927 unlock_user_struct(frame, frame_addr, 0);
5928 return -TARGET_QEMU_ESIGRETURN;
5929
5930 badframe:
5931 force_sig(TARGET_SIGSEGV);
5932 return -TARGET_QEMU_ESIGRETURN;
5933 }
5934
5935 long do_rt_sigreturn(CPUM68KState *env)
5936 {
5937 struct target_rt_sigframe *frame;
5938 abi_ulong frame_addr = env->aregs[7] - 4;
5939 sigset_t set;
5940
5941 trace_user_do_rt_sigreturn(env, frame_addr);
5942 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5943 goto badframe;
5944
5945 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5946 set_sigmask(&set);
5947
5948 /* restore registers */
5949
5950 if (target_rt_restore_ucontext(env, &frame->uc))
5951 goto badframe;
5952
5953 if (do_sigaltstack(frame_addr +
5954 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5955 0, get_sp_from_cpustate(env)) == -EFAULT)
5956 goto badframe;
5957
5958 unlock_user_struct(frame, frame_addr, 0);
5959 return -TARGET_QEMU_ESIGRETURN;
5960
5961 badframe:
5962 unlock_user_struct(frame, frame_addr, 0);
5963 force_sig(TARGET_SIGSEGV);
5964 return -TARGET_QEMU_ESIGRETURN;
5965 }
5966
5967 #elif defined(TARGET_ALPHA)
5968
5969 struct target_sigcontext {
5970 abi_long sc_onstack;
5971 abi_long sc_mask;
5972 abi_long sc_pc;
5973 abi_long sc_ps;
5974 abi_long sc_regs[32];
5975 abi_long sc_ownedfp;
5976 abi_long sc_fpregs[32];
5977 abi_ulong sc_fpcr;
5978 abi_ulong sc_fp_control;
5979 abi_ulong sc_reserved1;
5980 abi_ulong sc_reserved2;
5981 abi_ulong sc_ssize;
5982 abi_ulong sc_sbase;
5983 abi_ulong sc_traparg_a0;
5984 abi_ulong sc_traparg_a1;
5985 abi_ulong sc_traparg_a2;
5986 abi_ulong sc_fp_trap_pc;
5987 abi_ulong sc_fp_trigger_sum;
5988 abi_ulong sc_fp_trigger_inst;
5989 };
5990
5991 struct target_ucontext {
5992 abi_ulong tuc_flags;
5993 abi_ulong tuc_link;
5994 abi_ulong tuc_osf_sigmask;
5995 target_stack_t tuc_stack;
5996 struct target_sigcontext tuc_mcontext;
5997 target_sigset_t tuc_sigmask;
5998 };
5999
6000 struct target_sigframe {
6001 struct target_sigcontext sc;
6002 unsigned int retcode[3];
6003 };
6004
6005 struct target_rt_sigframe {
6006 target_siginfo_t info;
6007 struct target_ucontext uc;
6008 unsigned int retcode[3];
6009 };
6010
6011 #define INSN_MOV_R30_R16 0x47fe0410
6012 #define INSN_LDI_R0 0x201f0000
6013 #define INSN_CALLSYS 0x00000083
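/* When no sa_restorer is given, these three words are written to
* frame->retcode as the sigreturn trampoline: copy the stack pointer
* (which points at the frame, whose first member is the sigcontext) into
* $16, the first syscall argument; load the syscall number into $0; then
* trap into the kernel with callsys. */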
6014
6015 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6016 abi_ulong frame_addr, target_sigset_t *set)
6017 {
6018 int i;
6019
6020 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6021 __put_user(set->sig[0], &sc->sc_mask);
6022 __put_user(env->pc, &sc->sc_pc);
6023 __put_user(8, &sc->sc_ps);
6024
6025 for (i = 0; i < 31; ++i) {
6026 __put_user(env->ir[i], &sc->sc_regs[i]);
6027 }
6028 __put_user(0, &sc->sc_regs[31]);
6029
6030 for (i = 0; i < 31; ++i) {
6031 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6032 }
6033 __put_user(0, &sc->sc_fpregs[31]);
6034 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6035
6036 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6037 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6038 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
6039 }
6040
6041 static void restore_sigcontext(CPUAlphaState *env,
6042 struct target_sigcontext *sc)
6043 {
6044 uint64_t fpcr;
6045 int i;
6046
6047 __get_user(env->pc, &sc->sc_pc);
6048
6049 for (i = 0; i < 31; ++i) {
6050 __get_user(env->ir[i], &sc->sc_regs[i]);
6051 }
6052 for (i = 0; i < 31; ++i) {
6053 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6054 }
6055
6056 __get_user(fpcr, &sc->sc_fpcr);
6057 cpu_alpha_store_fpcr(env, fpcr);
6058 }
6059
6060 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6061 CPUAlphaState *env,
6062 unsigned long framesize)
6063 {
6064 abi_ulong sp = env->ir[IR_SP];
6065
6066 /* This is the X/Open sanctioned signal stack switching. */
6067 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6068 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6069 }
6070 return (sp - framesize) & -32;
6071 }
6072
6073 static void setup_frame(int sig, struct target_sigaction *ka,
6074 target_sigset_t *set, CPUAlphaState *env)
6075 {
6076 abi_ulong frame_addr, r26;
6077 struct target_sigframe *frame;
6078 int err = 0;
6079
6080 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6081 trace_user_setup_frame(env, frame_addr);
6082 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6083 goto give_sigsegv;
6084 }
6085
6086 setup_sigcontext(&frame->sc, env, frame_addr, set);
6087
6088 if (ka->sa_restorer) {
6089 r26 = ka->sa_restorer;
6090 } else {
6091 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6092 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6093 &frame->retcode[1]);
6094 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6095 /* imb() */
6096 r26 = frame_addr;
6097 }
6098
6099 unlock_user_struct(frame, frame_addr, 1);
6100
6101 if (err) {
6102 give_sigsegv:
6103 force_sigsegv(sig);
6104 return;
6105 }
6106
6107 env->ir[IR_RA] = r26;
6108 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6109 env->ir[IR_A0] = sig;
6110 env->ir[IR_A1] = 0;
6111 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6112 env->ir[IR_SP] = frame_addr;
6113 }
6114
6115 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6116 target_siginfo_t *info,
6117 target_sigset_t *set, CPUAlphaState *env)
6118 {
6119 abi_ulong frame_addr, r26;
6120 struct target_rt_sigframe *frame;
6121 int i, err = 0;
6122
6123 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6124 trace_user_setup_rt_frame(env, frame_addr);
6125 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6126 goto give_sigsegv;
6127 }
6128
6129 tswap_siginfo(&frame->info, info);
6130
6131 __put_user(0, &frame->uc.tuc_flags);
6132 __put_user(0, &frame->uc.tuc_link);
6133 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6134 __put_user(target_sigaltstack_used.ss_sp,
6135 &frame->uc.tuc_stack.ss_sp);
6136 __put_user(sas_ss_flags(env->ir[IR_SP]),
6137 &frame->uc.tuc_stack.ss_flags);
6138 __put_user(target_sigaltstack_used.ss_size,
6139 &frame->uc.tuc_stack.ss_size);
6140 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6141 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6142 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6143 }
6144
6145 if (ka->sa_restorer) {
6146 r26 = ka->sa_restorer;
6147 } else {
6148 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6149 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6150 &frame->retcode[1]);
6151 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6152 /* imb(); */
6153 r26 = frame_addr;
6154 }
6155
6156 if (err) {
6157 give_sigsegv:
6158 force_sigsegv(sig);
6159 return;
6160 }
6161
6162 env->ir[IR_RA] = r26;
6163 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6164 env->ir[IR_A0] = sig;
6165 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6166 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6167 env->ir[IR_SP] = frame_addr;
6168 }
6169
6170 long do_sigreturn(CPUAlphaState *env)
6171 {
6172 struct target_sigcontext *sc;
6173 abi_ulong sc_addr = env->ir[IR_A0];
6174 target_sigset_t target_set;
6175 sigset_t set;
6176
6177 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6178 goto badframe;
6179 }
6180
6181 target_sigemptyset(&target_set);
6182 __get_user(target_set.sig[0], &sc->sc_mask);
6183
6184 target_to_host_sigset_internal(&set, &target_set);
6185 set_sigmask(&set);
6186
6187 restore_sigcontext(env, sc);
6188 unlock_user_struct(sc, sc_addr, 0);
6189 return -TARGET_QEMU_ESIGRETURN;
6190
6191 badframe:
6192 force_sig(TARGET_SIGSEGV);
6193 return -TARGET_QEMU_ESIGRETURN;
6194 }
6195
6196 long do_rt_sigreturn(CPUAlphaState *env)
6197 {
6198 abi_ulong frame_addr = env->ir[IR_A0];
6199 struct target_rt_sigframe *frame;
6200 sigset_t set;
6201
6202 trace_user_do_rt_sigreturn(env, frame_addr);
6203 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6204 goto badframe;
6205 }
6206 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6207 set_sigmask(&set);
6208
6209 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6210 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6211 uc.tuc_stack),
6212 0, env->ir[IR_SP]) == -EFAULT) {
6213 goto badframe;
6214 }
6215
6216 unlock_user_struct(frame, frame_addr, 0);
6217 return -TARGET_QEMU_ESIGRETURN;
6218
6219
6220 badframe:
6221 unlock_user_struct(frame, frame_addr, 0);
6222 force_sig(TARGET_SIGSEGV);
6223 return -TARGET_QEMU_ESIGRETURN;
6224 }
6225
6226 #elif defined(TARGET_TILEGX)
6227
6228 struct target_sigcontext {
6229 union {
6230 /* General-purpose registers. */
6231 abi_ulong gregs[56];
6232 struct {
6233 abi_ulong __gregs[53];
6234 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
6235 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
6236 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
6237 };
6238 };
6239 abi_ulong pc; /* Program counter. */
6240 abi_ulong ics; /* In Interrupt Critical Section? */
6241 abi_ulong faultnum; /* Fault number. */
6242 abi_ulong pad[5];
6243 };
6244
6245 struct target_ucontext {
6246 abi_ulong tuc_flags;
6247 abi_ulong tuc_link;
6248 target_stack_t tuc_stack;
6249 struct target_sigcontext tuc_mcontext;
6250 target_sigset_t tuc_sigmask; /* mask last for extensibility */
6251 };
6252
6253 struct target_rt_sigframe {
6254 unsigned char save_area[16]; /* caller save area */
6255 struct target_siginfo info;
6256 struct target_ucontext uc;
6257 abi_ulong retcode[2];
6258 };
6259
6260 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6261 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
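/* In the tilegx ABI r10 carries the syscall number (139 == __NR_rt_sigreturn)
* and swint1 is the syscall trap, so these two bundles form the sigreturn
* trampoline stored in frame->retcode when no SA_RESTORER is supplied. */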
6262
6263
6264 static void setup_sigcontext(struct target_sigcontext *sc,
6265 CPUArchState *env, int signo)
6266 {
6267 int i;
6268
6269 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6270 __put_user(env->regs[i], &sc->gregs[i]);
6271 }
6272
6273 __put_user(env->pc, &sc->pc);
6274 __put_user(0, &sc->ics);
6275 __put_user(signo, &sc->faultnum);
6276 }
6277
6278 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6279 {
6280 int i;
6281
6282 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6283 __get_user(env->regs[i], &sc->gregs[i]);
6284 }
6285
6286 __get_user(env->pc, &sc->pc);
6287 }
6288
6289 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6290 size_t frame_size)
6291 {
6292 unsigned long sp = env->regs[TILEGX_R_SP];
6293
6294 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6295 return -1UL;
6296 }
6297
6298 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
6299 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6300 }
6301
6302 sp -= frame_size;
6303 sp &= -16UL;
6304 return sp;
6305 }
6306
6307 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6308 target_siginfo_t *info,
6309 target_sigset_t *set, CPUArchState *env)
6310 {
6311 abi_ulong frame_addr;
6312 struct target_rt_sigframe *frame;
6313 unsigned long restorer;
6314
6315 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6316 trace_user_setup_rt_frame(env, frame_addr);
6317 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6318 goto give_sigsegv;
6319 }
6320
6321 /* Always write at least the signal number for the stack backtracer. */
6322 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6323 /* At sigreturn time, restore the callee-save registers too. */
6324 tswap_siginfo(&frame->info, info);
6325 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can we skip it? */
6326 } else {
6327 __put_user(info->si_signo, &frame->info.si_signo);
6328 }
6329
6330 /* Create the ucontext. */
6331 __put_user(0, &frame->uc.tuc_flags);
6332 __put_user(0, &frame->uc.tuc_link);
6333 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6334 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6335 &frame->uc.tuc_stack.ss_flags);
6336 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6337 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6338
6339 if (ka->sa_flags & TARGET_SA_RESTORER) {
6340 restorer = (unsigned long) ka->sa_restorer;
6341 } else {
6342 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6343 __put_user(INSN_SWINT1, &frame->retcode[1]);
6344 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6345 }
6346 env->pc = (unsigned long) ka->_sa_handler;
6347 env->regs[TILEGX_R_SP] = (unsigned long) frame;
6348 env->regs[TILEGX_R_LR] = restorer;
6349 env->regs[0] = (unsigned long) sig;
6350 env->regs[1] = (unsigned long) &frame->info;
6351 env->regs[2] = (unsigned long) &frame->uc;
6352 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can we skip it? */
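/* Note: the stores to SP, r1 and r2 above use host pointers (frame,
* &frame->info, &frame->uc) rather than guest addresses such as
* frame_addr + offsetof(...); this appears to rely on the guest/host
* identity mapping (guest_base == 0). */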
6353
6354 unlock_user_struct(frame, frame_addr, 1);
6355 return;
6356
6357 give_sigsegv:
6358 force_sigsegv(sig);
6359 }
6360
6361 long do_rt_sigreturn(CPUTLGState *env)
6362 {
6363 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6364 struct target_rt_sigframe *frame;
6365 sigset_t set;
6366
6367 trace_user_do_rt_sigreturn(env, frame_addr);
6368 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6369 goto badframe;
6370 }
6371 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6372 set_sigmask(&set);
6373
6374 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6375 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6376 uc.tuc_stack),
6377 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6378 goto badframe;
6379 }
6380
6381 unlock_user_struct(frame, frame_addr, 0);
6382 return -TARGET_QEMU_ESIGRETURN;
6383
6384
6385 badframe:
6386 unlock_user_struct(frame, frame_addr, 0);
6387 force_sig(TARGET_SIGSEGV);
6388 return -TARGET_QEMU_ESIGRETURN;
6389 }
6390
6391 #elif defined(TARGET_RISCV)
6392
6393 /* Signal handler invocation must be transparent for the code being
6394 interrupted. Complete CPU (hart) state is saved on entry and restored
6395 before returning from the handler. Process sigmask is also saved to block
6396 signals while the handler is running. The handler gets its own stack,
6397 which also doubles as storage for the CPU state and sigmask.
6398
6399 The code below is a QEMU re-implementation of arch/riscv/kernel/signal.c. */
6400
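/* Sketch of the guest-visible setup below: the rt_sigframe is pushed on the
(possibly alternate) stack, the handler is entered with a0 = signal number,
a1 = &frame->info, a2 = &frame->uc, and ra points at the two-instruction
trampoline in the frame, so returning from the handler issues rt_sigreturn. */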
6401 struct target_sigcontext {
6402 abi_long pc;
6403 abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
6404 uint64_t fpr[32];
6405 uint32_t fcsr;
6406 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
6407
6408 struct target_ucontext {
6409 unsigned long uc_flags;
6410 struct target_ucontext *uc_link;
6411 target_stack_t uc_stack;
6412 struct target_sigcontext uc_mcontext;
6413 target_sigset_t uc_sigmask;
6414 };
6415
6416 struct target_rt_sigframe {
6417 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
6418 struct target_siginfo info;
6419 struct target_ucontext uc;
6420 };
6421
6422 static abi_ulong get_sigframe(struct target_sigaction *ka,
6423 CPURISCVState *regs, size_t framesize)
6424 {
6425 abi_ulong sp = regs->gpr[xSP];
6426 int onsigstack = on_sig_stack(sp);
6427
6428 /* redzone */
6429 /* This is the X/Open sanctioned signal stack switching. */
6430 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
6431 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6432 }
6433
6434 sp -= framesize;
6435 sp &= ~3UL; /* align sp on 4-byte boundary */
6436
6437 /* If we are on the alternate signal stack and would overflow it, don't.
6438 Return an always-bogus address instead so we will die with SIGSEGV. */
6439 if (onsigstack && !likely(on_sig_stack(sp))) {
6440 return -1L;
6441 }
6442
6443 return sp;
6444 }
6445
6446 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
6447 {
6448 int i;
6449
6450 __put_user(env->pc, &sc->pc);
6451
6452 for (i = 1; i < 32; i++) {
6453 __put_user(env->gpr[i], &sc->gpr[i - 1]);
6454 }
6455 for (i = 0; i < 32; i++) {
6456 __put_user(env->fpr[i], &sc->fpr[i]);
6457 }
6458
6459 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
6460 __put_user(fcsr, &sc->fcsr);
6461 }
6462
6463 static void setup_ucontext(struct target_ucontext *uc,
6464 CPURISCVState *env, target_sigset_t *set)
6465 {
6466 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
6467 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
6468 abi_ulong ss_size = target_sigaltstack_used.ss_size;
6469
6470 __put_user(0, &(uc->uc_flags));
6471 __put_user(0, &(uc->uc_link));
6472
6473 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
6474 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
6475 __put_user(ss_size, &(uc->uc_stack.ss_size));
6476
6477 int i;
6478 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6479 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
6480 }
6481
6482 setup_sigcontext(&uc->uc_mcontext, env);
6483 }
6484
6485 static inline void install_sigtramp(uint32_t *tramp)
6486 {
6487 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */
6488 __put_user(0x00000073, tramp + 1); /* ecall */
6489 }
6490
6491 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6492 target_siginfo_t *info,
6493 target_sigset_t *set, CPURISCVState *env)
6494 {
6495 abi_ulong frame_addr;
6496 struct target_rt_sigframe *frame;
6497
6498 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6499 trace_user_setup_rt_frame(env, frame_addr);
6500
6501 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6502 goto badframe;
6503 }
6504
6505 setup_ucontext(&frame->uc, env, set);
6506 tswap_siginfo(&frame->info, info);
6507 install_sigtramp(frame->tramp);
6508
6509 env->pc = ka->_sa_handler;
6510 env->gpr[xSP] = frame_addr;
6511 env->gpr[xA0] = sig;
6512 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6513 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6514 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6515
6516 return;
6517
6518 badframe:
6519 unlock_user_struct(frame, frame_addr, 1);
6520 if (sig == TARGET_SIGSEGV) {
6521 ka->_sa_handler = TARGET_SIG_DFL;
6522 }
6523 force_sig(TARGET_SIGSEGV);
6524 }
6525
6526 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
6527 {
6528 int i;
6529
6530 __get_user(env->pc, &sc->pc);
6531
6532 for (i = 1; i < 32; ++i) {
6533 __get_user(env->gpr[i], &sc->gpr[i - 1]);
6534 }
6535 for (i = 0; i < 32; ++i) {
6536 __get_user(env->fpr[i], &sc->fpr[i]);
6537 }
6538
6539 uint32_t fcsr;
6540 __get_user(fcsr, &sc->fcsr);
6541 csr_write_helper(env, fcsr, CSR_FCSR);
6542 }
6543
6544 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
6545 {
6546 sigset_t blocked;
6547 target_sigset_t target_set;
6548 int i;
6549
6550 target_sigemptyset(&target_set);
6551 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6552 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
6553 }
6554
6555 target_to_host_sigset_internal(&blocked, &target_set);
6556 set_sigmask(&blocked);
6557
6558 restore_sigcontext(env, &uc->uc_mcontext);
6559 }
6560
6561 long do_rt_sigreturn(CPURISCVState *env)
6562 {
6563 struct target_rt_sigframe *frame;
6564 abi_ulong frame_addr;
6565
6566 frame_addr = env->gpr[xSP];
6567 trace_user_do_sigreturn(env, frame_addr);
6568 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6569 goto badframe;
6570 }
6571
6572 restore_ucontext(env, &frame->uc);
6573
6574 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6575 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
6576 goto badframe;
6577 }
6578
6579 unlock_user_struct(frame, frame_addr, 0);
6580 return -TARGET_QEMU_ESIGRETURN;
6581
6582 badframe:
6583 unlock_user_struct(frame, frame_addr, 0);
6584 force_sig(TARGET_SIGSEGV);
6585 return 0;
6586 }
6587
6588 #elif defined(TARGET_HPPA)
6589
6590 struct target_sigcontext {
6591 abi_ulong sc_flags;
6592 abi_ulong sc_gr[32];
6593 uint64_t sc_fr[32];
6594 abi_ulong sc_iasq[2];
6595 abi_ulong sc_iaoq[2];
6596 abi_ulong sc_sar;
6597 };
6598
6599 struct target_ucontext {
6600 abi_uint tuc_flags;
6601 abi_ulong tuc_link;
6602 target_stack_t tuc_stack;
6603 abi_uint pad[1];
6604 struct target_sigcontext tuc_mcontext;
6605 target_sigset_t tuc_sigmask;
6606 };
6607
6608 struct target_rt_sigframe {
6609 abi_uint tramp[9];
6610 target_siginfo_t info;
6611 struct target_ucontext uc;
6612 /* hidden location of upper halves of pa2.0 64-bit gregs */
6613 };
6614
6615 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6616 {
6617 int flags = 0;
6618 int i;
6619
6620 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6621
6622 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6623 /* In the gateway page, executing a syscall. */
6624 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6625 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6626 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6627 } else {
6628 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6629 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6630 }
6631 __put_user(0, &sc->sc_iasq[0]);
6632 __put_user(0, &sc->sc_iasq[1]);
6633 __put_user(flags, &sc->sc_flags);
6634
6635 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6636 for (i = 1; i < 32; ++i) {
6637 __put_user(env->gr[i], &sc->sc_gr[i]);
6638 }
6639
6640 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6641 for (i = 1; i < 32; ++i) {
6642 __put_user(env->fr[i], &sc->sc_fr[i]);
6643 }
6644
6645 __put_user(env->cr[CR_SAR], &sc->sc_sar);
6646 }
6647
6648 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6649 {
6650 target_ulong psw;
6651 int i;
6652
6653 __get_user(psw, &sc->sc_gr[0]);
6654 cpu_hppa_put_psw(env, psw);
6655
6656 for (i = 1; i < 32; ++i) {
6657 __get_user(env->gr[i], &sc->sc_gr[i]);
6658 }
6659 for (i = 0; i < 32; ++i) {
6660 __get_user(env->fr[i], &sc->sc_fr[i]);
6661 }
6662 cpu_hppa_loaded_fr0(env);
6663
6664 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6665 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6666 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6667 }
6668
6669 /* No, this doesn't look right, but it's copied straight from the kernel. */
6670 #define PARISC_RT_SIGFRAME_SIZE32 \
6671 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
6672
6673 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6674 target_siginfo_t *info,
6675 target_sigset_t *set, CPUArchState *env)
6676 {
6677 abi_ulong frame_addr, sp, haddr;
6678 struct target_rt_sigframe *frame;
6679 int i;
6680
6681 sp = env->gr[30];
6682 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6683 if (sas_ss_flags(sp) == 0) {
6684 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6685 }
6686 }
6687 frame_addr = QEMU_ALIGN_UP(sp, 64);
6688 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6689
6690 trace_user_setup_rt_frame(env, frame_addr);
6691
6692 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6693 goto give_sigsegv;
6694 }
6695
6696 tswap_siginfo(&frame->info, info);
6697 frame->uc.tuc_flags = 0;
6698 frame->uc.tuc_link = 0;
6699
6700 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6701 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6702 &frame->uc.tuc_stack.ss_flags);
6703 __put_user(target_sigaltstack_used.ss_size,
6704 &frame->uc.tuc_stack.ss_size);
6705
6706 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6707 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6708 }
6709
6710 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6711
6712 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6713 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6714 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6715 __put_user(0x08000240, frame->tramp + 3); /* nop */
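/* i.e. the trampoline above loads the rt_sigreturn number into %r20 and
* enters the kernel through the hppa gateway page (the be,l to
* 0x100(%sr2,%r0)); env->gr[2] below makes it the handler's return address. */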
6716
6717 unlock_user_struct(frame, frame_addr, 1);
6718
6719 env->gr[2] = h2g(frame->tramp);
6720 env->gr[30] = sp;
6721 env->gr[26] = sig;
6722 env->gr[25] = h2g(&frame->info);
6723 env->gr[24] = h2g(&frame->uc);
6724
6725 haddr = ka->_sa_handler;
6726 if (haddr & 2) {
6727 /* Function descriptor. */
6728 target_ulong *fdesc, dest;
6729
6730 haddr &= -4;
6731 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
6732 goto give_sigsegv;
6733 }
6734 __get_user(dest, fdesc);
6735 __get_user(env->gr[19], fdesc + 1);
6736 unlock_user_struct(fdesc, haddr, 1);
6737 haddr = dest;
6738 }
6739 env->iaoq_f = haddr;
6740 env->iaoq_b = haddr + 4;
6741 return;
6742
6743 give_sigsegv:
6744 force_sigsegv(sig);
6745 }
6746
6747 long do_rt_sigreturn(CPUArchState *env)
6748 {
6749 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
6750 struct target_rt_sigframe *frame;
6751 sigset_t set;
6752
6753 trace_user_do_rt_sigreturn(env, frame_addr);
6754 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6755 goto badframe;
6756 }
6757 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6758 set_sigmask(&set);
6759
6760 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6761 unlock_user_struct(frame, frame_addr, 0);
6762
6763 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6764 uc.tuc_stack),
6765 0, env->gr[30]) == -EFAULT) {
6766 goto badframe;
6767 }
6768
6769 unlock_user_struct(frame, frame_addr, 0);
6770 return -TARGET_QEMU_ESIGRETURN;
6771
6772 badframe:
6773 force_sig(TARGET_SIGSEGV);
6774 return -TARGET_QEMU_ESIGRETURN;
6775 }
6776
6777 #else
6778
6779 static void setup_frame(int sig, struct target_sigaction *ka,
6780 target_sigset_t *set, CPUArchState *env)
6781 {
6782 fprintf(stderr, "setup_frame: not implemented\n");
6783 }
6784
6785 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6786 target_siginfo_t *info,
6787 target_sigset_t *set, CPUArchState *env)
6788 {
6789 fprintf(stderr, "setup_rt_frame: not implemented\n");
6790 }
6791
6792 long do_sigreturn(CPUArchState *env)
6793 {
6794 fprintf(stderr, "do_sigreturn: not implemented\n");
6795 return -TARGET_ENOSYS;
6796 }
6797
6798 long do_rt_sigreturn(CPUArchState *env)
6799 {
6800 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
6801 return -TARGET_ENOSYS;
6802 }
6803
6804 #endif
6805
6806 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
6807 struct emulated_sigtable *k)
6808 {
6809 CPUState *cpu = ENV_GET_CPU(cpu_env);
6810 abi_ulong handler;
6811 sigset_t set;
6812 target_sigset_t target_old_set;
6813 struct target_sigaction *sa;
6814 TaskState *ts = cpu->opaque;
6815
6816 trace_user_handle_signal(cpu_env, sig);
6817 /* dequeue signal */
6818 k->pending = 0;
6819
6820 sig = gdb_handlesig(cpu, sig);
6821 if (!sig) {
6822 sa = NULL;
6823 handler = TARGET_SIG_IGN;
6824 } else {
6825 sa = &sigact_table[sig - 1];
6826 handler = sa->_sa_handler;
6827 }
6828
6829 if (do_strace) {
6830 print_taken_signal(sig, &k->info);
6831 }
6832
6833 if (handler == TARGET_SIG_DFL) {
6834 /* default handler: ignore some signals; the others are job control or fatal */
6835 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
6836 kill(getpid(), SIGSTOP);
6837 } else if (sig != TARGET_SIGCHLD &&
6838 sig != TARGET_SIGURG &&
6839 sig != TARGET_SIGWINCH &&
6840 sig != TARGET_SIGCONT) {
6841 dump_core_and_abort(sig);
6842 }
6843 } else if (handler == TARGET_SIG_IGN) {
6844 /* ignore sig */
6845 } else if (handler == TARGET_SIG_ERR) {
6846 dump_core_and_abort(sig);
6847 } else {
6848 /* compute the blocked signals during the handler execution */
6849 sigset_t *blocked_set;
6850
6851 target_to_host_sigset(&set, &sa->sa_mask);
6852 /* SA_NODEFER indicates that the current signal should not be
6853 blocked during the handler */
6854 if (!(sa->sa_flags & TARGET_SA_NODEFER))
6855 sigaddset(&set, target_to_host_signal(sig));
6856
6857 /* save the previous blocked signal state to restore it at the
6858 end of the signal execution (see do_sigreturn) */
6859 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
6860
6861 /* block signals in the handler */
6862 blocked_set = ts->in_sigsuspend ?
6863 &ts->sigsuspend_mask : &ts->signal_mask;
6864 sigorset(&ts->signal_mask, blocked_set, &set);
6865 ts->in_sigsuspend = 0;
6866
6867 /* if the CPU is in VM86 mode, we restore the 32 bit values */
6868 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
6869 {
6870 CPUX86State *env = cpu_env;
6871 if (env->eflags & VM_MASK)
6872 save_v86_state(env);
6873 }
6874 #endif
6875 /* prepare the stack frame of the virtual CPU */
6876 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
6877 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
6878 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
6879 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
6880 || defined(TARGET_RISCV)
6881 /* These targets only implement rt signal frames; there is no traditional (non-rt) signal path. */
6882 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6883 #else
6884 if (sa->sa_flags & TARGET_SA_SIGINFO)
6885 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6886 else
6887 setup_frame(sig, sa, &target_old_set, cpu_env);
6888 #endif
6889 if (sa->sa_flags & TARGET_SA_RESETHAND) {
6890 sa->_sa_handler = TARGET_SIG_DFL;
6891 }
6892 }
6893 }
6894
6895 void process_pending_signals(CPUArchState *cpu_env)
6896 {
6897 CPUState *cpu = ENV_GET_CPU(cpu_env);
6898 int sig;
6899 TaskState *ts = cpu->opaque;
6900 sigset_t set;
6901 sigset_t *blocked_set;
6902
6903 while (atomic_read(&ts->signal_pending)) {
6904 /* FIXME: This is not threadsafe. */
6905 sigfillset(&set);
6906 sigprocmask(SIG_SETMASK, &set, 0);
6907
6908 restart_scan:
6909 sig = ts->sync_signal.pending;
6910 if (sig) {
6911 /* Synchronous signals are forced; see force_sig_info() and its
6912 * callers in Linux.
6913 * Note that not all of our queue_signal() calls in QEMU correspond
6914 * to force_sig_info() calls in Linux (some are send_sig_info()).
6915 * However it seems like a kernel bug to me to allow the process
6916 * to block a synchronous signal since it could then just end up
6917 * looping round and round indefinitely.
6918 */
6919 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
6920 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
6921 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
6922 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
6923 }
6924
6925 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
6926 }
6927
6928 for (sig = 1; sig <= TARGET_NSIG; sig++) {
6929 blocked_set = ts->in_sigsuspend ?
6930 &ts->sigsuspend_mask : &ts->signal_mask;
6931
6932 if (ts->sigtab[sig - 1].pending &&
6933 (!sigismember(blocked_set,
6934 target_to_host_signal_table[sig]))) {
6935 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
6936 /* Restart scan from the beginning, as handle_pending_signal
6937 * might have resulted in a new synchronous signal (eg SIGSEGV).
6938 */
6939 goto restart_scan;
6940 }
6941 }
6942
6943 /* if no signal is pending, unblock signals and recheck (the act
6944 * of unblocking might cause us to take another host signal which
6945 * will set signal_pending again).
6946 */
6947 atomic_set(&ts->signal_pending, 0);
6948 ts->in_sigsuspend = 0;
6949 set = ts->signal_mask;
6950 sigdelset(&set, SIGSEGV);
6951 sigdelset(&set, SIGBUS);
6952 sigprocmask(SIG_SETMASK, &set, 0);
6953 }
6954 ts->in_sigsuspend = 0;
6955 }