/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

/*
 * Horribly complicated - with the bloody RM9000 workarounds enabled,
 * the signal trampoline moves to the end of the structure so we can
 * increase its alignment without breaking software compatibility.
 */
#if ICACHE_REFILLS_WORKAROUND_WAR == 0

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_code[2];		/* signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_code[2];		/* signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

#else

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];
	struct sigcontext sf_sc;	/* hw context */
	sigset_t sf_mask;
	u32 sf_code[8] ____cacheline_aligned;	/* signal trampoline */
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];
	struct siginfo rs_info;
	struct ucontext rs_uc;
	u32 rs_code[8] ____cacheline_aligned;	/* signal trampoline */
};

#endif

/*
 * Helper routines
 */
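
/*
 * Save the live FPU context into the user sigcontext.  The store is made
 * with the FPU owner lock held; if it faults, the relevant sigcontext
 * words are touched with __put_user() to fault the page in and the save
 * is retried.
 */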
static int protected_save_fp_context(struct sigcontext __user *sc)
{
	int err;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(1);
		err = save_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &sc->sc_fpregs[0]) |
			__put_user(0, &sc->sc_fpregs[31]) |
			__put_user(0, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

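/*
 * Restore the FPU context from the user sigcontext, using the same
 * fault-and-retry scheme as protected_save_fp_context() above, except
 * that the sigcontext is probed with __get_user().
 */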
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
	int err, tmp;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(0);
		err = restore_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &sc->sc_fpregs[0]) |
			__get_user(tmp, &sc->sc_fpregs[31]) |
			__get_user(tmp, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

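/*
 * Save the CPU state a signal handler is allowed to see into the user
 * sigcontext: EPC, the general purpose registers, HI/LO (plus the DSP and
 * SmartMIPS state where present), and the FPU context if the task has
 * used the FPU.
 */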
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	used_math = !!used_math();
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context. Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc);
	}
	return err;
}

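/*
 * Check the saved FPU control/status word for cause bits whose
 * corresponding enables are set (or an unimplemented-operation
 * exception).  If any are found, clear them in the sigcontext and return
 * SIGFPE so the caller can raise it; a negative return means the user
 * word could not be accessed.
 */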
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
	int err, sig;

	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
	if (err > 0)
		err = 0;
	err |= protected_restore_fp_context(sc);
	return err ?: sig;
}

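/*
 * The inverse of setup_sigcontext(): restore EPC, the general purpose
 * registers, HI/LO and the optional DSP/SmartMIPS and FPU state from the
 * user sigcontext.  Returns SIGFPE if a pending FPU exception has to be
 * delivered, a negative value for a bad sigcontext, 0 otherwise.
 */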
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU. Give it up. */
		lose_fpu(0);
	}

	return err;
}

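/*
 * Work out where on the user stack the signal frame should go, honouring
 * an alternate signal stack if SA_ONSTACK is in effect, and align the
 * result (to an icache line when the icache refill workaround is active,
 * otherwise to ALMASK).
 */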
void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16-byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	/* This is the X/Open sanctioned signal stack switching. */
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

	return (void __user *)((sp - frame_size) &
			       (ICACHE_REFILLS_WORKAROUND_WAR ?
				~(cpu_icache_line_size() - 1) : ALMASK));
}

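/*
 * Write the two-instruction sigreturn trampoline to the user stack and
 * flush it out of the caches so it can be executed.  The extra stores
 * zero-fill the rest of the eight-word trampoline area used when the
 * icache refill workaround is enabled.
 */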
int install_sigtramp(unsigned int __user *tramp, unsigned int syscall)
{
	int err;

	/*
	 * Set up the return code ...
	 *
	 *         li      v0, __NR__foo_sigreturn
	 *         syscall
	 */

	err = __put_user(0x24020000 + syscall, tramp + 0);
	err |= __put_user(0x0000000c, tramp + 1);
	if (ICACHE_REFILLS_WORKAROUND_WAR) {
		err |= __put_user(0, tramp + 2);
		err |= __put_user(0, tramp + 3);
		err |= __put_user(0, tramp + 4);
		err |= __put_user(0, tramp + 5);
		err |= __put_user(0, tramp + 6);
		err |= __put_user(0, tramp + 7);
	}
	flush_cache_sigtramp((unsigned long) tramp);

	return err;
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *uset;

	uset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, uset, sizeof(sigset_t)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif

asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *unewset;
	size_t sigsetsize;

	/* XXX Don't preclude handling different sized sigset_t's. */
	sigsetsize = regs.regs[5];
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	unewset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
	const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
	stack_t __user *uoss = (stack_t __user *) regs.regs[5];
	unsigned long usp = regs.regs[29];

	return do_sigaltstack(uss, uoss, usp);
}

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

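/*
 * rt_sigreturn: restore the signal mask, register state and alternate
 * signal stack saved in the rt_sigframe, then jump back to the
 * interrupted user context via syscall_exit.
 */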
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	stack_t st;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
	int signr, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	err |= install_sigtramp(frame->sf_code, __NR_sigreturn);

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) frame->sf_code;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
#endif

static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	int signr, sigset_t *set, siginfo_t *info)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn);

	/* Create siginfo. */
	err |= copy_siginfo_to_user(&frame->rs_info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			  &frame->rs_uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[29]),
			  &frame->rs_uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size,
			  &frame->rs_uc.uc_stack.ss_size);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) frame->rs_code;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
#endif
	.setup_rt_frame	= setup_rt_frame,
	.restart	= __NR_restart_syscall
};

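/*
 * Deliver one signal: fix up the syscall restart state recorded in the
 * pt_regs, build the (rt_)sigframe on the user stack and update the
 * blocked signal mask.
 */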
static int handle_signal(unsigned long sig, siginfo_t *info,
	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
	int ret;

	switch (regs->regs[0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		regs->regs[2] = EINTR;
		break;
	case ERESTARTSYS:
		if (!(ka->sa.sa_flags & SA_RESTART)) {
			regs->regs[2] = EINTR;
			break;
		}
		/* fallthrough */
	case ERESTARTNOINTR:	/* Userland will reload $v0. */
		regs->regs[7] = regs->regs[26];
		regs->cp0_epc -= 8;
	}

	regs->regs[0] = 0;	/* Don't deal with this again. */

	if (sig_uses_siginfo(ka))
		ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
	else
		ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

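/*
 * Check for pending signals on the way back to user mode: deliver one if
 * there is, otherwise restart an interrupted system call and restore the
 * saved signal mask.
 */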
static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	sigset_t *oldset;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee! Actually deliver the signal. */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}

		return;
	}

	/*
	 * Whose code doesn't conform to the restartable syscall convention
	 * dies here!!!  The li instruction, a single machine instruction,
	 * must be directly followed by the syscall instruction.
	 */
	if (regs->regs[0]) {
		if (regs->regs[2] == ERESTARTNOHAND ||
		    regs->regs[2] == ERESTARTSYS ||
		    regs->regs[2] == ERESTARTNOINTR) {
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 8;
		}
		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	/* deal with pending signal delivery */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

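/*
 * Select the FPU context save/restore implementation.  On SMP the choice
 * between the hardware FPU and the emulator is deferred to call time via
 * the wrappers above; on UP it is made once here at boot.
 */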
static int signal_setup(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif

	return 0;
}

arch_initcall(signal_setup);