]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - arch/ia64/kernel/signal.c
Merge tag 'powerpc-4.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[mirror_ubuntu-focal-kernel.git] / arch / ia64 / kernel / signal.c
1 /*
2 * Architecture-specific signal handling support.
3 *
4 * Copyright (C) 1999-2004 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 *
7 * Derived from i386 and Alpha versions.
8 */
9
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/unistd.h>
#include <linux/wait.h>
23
24 #include <asm/intrinsics.h>
25 #include <linux/uaccess.h>
26 #include <asm/rse.h>
27 #include <asm/sigcontext.h>
28
29 #include "sigframe.h"
30
31 #define DEBUG_SIG 0
32 #define STACK_ALIGN 16 /* minimal alignment for stack pointer */
33
34 #if _NSIG_WORDS > 1
35 # define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
36 # define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t))
37 #else
38 # define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0])
39 # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
40 #endif
41
/*
 * Restore machine state from the user-level sigcontext @sc back into
 * the kernel-side scratch area @scr (the pt_regs/NaT state that will be
 * loaded when we return to user-level).  Returns 0 on success, non-zero
 * if any user-space access faulted.
 */
static long
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
	unsigned long ip, flags, nat, um, cfm, rsc;
	long err;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* restore scratch that always needs gets updated during signal delivery: */
	err = __get_user(flags, &sc->sc_flags);
	err |= __get_user(nat, &sc->sc_nat);
	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
	err |= __get_user(cfm, &sc->sc_cfm);
	err |= __get_user(um, &sc->sc_um);			/* user mask */
	err |= __get_user(rsc, &sc->sc_ar_rsc);
	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __get_user(scr->pt.pr, &sc->sc_pr);		/* predicates */
	err |= __get_user(scr->pt.b0, &sc->sc_br[0]);		/* b0 (rp) */
	err |= __get_user(scr->pt.b6, &sc->sc_br[6]);		/* b6 */
	err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8);		/* r1 */
	err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8);	/* r8-r11 */
	err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8);	/* r12-r13 */
	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */

	/* bit 63 is the "valid" bit of cr.ifs (cf. setup_sigcontext()) */
	scr->pt.cr_ifs = cfm | (1UL << 63);
	/* never let the restored ar.rsc claim a privilege level above user */
	scr->pt.ar_rsc = rsc | (3 << 2);			/* force PL3 */

	/* establish new instruction pointer: */
	scr->pt.cr_iip = ip & ~0x3UL;		/* bundle address (16-byte aligned) */
	ia64_psr(&scr->pt)->ri = ip & 0x3;	/* slot number within the bundle */
	/* only the user-mask bits of psr may be restored from the sigcontext */
	scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);

	scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Restore most scratch-state only when not in syscall. */
		err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __get_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __get_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8);	/* r2-r3 */
		err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8);	/* r16-r31 */
	}

	/* the high FP partition (sc_fr[32] on, 96 regs x 16 bytes) was saved
	   only when IA64_SC_FLAG_FPH_VALID was set by setup_sigcontext() */
	if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
		struct ia64_psr *psr = ia64_psr(&scr->pt);

		err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
		psr->mfh = 0;	/* drop signal handler's fph contents... */
		/* fph ownership is per-CPU state; keep preemption off while
		   deciding whether to reload or drop it */
		preempt_disable();
		if (psr->dfh)
			ia64_drop_fpu(current);
		else {
			/* We already own the local fph, otherwise psr->dfh wouldn't be 0. */
			__ia64_load_fpu(current->thread.fph);
			ia64_set_local_fpu_owner(current);
		}
		preempt_enable();
	}
	return err;
}
106
/*
 * Copy a kernel siginfo_t to the user-level buffer @to.  siginfo that
 * originated at user-level (si_code < 0) is copied verbatim; for
 * kernel-generated siginfo only the three generic ints plus the union
 * members relevant to the signal's layout are copied, so structure
 * padding is never leaked to user-level.
 * Returns 0 on success, -EFAULT on a failed user-space access.
 */
int
copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from)
{
	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0) {
		/* negative si_code: siginfo came from user-level, copy as-is */
		if (__copy_to_user(to, from, sizeof(siginfo_t)))
			return -EFAULT;
		return 0;
	} else {
		int err;

		/*
		 * If you change siginfo_t structure, please be sure this code is fixed
		 * accordingly. It should never copy any pad contained in the structure
		 * to avoid security leaks, but must copy the generic 3 ints plus the
		 * relevant union member.
		 */
		err = __put_user(from->si_signo, &to->si_signo);
		err |= __put_user(from->si_errno, &to->si_errno);
		err |= __put_user(from->si_code, &to->si_code);
		switch (siginfo_layout(from->si_signo, from->si_code)) {
		case SIL_FAULT:
			err |= __put_user(from->si_flags, &to->si_flags);
			err |= __put_user(from->si_isr, &to->si_isr);
			/* fallthrough: faults also carry si_addr/si_imm */
		case SIL_POLL:
			err |= __put_user(from->si_addr, &to->si_addr);
			err |= __put_user(from->si_imm, &to->si_imm);
			break;
		case SIL_TIMER:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		case SIL_RT:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		case SIL_CHLD:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* fallthrough: SIGCHLD also carries si_pid/si_uid */
		case SIL_KILL:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			break;
		}
		return err;
	}
}
158
159 long
160 ia64_rt_sigreturn (struct sigscratch *scr)
161 {
162 extern char ia64_strace_leave_kernel, ia64_leave_kernel;
163 struct sigcontext __user *sc;
164 struct siginfo si;
165 sigset_t set;
166 long retval;
167
168 sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;
169
170 /*
171 * When we return to the previously executing context, r8 and r10 have already
172 * been setup the way we want them. Indeed, if the signal wasn't delivered while
173 * in a system call, we must not touch r8 or r10 as otherwise user-level state
174 * could be corrupted.
175 */
176 retval = (long) &ia64_leave_kernel;
177 if (test_thread_flag(TIF_SYSCALL_TRACE)
178 || test_thread_flag(TIF_SYSCALL_AUDIT))
179 /*
180 * strace expects to be notified after sigreturn returns even though the
181 * context to which we return may not be in the middle of a syscall.
182 * Thus, the return-value that strace displays for sigreturn is
183 * meaningless.
184 */
185 retval = (long) &ia64_strace_leave_kernel;
186
187 if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
188 goto give_sigsegv;
189
190 if (GET_SIGSET(&set, &sc->sc_mask))
191 goto give_sigsegv;
192
193 set_current_blocked(&set);
194
195 if (restore_sigcontext(sc, scr))
196 goto give_sigsegv;
197
198 #if DEBUG_SIG
199 printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
200 current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
201 #endif
202 if (restore_altstack(&sc->sc_stack))
203 goto give_sigsegv;
204 return retval;
205
206 give_sigsegv:
207 si.si_signo = SIGSEGV;
208 si.si_errno = 0;
209 si.si_code = SI_KERNEL;
210 si.si_pid = task_pid_vnr(current);
211 si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
212 si.si_addr = sc;
213 force_sig_info(SIGSEGV, &si, current);
214 return retval;
215 }
216
217 /*
218 * This does just the minimum required setup of sigcontext.
219 * Specifically, it only installs data that is either not knowable at
220 * the user-level or that gets modified before execution in the
221 * trampoline starts. Everything else is done at the user-level.
222 */
static long
setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr)
{
	unsigned long flags = 0, ifs, cfm, nat;
	long err = 0;

	ifs = scr->pt.cr_ifs;

	/* record delivery conditions so sigreturn knows how to restore */
	if (on_sig_stack((unsigned long) sc))
		flags |= IA64_SC_FLAG_ONSTACK;
	if ((ifs & (1UL << 63)) == 0)
		/* if cr_ifs doesn't have the valid bit set, we got here through a syscall */
		flags |= IA64_SC_FLAG_IN_SYSCALL;
	cfm = ifs & ((1UL << 38) - 1);		/* low 38 bits of ifs hold the frame marker */
	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID)) {
		/* save the high FP partition (96 regs x 16 bytes, from sc_fr[32]) */
		flags |= IA64_SC_FLAG_FPH_VALID;
		err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16);
	}

	nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);

	err |= __put_user(flags, &sc->sc_flags);
	err |= __put_user(nat, &sc->sc_nat);
	err |= PUT_SIGSET(mask, &sc->sc_mask);
	err |= __put_user(cfm, &sc->sc_cfm);
	err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);	/* user-mask bits only */
	err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
	err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat);		/* ar.unat */
	err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);		/* ar.fpsr */
	err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __put_user(scr->pt.pr, &sc->sc_pr);			/* predicates */
	err |= __put_user(scr->pt.b0, &sc->sc_br[0]);			/* b0 (rp) */
	err |= __put_user(scr->pt.b6, &sc->sc_br[6]);			/* b6 */
	err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8);		/* r1 */
	err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8);		/* r8-r11 */
	err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8);	/* r12-r13 */
	err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8);		/* r15 */
	/* ip = bundle address with the slot number encoded in the low 2 bits */
	err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
		err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __put_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __put_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8);		/* r2-r3 */
		err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8);	/* r16-r31 */
	}
	return err;
}
274
275 /*
276 * Check whether the register-backing store is already on the signal stack.
277 */
278 static inline int
279 rbs_on_sig_stack (unsigned long bsp)
280 {
281 return (bsp - current->sas_ss_sp < current->sas_ss_size);
282 }
283
284 static long
285 force_sigsegv_info (int sig, void __user *addr)
286 {
287 unsigned long flags;
288 struct siginfo si;
289
290 if (sig == SIGSEGV) {
291 /*
292 * Acquiring siglock around the sa_handler-update is almost
293 * certainly overkill, but this isn't a
294 * performance-critical path and I'd rather play it safe
295 * here than having to debug a nasty race if and when
296 * something changes in kernel/signal.c that would make it
297 * no longer safe to modify sa_handler without holding the
298 * lock.
299 */
300 spin_lock_irqsave(&current->sighand->siglock, flags);
301 current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
302 spin_unlock_irqrestore(&current->sighand->siglock, flags);
303 }
304 si.si_signo = SIGSEGV;
305 si.si_errno = 0;
306 si.si_code = SI_KERNEL;
307 si.si_pid = task_pid_vnr(current);
308 si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
309 si.si_addr = addr;
310 force_sig_info(SIGSEGV, &si, current);
311 return 1;
312 }
313
/*
 * Build the user-level signal frame (on the alternate stack when
 * SA_ONSTACK requests it) and redirect the interrupted context so that
 * execution resumes in the kernel's signal trampoline, which invokes
 * the handler.  Returns 0 on success; on failure a SIGSEGV has been
 * forced and a non-zero value is returned.
 */
static long
setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
{
	extern char __kernel_sigtramp[];
	unsigned long tramp_addr, new_rbs = 0, new_sp;
	struct sigframe __user *frame;
	long err;

	new_sp = scr->pt.r12;	/* r12 holds the user memory stack pointer */
	tramp_addr = (unsigned long) __kernel_sigtramp;
	if (ksig->ka.sa.sa_flags & SA_ONSTACK) {
		int onstack = sas_ss_flags(new_sp);

		if (onstack == 0) {
			/* not on the alternate stack yet: switch to its top */
			new_sp = current->sas_ss_sp + current->sas_ss_size;
			/*
			 * We need to check for the register stack being on the
			 * signal stack separately, because it's switched
			 * separately (memory stack is switched in the kernel,
			 * register stack is switched in the signal trampoline).
			 */
			if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
				new_rbs = ALIGN(current->sas_ss_sp,
						sizeof(long));
		} else if (onstack == SS_ONSTACK) {
			unsigned long check_sp;

			/*
			 * If we are on the alternate signal stack and would
			 * overflow it, don't. Return an always-bogus address
			 * instead so we will die with SIGSEGV.
			 */
			check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
			if (!likely(on_sig_stack(check_sp)))
				return force_sigsegv_info(ksig->sig, (void __user *)
							  check_sp);
		}
	}
	/* carve the frame out below the stack pointer, STACK_ALIGN-aligned */
	frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return force_sigsegv_info(ksig->sig, frame);

	/* arg0..arg2 become the handler's (signum, siginfo, sigcontext) arguments */
	err  = __put_user(ksig->sig, &frame->arg0);
	err |= __put_user(&frame->info, &frame->arg1);
	err |= __put_user(&frame->sc, &frame->arg2);
	err |= __put_user(new_rbs, &frame->sc.sc_rbs_base);
	err |= __put_user(0, &frame->sc.sc_loadrs);	/* initialize to zero */
	err |= __put_user(ksig->ka.sa.sa_handler, &frame->handler);

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12);
	err |= setup_sigcontext(&frame->sc, set, scr);

	if (unlikely(err))
		return force_sigsegv_info(ksig->sig, frame);

	scr->pt.r12 = (unsigned long) frame - 16;	/* new stack pointer */
	scr->pt.ar_fpsr = FPSR_DEFAULT;			/* reset fpsr for signal handler */
	scr->pt.cr_iip = tramp_addr;			/* resume in the trampoline... */
	ia64_psr(&scr->pt)->ri = 0;			/* start executing in first slot */
	ia64_psr(&scr->pt)->be = 0;			/* force little-endian byte-order */
	/*
	 * Force the interruption function mask to zero.  This has no effect when a
	 * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is
	 * ignored), but it has the desirable effect of making it possible to deliver a
	 * signal with an incomplete register frame (which happens when a mandatory RSE
	 * load faults).  Furthermore, it has no negative effect on the getting the user's
	 * dirty partition preserved, because that's governed by scr->pt.loadrs.
	 */
	scr->pt.cr_ifs = (1UL << 63);

	/*
	 * Note: this affects only the NaT bits of the scratch regs (the ones saved in
	 * pt_regs), which is exactly what we want.
	 */
	scr->scratch_unat = 0;	/* ensure NaT bits of r12 is clear */

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n",
	       current->comm, current->pid, ksig->sig, scr->pt.r12, frame->sc.sc_ip, frame->handler);
#endif
	return 0;
}
399
400 static long
401 handle_signal (struct ksignal *ksig, struct sigscratch *scr)
402 {
403 int ret = setup_frame(ksig, sigmask_to_save(), scr);
404
405 if (!ret)
406 signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
407
408 return ret;
409 }
410
411 /*
412 * Note that `init' is a special process: it doesn't get signals it doesn't want to
413 * handle. Thus you cannot kill init even with a SIGKILL even by mistake.
414 */
/*
 * Main signal-delivery entry point, called on the way back to
 * user-level.  @in_syscall is non-zero when the interrupted context was
 * a system call (and may therefore need restarting).  Handles syscall
 * restart, delivers one pending signal via handle_signal(), and
 * restores the saved sigmask when nothing was delivered.
 */
void
ia64_do_signal (struct sigscratch *scr, long in_syscall)
{
	long restart = in_syscall;
	/* r8 holds the (positive) syscall error code; valid only while r10 == -1 */
	long errno = scr->pt.r8;
	struct ksignal ksig;

	/*
	 * This only loops in the rare cases of handle_signal() failing, in which case we
	 * need to push through a forced SIGSEGV.
	 */
	while (1) {
		get_signal(&ksig);

		/*
		 * get_signal_to_deliver() may have run a debugger (via notify_parent())
		 * and the debugger may have modified the state (e.g., to arrange for an
		 * inferior call), thus it's important to check for restarting _after_
		 * get_signal_to_deliver().
		 */
		if ((long) scr->pt.r10 != -1)
			/*
			 * A system calls has to be restarted only if one of the error codes
			 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned.  If r10
			 * isn't -1 then r8 doesn't hold an error code and we don't need to
			 * restart the syscall, so we can clear the "restart" flag here.
			 */
			restart = 0;

		/* no (more) pending signals to deliver */
		if (ksig.sig <= 0)
			break;

		if (unlikely(restart)) {
			switch (errno) {
			      case ERESTART_RESTARTBLOCK:
			      case ERESTARTNOHAND:
				/* a handler is about to run: fail the syscall with EINTR */
				scr->pt.r8 = EINTR;
				/* note: scr->pt.r10 is already -1 */
				break;

			      case ERESTARTSYS:
				if ((ksig.ka.sa.sa_flags & SA_RESTART) == 0) {
					scr->pt.r8 = EINTR;
					/* note: scr->pt.r10 is already -1 */
					break;
				}
				/* fallthrough: SA_RESTART set, restart like ERESTARTNOINTR */
			      case ERESTARTNOINTR:
				/* back ip up so the syscall instruction re-executes */
				ia64_decrement_ip(&scr->pt);
				restart = 0;	/* don't restart twice if handle_signal() fails... */
			}
		}

		/*
		 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
		 * continue to iterate in this loop so we can deliver the SIGSEGV...
		 */
		if (handle_signal(&ksig, scr))
			return;
	}

	/* Did we come from a system call? */
	if (restart) {
		/* Restart the system call - no handlers present */
		if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
		    || errno == ERESTART_RESTARTBLOCK)
		{
			/*
			 * Note: the syscall number is in r15 which is saved in
			 * pt_regs so all we need to do here is adjust ip so that
			 * the "break" instruction gets re-executed.
			 */
			ia64_decrement_ip(&scr->pt);
			if (errno == ERESTART_RESTARTBLOCK)
				scr->pt.r15 = __NR_restart_syscall;
		}
	}

	/* if there's no signal to deliver, we just put the saved sigmask
	 * back */
	restore_saved_sigmask();
}