/*
 * Architecture-specific signal handling support.
 *
 * Copyright (C) 1999-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from i386 and Alpha versions.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/unistd.h>
#include <linux/wait.h>

#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>

#include "sigframe.h"

#define DEBUG_SIG	0
#define STACK_ALIGN	16	/* minimal alignment for stack pointer */
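/* all signals except SIGKILL and SIGSTOP may be blocked: */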
#define _BLOCKABLE	(~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

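/*
 * With a multi-word sigset_t the whole set has to be copied; with a single
 * word, a __put_user()/__get_user() of sig[0] avoids the copy overhead.
 */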
#if _NSIG_WORDS > 1
# define PUT_SIGSET(k,u)	__copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
# define GET_SIGSET(k,u)	__copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t))
#else
# define PUT_SIGSET(k,u)	__put_user((k)->sig[0], &(u)->sig[0])
# define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])
#endif

long
ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (!access_ok(VERIFY_READ, uset, sigsetsize))
		return -EFAULT;

	if (GET_SIGSET(&set, uset))
		return -EFAULT;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		oldset = current->blocked;
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * The return below usually returns to the signal handler. We need to
	 * pre-set the correct error code here to ensure that the right values
	 * get saved in sigcontext by ia64_do_signal.
	 */
	scr->pt.r8 = EINTR;
	scr->pt.r10 = -1;
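	/* (r10 == -1 marks a failed syscall on ia64; r8 then holds the positive errno) */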

	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (ia64_do_signal(&oldset, scr, 1))
			return -EINTR;
	}
}

asmlinkage long
sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
		 long arg3, long arg4, long arg5, long arg6, long arg7,
		 struct pt_regs regs)
{
	return do_sigaltstack(uss, uoss, regs.r12);
}

static long
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
	unsigned long ip, flags, nat, um, cfm, rsc;
	long err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* restore scratch state that always needs to get updated during signal delivery: */
	err  = __get_user(flags, &sc->sc_flags);
	err |= __get_user(nat, &sc->sc_nat);
	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
	err |= __get_user(cfm, &sc->sc_cfm);
	err |= __get_user(um, &sc->sc_um);			/* user mask */
	err |= __get_user(rsc, &sc->sc_ar_rsc);
	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __get_user(scr->pt.pr, &sc->sc_pr);		/* predicates */
	err |= __get_user(scr->pt.b0, &sc->sc_br[0]);		/* b0 (rp) */
	err |= __get_user(scr->pt.b6, &sc->sc_br[6]);		/* b6 */
	err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8);		/* r1 */
	err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8);	/* r8-r11 */
	err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8);	/* r12-r13 */
	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */

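	/* bit 63 of cr.ifs is the "valid" bit; set it so the restored frame info is honored: */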
	scr->pt.cr_ifs = cfm | (1UL << 63);
	scr->pt.ar_rsc = rsc | (3 << 2);	/* force PL3 */

	/* establish new instruction pointer: */
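	/* (the low two bits of the saved ip select the instruction slot within the bundle) */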
	scr->pt.cr_iip = ip & ~0x3UL;
	ia64_psr(&scr->pt)->ri = ip & 0x3;
	scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);

	scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);

	if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
		/* Restore most scratch-state only when not in syscall. */
		err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __get_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __get_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8);	/* r2-r3 */
		err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8);	/* r16-r31 */
	}

	if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
		struct ia64_psr *psr = ia64_psr(&scr->pt);

		__copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
		psr->mfh = 0;	/* drop signal handler's fph contents... */
		preempt_disable();
		if (psr->dfh)
			ia64_drop_fpu(current);
		else {
			/* We already own the local fph, otherwise psr->dfh wouldn't be 0. */
			__ia64_load_fpu(current->thread.fph);
			ia64_set_local_fpu_owner(current);
		}
		preempt_enable();
	}
	return err;
}

int
copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from)
{
	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0) {
		if (__copy_to_user(to, from, sizeof(siginfo_t)))
			return -EFAULT;
		return 0;
	} else {
		int err;

		/*
		 * If you change siginfo_t structure, please be sure this code is fixed
		 * accordingly. It should never copy any pad contained in the structure
		 * to avoid security leaks, but must copy the generic 3 ints plus the
		 * relevant union member.
		 */
		err  = __put_user(from->si_signo, &to->si_signo);
		err |= __put_user(from->si_errno, &to->si_errno);
		err |= __put_user((short)from->si_code, &to->si_code);
		switch (from->si_code >> 16) {
		case __SI_FAULT >> 16:
			err |= __put_user(from->si_flags, &to->si_flags);
			err |= __put_user(from->si_isr, &to->si_isr);
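			/* fall through: faults also report the __SI_POLL members below */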
		case __SI_POLL >> 16:
			err |= __put_user(from->si_addr, &to->si_addr);
			err |= __put_user(from->si_imm, &to->si_imm);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		case __SI_RT >> 16:	/* Not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_ptr, &to->si_ptr);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
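			/* fall through: SIGCHLD siginfo also carries si_uid and si_pid */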
		default:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_pid, &to->si_pid);
			break;
		}
		return err;
	}
}

long
ia64_rt_sigreturn (struct sigscratch *scr)
{
	extern char ia64_strace_leave_kernel, ia64_leave_kernel;
	struct sigcontext __user *sc;
	struct siginfo si;
	sigset_t set;
	long retval;

	sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc;

	/*
	 * When we return to the previously executing context, r8 and r10 have already
	 * been set up the way we want them. Indeed, if the signal wasn't delivered while
	 * in a system call, we must not touch r8 or r10 as otherwise user-level state
	 * could be corrupted.
	 */
	retval = (long) &ia64_leave_kernel;
	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    || test_thread_flag(TIF_SYSCALL_AUDIT))
		/*
		 * strace expects to be notified after sigreturn returns even though the
		 * context to which we return may not be in the middle of a syscall.
		 * Thus, the return-value that strace displays for sigreturn is
		 * meaningless.
		 */
		retval = (long) &ia64_strace_leave_kernel;

	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
		goto give_sigsegv;

	if (GET_SIGSET(&set, &sc->sc_mask))
		goto give_sigsegv;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	{
		current->blocked = set;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(sc, scr))
		goto give_sigsegv;

#if DEBUG_SIG
	printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
	       current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
	/*
	 * It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12);
	return retval;

  give_sigsegv:
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = current->pid;
	si.si_uid = current->uid;
	si.si_addr = sc;
	force_sig_info(SIGSEGV, &si, current);
	return retval;
}

/*
 * This does just the minimum required setup of sigcontext.
 * Specifically, it only installs data that is either not knowable at
 * the user-level or that gets modified before execution in the
 * trampoline starts. Everything else is done at the user-level.
 */
static long
setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr)
{
	unsigned long flags = 0, ifs, cfm, nat;
	long err;

	ifs = scr->pt.cr_ifs;

	if (on_sig_stack((unsigned long) sc))
		flags |= IA64_SC_FLAG_ONSTACK;
	if ((ifs & (1UL << 63)) == 0)
		/* if cr_ifs doesn't have the valid bit set, we got here through a syscall */
		flags |= IA64_SC_FLAG_IN_SYSCALL;
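	/* the current frame marker occupies the low 38 bits of cr.ifs: */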
	cfm = ifs & ((1UL << 38) - 1);
	ia64_flush_fph(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID)) {
		flags |= IA64_SC_FLAG_FPH_VALID;
		__copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16);
	}

	nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);

	err  = __put_user(flags, &sc->sc_flags);
	err |= __put_user(nat, &sc->sc_nat);
	err |= PUT_SIGSET(mask, &sc->sc_mask);
	err |= __put_user(cfm, &sc->sc_cfm);
	err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
	err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
	err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat);		/* ar.unat */
	err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);		/* ar.fpsr */
	err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
	err |= __put_user(scr->pt.pr, &sc->sc_pr);			/* predicates */
	err |= __put_user(scr->pt.b0, &sc->sc_br[0]);			/* b0 (rp) */
	err |= __put_user(scr->pt.b6, &sc->sc_br[6]);			/* b6 */
	err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8);		/* r1 */
	err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8);		/* r8-r11 */
	err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8);	/* r12-r13 */
	err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8);		/* r15 */
	err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);

	if (flags & IA64_SC_FLAG_IN_SYSCALL) {
		/* Clear scratch registers if the signal interrupted a system call. */
		err |= __put_user(0, &sc->sc_ar_ccv);				/* ar.ccv */
		err |= __put_user(0, &sc->sc_br[7]);				/* b7 */
		err |= __put_user(0, &sc->sc_gr[14]);				/* r14 */
		err |= __clear_user(&sc->sc_ar25, 2*8);				/* ar.csd & ar.ssd */
		err |= __clear_user(&sc->sc_gr[2], 2*8);			/* r2-r3 */
		err |= __clear_user(&sc->sc_gr[16], 16*8);			/* r16-r31 */
	} else {
		/* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
		err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);		/* ar.ccv */
		err |= __put_user(scr->pt.b7, &sc->sc_br[7]);			/* b7 */
		err |= __put_user(scr->pt.r14, &sc->sc_gr[14]);			/* r14 */
		err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8);	/* ar.csd & ar.ssd */
		err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8);		/* r2-r3 */
		err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8);	/* r16-r31 */
	}
	return err;
}

/*
 * Check whether the register-backing store is already on the signal stack.
 */
static inline int
rbs_on_sig_stack (unsigned long bsp)
{
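	/* a single unsigned compare: a bsp below sas_ss_sp wraps around and fails too */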
	return (bsp - current->sas_ss_sp < current->sas_ss_size);
}

static long
force_sigsegv_info (int sig, void __user *addr)
{
	unsigned long flags;
	struct siginfo si;

	if (sig == SIGSEGV) {
		/*
		 * Acquiring siglock around the sa_handler-update is almost
		 * certainly overkill, but this isn't a
		 * performance-critical path and I'd rather play it safe
		 * here than having to debug a nasty race if and when
		 * something changes in kernel/signal.c that would make it
		 * no longer safe to modify sa_handler without holding the
		 * lock.
		 */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = SI_KERNEL;
	si.si_pid = current->pid;
	si.si_uid = current->uid;
	si.si_addr = addr;
	force_sig_info(SIGSEGV, &si, current);
	return 0;
}

static long
setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
	     struct sigscratch *scr)
{
	extern char __kernel_sigtramp[];
	unsigned long tramp_addr, new_rbs = 0, new_sp;
	struct sigframe __user *frame;
	long err;

	new_sp = scr->pt.r12;
	tramp_addr = (unsigned long) __kernel_sigtramp;
	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
		new_sp = current->sas_ss_sp + current->sas_ss_size;
		/*
		 * We need to check for the register stack being on the signal stack
		 * separately, because it's switched separately (memory stack is switched
		 * in the kernel, register stack is switched in the signal trampoline).
		 */
		if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
			new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
	}
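	/* round the frame start down to a STACK_ALIGN (16-byte) boundary: */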
	frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return force_sigsegv_info(sig, frame);

	err  = __put_user(sig, &frame->arg0);
	err |= __put_user(&frame->info, &frame->arg1);
	err |= __put_user(&frame->sc, &frame->arg2);
	err |= __put_user(new_rbs, &frame->sc.sc_rbs_base);
	err |= __put_user(0, &frame->sc.sc_loadrs);	/* initialize to zero */
	err |= __put_user(ka->sa.sa_handler, &frame->handler);

	err |= copy_siginfo_to_user(&frame->info, info);

	err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
	err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
	err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags);
	err |= setup_sigcontext(&frame->sc, set, scr);

	if (unlikely(err))
		return force_sigsegv_info(sig, frame);

	scr->pt.r12 = (unsigned long) frame - 16;	/* new stack pointer */
	scr->pt.ar_fpsr = FPSR_DEFAULT;			/* reset fpsr for signal handler */
	scr->pt.cr_iip = tramp_addr;
	ia64_psr(&scr->pt)->ri = 0;			/* start executing in first slot */
	ia64_psr(&scr->pt)->be = 0;			/* force little-endian byte-order */
	/*
	 * Force the interruption function mask to zero. This has no effect when a
	 * system call got interrupted by a signal (since, in that case, scr->pt.cr_ifs is
	 * ignored), but it has the desirable effect of making it possible to deliver a
	 * signal with an incomplete register frame (which happens when a mandatory RSE
	 * load faults). Furthermore, it has no negative effect on getting the user's
	 * dirty partition preserved, because that is governed by scr->pt.loadrs.
	 */
	scr->pt.cr_ifs = (1UL << 63);

	/*
	 * Note: this affects only the NaT bits of the scratch regs (the ones saved in
	 * pt_regs), which is exactly what we want.
	 */
	scr->scratch_unat = 0;	/* ensure the NaT bits of r12 are clear */

#if DEBUG_SIG
	printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n",
	       current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler);
#endif
	return 1;
}

static long
handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
	       struct sigscratch *scr)
{
	if (IS_IA32_PROCESS(&scr->pt)) {
		/* send signal to IA-32 process */
		if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
			return 0;
	} else
		/* send signal to IA-64 process */
		if (!setup_frame(sig, ka, info, oldset, scr))
			return 0;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 1;
}

/*
 * Note that `init' is a special process: it doesn't get signals it doesn't want to
 * handle. Thus you cannot kill init, not even with SIGKILL, not even by mistake.
 */
long
ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{
	struct k_sigaction ka;
	siginfo_t info;
	long restart = in_syscall;
	long errno = scr->pt.r8;
#	define ERR_CODE(c)	(IS_IA32_PROCESS(&scr->pt) ? -(c) : (c))

	/*
	 * In the ia64_leave_kernel code path, we want the common case to go fast, which
	 * is why we may in certain cases get here from kernel mode. Just return without
	 * doing anything if so.
	 */
	if (!user_mode(&scr->pt))
		return 0;

	if (!oldset)
		oldset = &current->blocked;

	/*
	 * This only loops in the rare cases of handle_signal() failing, in which case we
	 * need to push through a forced SIGSEGV.
	 */
	while (1) {
		int signr = get_signal_to_deliver(&info, &ka, &scr->pt, NULL);

		/*
		 * get_signal_to_deliver() may have run a debugger (via notify_parent())
		 * and the debugger may have modified the state (e.g., to arrange for an
		 * inferior call), thus it's important to check for restarting _after_
		 * get_signal_to_deliver().
		 */
		if (IS_IA32_PROCESS(&scr->pt)) {
			if (in_syscall) {
				if (errno >= 0)
					restart = 0;
				else
					errno = -errno;
			}
		} else if ((long) scr->pt.r10 != -1)
			/*
			 * A system call has to be restarted only if one of the error codes
			 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
			 * isn't -1 then r8 doesn't hold an error code and we don't need to
			 * restart the syscall, so we can clear the "restart" flag here.
			 */
			restart = 0;

		if (signr <= 0)
			break;

		if (unlikely(restart)) {
			switch (errno) {
			case ERESTART_RESTARTBLOCK:
			case ERESTARTNOHAND:
				scr->pt.r8 = ERR_CODE(EINTR);
				/* note: scr->pt.r10 is already -1 */
				break;

			case ERESTARTSYS:
				if ((ka.sa.sa_flags & SA_RESTART) == 0) {
					scr->pt.r8 = ERR_CODE(EINTR);
					/* note: scr->pt.r10 is already -1 */
					break;
				}
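				/* fall through: with SA_RESTART, restart just like ERESTARTNOINTR */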
			case ERESTARTNOINTR:
				if (IS_IA32_PROCESS(&scr->pt)) {
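					/*
					 * Recover the entry value of eax (saved in r1)
					 * and back the ip up over the two-byte
					 * "int 0x80" so the syscall is reissued.
					 */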
					scr->pt.r8 = scr->pt.r1;
					scr->pt.cr_iip -= 2;
				} else
					ia64_decrement_ip(&scr->pt);
				restart = 0;	/* don't restart twice if handle_signal() fails... */
			}
		}

		/*
		 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
		 * continue to iterate in this loop so we can deliver the SIGSEGV...
		 */
		if (handle_signal(signr, &ka, &info, oldset, scr))
			return 1;
	}

	/* Did we come from a system call? */
	if (restart) {
		/* Restart the system call - no handlers present */
		if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
		    || errno == ERESTART_RESTARTBLOCK)
		{
			if (IS_IA32_PROCESS(&scr->pt)) {
				scr->pt.r8 = scr->pt.r1;
				scr->pt.cr_iip -= 2;
				if (errno == ERESTART_RESTARTBLOCK)
					scr->pt.r8 = 0;	/* x86 version of __NR_restart_syscall */
			} else {
				/*
				 * Note: the syscall number is in r15 which is saved in
				 * pt_regs so all we need to do here is adjust ip so that
				 * the "break" instruction gets re-executed.
				 */
				ia64_decrement_ip(&scr->pt);
				if (errno == ERESTART_RESTARTBLOCK)
					scr->pt.r15 = __NR_restart_syscall;
			}
		}
	}
	return 0;
}

/* Set a delayed signal that was detected in MCA/INIT/NMI/PMI context where it
 * could not be delivered.  It is important that the target process is not
 * allowed to do any more work in user space.  Possible cases for the target
 * process:
 *
 * - It is sleeping and will wake up soon.  Store the data in the current task,
 *   the signal will be sent when the current task returns from the next
 *   interrupt.
 *
 * - It is running in user context.  Store the data in the current task, the
 *   signal will be sent when the current task returns from the next interrupt.
 *
 * - It is running in kernel context on this or another cpu and will return to
 *   user context.  Store the data in the target task, the signal will be sent
 *   to itself when the target task returns to user space.
 *
 * - It is running in kernel context on this cpu and will sleep before
 *   returning to user context.  Because this is also the current task, the
 *   signal will not get delivered and the task could sleep indefinitely.
 *   Store the data in the idle task for this cpu, the signal will be sent
 *   after the idle task processes its next interrupt.
 *
 * To cover all cases, store the data in the target task, the current task and
 * the idle task on this cpu.  Whatever happens, the signal will be delivered
 * to the target task before it can do any useful user space work.  Multiple
 * deliveries have no unwanted side effects.
 *
 * Note: This code is executed in MCA/INIT/NMI/PMI context, with interrupts
 * disabled.  It must not take any locks nor use kernel structures or services
 * that require locks.
 */

/* To ensure that we get the right pid, check its start time.  To avoid extra
 * include files in thread_info.h, convert the task start_time to unsigned long,
 * giving us a cycle time of > 580 years.
 */
static inline unsigned long
start_time_ul(const struct task_struct *t)
{
	return t->start_time.tv_sec * NSEC_PER_SEC + t->start_time.tv_nsec;
}

void
set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
{
	struct task_struct *t;
	unsigned long start_time = 0;
	int i;

	for (i = 1; i <= 3; ++i) {
		switch (i) {
		case 1:
			t = find_task_by_pid(pid);
			if (t)
				start_time = start_time_ul(t);
			break;
		case 2:
			t = current;
			break;
		default:
			t = idle_task(smp_processor_id());
			break;
		}

		if (!t)
			return;
		task_thread_info(t)->sigdelayed.signo = signo;
		task_thread_info(t)->sigdelayed.code = code;
		task_thread_info(t)->sigdelayed.addr = addr;
		task_thread_info(t)->sigdelayed.start_time = start_time;
		task_thread_info(t)->sigdelayed.pid = pid;
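		/* make the sigdelayed stores visible before the flag is set: */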
		wmb();
		set_tsk_thread_flag(t, TIF_SIGDELAYED);
	}
}

/* Called from entry.S when it detects TIF_SIGDELAYED, a delayed signal that
 * was detected in MCA/INIT/NMI/PMI context where it could not be delivered.
 */

void
do_sigdelayed(void)
{
	struct siginfo siginfo;
	pid_t pid;
	struct task_struct *t;

	clear_thread_flag(TIF_SIGDELAYED);
	memset(&siginfo, 0, sizeof(siginfo));
	siginfo.si_signo = current_thread_info()->sigdelayed.signo;
	siginfo.si_code = current_thread_info()->sigdelayed.code;
	siginfo.si_addr = current_thread_info()->sigdelayed.addr;
	pid = current_thread_info()->sigdelayed.pid;
	t = find_task_by_pid(pid);
	if (!t)
		return;
	if (current_thread_info()->sigdelayed.start_time != start_time_ul(t))
		return;
	force_sig_info(siginfo.si_signo, &siginfo, t);
}