/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>

#include "signal.h"


#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	3
#define TRAMP_SIZE	6
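/*
 * setup_trampoline() below emits three instructions (addi/li/sc) into the
 * first TRAMP_TRACEBACK words and zeroes the remainder of the TRAMP_SIZE-word
 * buffer as minimal traceback info.
 */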

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */

struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
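/*
 * Rough layout of the resulting user stack, highest address first (see
 * get_sigframe() and handle_rt_signal64() below):
 *
 *	interrupted code's GPR1
 *	abigap			preserves the 512-byte redzone below that GPR1
 *	rest of rt_sigframe	"frame" points at uc, the lowest field
 *	__SIGNAL_FRAMESIZE gap	dummy caller frame, back-chained to old GPR1
 *	new GPR1 on entry to the signal handler
 */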

static const char fmt32[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
static const char fmt64[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";

/*
 * This computes a quad word aligned pointer inside the vmx_reserve array
 * element. For historical reasons sigcontext might not be quad word aligned,
 * but the location we write the VMX regs to must be. See the comment in
 * sigcontext for more detail.
 */
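/*
 * The (addr + 15) & ~0xf below rounds the address up to the next 16-byte
 * boundary, e.g. 0x...1004 becomes 0x...1010, while an already aligned
 * address is unchanged.
 */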
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif

/*
 * Set up the sigcontext for the signal frame.
 */

static long setup_sigcontext(struct sigcontext __user *sc,
		struct task_struct *tsk, int signr, sigset_t *set,
		unsigned long handler, int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	unsigned long vrsave;
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(tsk != current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		flush_altivec_to_thread(tsk);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate that
		 * sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		vrsave = mfspr(SPRN_VRSAVE);
		tsk->thread.vrsave = vrsave;
	}

	err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(tsk);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, tsk);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(tsk);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, tsk);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct task_struct *tsk,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = tsk->thread.ckpt_regs.msr;
	long err = 0;

	BUG_ON(tsk != current);

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.vr_state,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.ckvr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(tsk->thread.ckvrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
	if (msr & MSR_FP)
		err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
	else
		err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_ckvsx_to_user(v_regs, tsk);

		if (msr & MSR_VSX)
			err |= copy_vsx_to_user(tm_v_regs, tsk);
		else
			err |= copy_ckvsx_to_user(tm_v_regs, tsk);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &tsk->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif

/*
 * Restore the sigcontext from the signal frame.
 */

static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
			       struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long err = 0;
	unsigned long save_r13 = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
	err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
	err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
	err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
	/* skip SOFTE */
	regs->trap = 0;
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into tsk->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
					33 * sizeof(vector128));
		tsk->thread.used_vr = true;
	} else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
	else
		tsk->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0) {
		err |= copy_vsx_from_user(tsk, v_regs);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32; i++)
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
	}
#endif
	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct task_struct *tsk,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* Don't allow reserved mode. */
	if (MSR_TM_RESV(msr))
		return -EINVAL;

	/* pull in MSR TM from user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

	/* pull in MSR LE from user context */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(tsk->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(tsk->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(tsk->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(tsk->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);

	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into tsk->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(VERIFY_READ,
				    tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
					33 * sizeof(vector128));
		current->thread.used_vr = true;
	}
	else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(tsk->thread.ckvrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	}
	else {
		tsk->thread.vrsave = 0;
		tsk->thread.ckvrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
	err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(tsk, tm_v_regs);
		err |= copy_ckvsx_from_user(tsk, v_regs);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32; i++) {
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* Make sure the transaction is marked as failed */
	tsk->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&tsk->thread, msr);

	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&tsk->thread.fp_state);
		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
	}
	if (msr & MSR_VEC) {
		load_vr_state(&tsk->thread.vr_state);
		regs->msr |= MSR_VEC;
	}

	return err;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
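/*
 * 0x38210000 and 0x38000000 below are addi/li templates (primary opcode 14)
 * with the 16-bit immediate OR'd into the low halfword; 0x44000002 is the
 * encoding of the sc instruction.
 */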
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
	err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
	/* li r0, __NR_[rt_]sigreturn */
	err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
	/* sc */
	err |= __put_user(0x44000002UL, &tramp[2]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
				   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))
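/* The 32 longs are the VSX low doublewords appended after the VMX data. */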

/*
 * Handle {get,set,swap}_context operations
 */
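/*
 * If old_ctx is non-NULL the current context is saved to it; if new_ctx is
 * non-NULL it is then installed, so passing both performs a context swap.
 */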
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
	unsigned char tmp;
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	BUG_ON(regs != current->thread.regs);

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state, reject it.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0,
					ctx_has_vsx_region)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t)))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);
	if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}


/*
 * Do a signal return; undo the signal stack.
 */

int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	BUG_ON(current->thread.regs != regs);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, this includes any transactional state created
	 * within it. We only check for suspended as we can never be
	 * active in the kernel; if we are active, there is nothing better
	 * to do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	}
	else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

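/*
 * Set up a signal frame on the user stack and point regs at the handler.
 * Returns 0 on success, 1 if the frame could not be set up.
 */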
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		goto badframe;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
					NULL, (unsigned long)ksig->ka.sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	tsk->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
		regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
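	/*
	 * ELFv2 (is_elf2_task()): the handler address is the entry point
	 * itself and is also placed in r12 for the global entry point code;
	 * ELFv1: the handler is a function descriptor giving entry and TOC.
	 */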
	if (is_elf2_task()) {
		regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->nip;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine.  The first entry in the function
		 * descriptor is the entry address of the signal routine and
		 * the second entry is the TOC value we need to use.
		 */
		func_descr_t __user *funct_desc_ptr =
			(func_descr_t __user *) ksig->ka.sa.sa_handler;

		err |= get_user(regs->nip, &funct_desc_ptr->entry);
		err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   tsk->comm, tsk->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	return 1;
}