/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>

#include "signal.h"


#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	3
#define TRAMP_SIZE	6

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */

struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));

static const char fmt32[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
static const char fmt64[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";

/*
 * Set up the sigcontext for the signal frame.
 */

static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
			     int signr, sigset_t *set, unsigned long handler,
			     int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(current);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, current);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that the VSX region contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct pt_regs *regs,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.transact_vr,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.vr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(current->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
	if (msr & MSR_FP)
		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
	else
		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_vsx_to_user(v_regs, current);

		if (msr & MSR_VSX)
			err |= copy_transact_vsx_to_user(tm_v_regs, current);
		else
			err |= copy_vsx_to_user(tm_v_regs, current);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that the VSX region contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &current->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif

/*
 * Restore the sigcontext from the signal frame.
 */

static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
			       struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long err = 0;
	unsigned long save_r13 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
	err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
	err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
	err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
	/* skip SOFTE */
	regs->trap = 0;
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0)
		err |= __copy_from_user(&current->thread.vr_state, v_regs,
					33 * sizeof(vector128));
	else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	else
		current->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data.  Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0)
		err |= copy_vsx_from_user(current, v_regs);
	else
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct pt_regs *regs,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif
	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* pull in MSR TM from user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

	/* pull in MSR LE from user context */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(current->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(current->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(current->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(current->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);

	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(VERIFY_READ,
				    tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&current->thread.vr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
					33 * sizeof(vector128));
	}
	else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(current->thread.vrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	}
	else {
		current->thread.vrsave = 0;
		current->thread.transact_vrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
	err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data.  Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(current, v_regs);
		err |= copy_transact_vsx_from_user(current, tm_v_regs);
	} else {
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return err;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
	err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
	/* li r0, __NR_[rt_]sigreturn */
	err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
	/* sc */
	err |= __put_user(0x44000002UL, &tramp[2]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
				   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))

/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
	unsigned char tmp;
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context that sets the MSR VSX bits but
	 * doesn't provide the VSX state itself.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
					ctx_has_vsx_region)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t)))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);
	if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}

653 | ||
654 | /* | |
655 | * Do a signal return; undo the signal stack. | |
656 | */ | |
657 | ||
658 | int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, | |
659 | unsigned long r6, unsigned long r7, unsigned long r8, | |
660 | struct pt_regs *regs) | |
661 | { | |
662 | struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1]; | |
663 | sigset_t set; | |
2b0a576d MN |
664 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
665 | unsigned long msr; | |
666 | #endif | |
1da177e4 LT |
667 | |
668 | /* Always make any pending restarted system calls return -EINTR */ | |
f56141e3 | 669 | current->restart_block.fn = do_no_restart_syscall; |
1da177e4 LT |
670 | |
671 | if (!access_ok(VERIFY_READ, uc, sizeof(*uc))) | |
672 | goto badframe; | |
673 | ||
674 | if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) | |
675 | goto badframe; | |
17440f17 | 676 | set_current_blocked(&set); |
2b0a576d MN |
677 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
678 | if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR])) | |
679 | goto badframe; | |
87b4e539 | 680 | if (MSR_TM_ACTIVE(msr)) { |
2b0a576d MN |
681 | /* We recheckpoint on return. */ |
682 | struct ucontext __user *uc_transact; | |
683 | if (__get_user(uc_transact, &uc->uc_link)) | |
684 | goto badframe; | |
685 | if (restore_tm_sigcontexts(regs, &uc->uc_mcontext, | |
686 | &uc_transact->uc_mcontext)) | |
687 | goto badframe; | |
688 | } | |
689 | else | |
690 | /* Fall through, for non-TM restore */ | |
691 | #endif | |
1da177e4 LT |
692 | if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext)) |
693 | goto badframe; | |
694 | ||
7cce2465 AV |
695 | if (restore_altstack(&uc->uc_stack)) |
696 | goto badframe; | |
1da177e4 | 697 | |
401d1f02 DW |
698 | set_thread_flag(TIF_RESTOREALL); |
699 | return 0; | |
1da177e4 LT |
700 | |
701 | badframe: | |
76462232 CD |
702 | if (show_unhandled_signals) |
703 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, | |
704 | current->comm, current->pid, "rt_sigreturn", | |
705 | (long)uc, regs->nip, regs->link); | |
d0c3d534 | 706 | |
1da177e4 LT |
707 | force_sig(SIGSEGV, current); |
708 | return 0; | |
709 | } | |
710 | ||
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;

	frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		goto badframe;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    regs, ksig->sig,
					    NULL,
					    (unsigned long)ksig->ka.sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig,
					NULL, (unsigned long)ksig->ka.sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	current->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	if (is_elf2_task()) {
		regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->nip;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine.  The first entry in the function
		 * descriptor is the entry address of signal and the second
		 * entry is the TOC value we need to use.
		 */
		func_descr_t __user *funct_desc_ptr =
			(func_descr_t __user *) ksig->ka.sa.sa_handler;

		err |= get_user(regs->nip, &funct_desc_ptr->entry);
		err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	return 1;
}