// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end. We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs. This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
#define unsafe_put_sigset_t	unsafe_put_compat_sigset
#define unsafe_get_sigset_t	unsafe_get_compat_sigset

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

#define unsafe_put_sigset_t(uset, set, label) do {			\
	sigset_t __user *__us = uset;					\
	const sigset_t *__s = set;					\
									\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
} while (0)

#define unsafe_get_sigset_t	unsafe_get_user_sigset

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}

static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);

	return 0;

failed:
	return 1;
}
#endif

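/*
 * Wrappers turning the 0/1 return convention of the helpers above into
 * a branch to the caller's error label, so that they compose with the
 * other unsafe_*() accessors inside a user_access_begin() block.
 */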
#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
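/*
 * The flushes of live register state into the thread struct are done in
 * prepare_save_user_regs(), ahead of the copy-out, so that nothing but
 * unsafe_*() accesses runs inside the user_access_begin() window.
 */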
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}

static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
				   struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace. Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}

#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
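	/* Delivering a signal with a live transaction should not happen
	 * when TM suspend mode has been disabled. */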
	WARN_ON(tm_suspend_disabled);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
}

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);
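	/* frame gets the checkpointed (pre-transaction) state, tm_frame the
	 * live transactional state; the FP/VMX/VSX copies below follow the
	 * same split, falling back to the checkpointed values when the
	 * facility was not in use. */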

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext. This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace. Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
#else
static void prepare_save_tm_user_regs(void) { }

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
#endif

#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	set_trap_norestart(regs);
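	/* Mark the frame's trap value as not restartable so that syscall
	 * restart handling is skipped on the way back to userspace. */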
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
				      ELF_NEVRREG * sizeof(u32), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
	int i;

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);

	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}

	user_read_access_end();
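	/* The checkpointed state above came from sr; now open a second
	 * user-access window and pull the transactional state from tm_sr. */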

	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

	/* restore altivec registers from the stack */
	if (msr & MSR_VEC)
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
	/* Now, recheckpoint. This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs. After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();

	return 0;

failed:
	user_read_access_end();
	return 1;
}
#else
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Set up the sigreturn trampoline: either in the vDSO or on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
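		/* No vDSO mapped: write a "li r0,__NR_rt_sigreturn; sc"
		 * trampoline into the frame itself and flush it to the
		 * instruction cache. */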
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		    struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
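		/* No vDSO mapped: write a "li r0,__NR_sigreturn; sc"
		 * trampoline into the frame itself and flush it to the
		 * instruction cache. */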
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
#endif

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext. No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context that sets the MSR VSX bits but
	 * doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
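	/* Save the current context into old_ctx (if any), then switch over
	 * to new_ctx (if any). */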
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended as we can never be
	 * active in the kernel; if we are active, there is nothing better
	 * to do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

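	/* A non-zero uc_link points to the second (transactional) ucontext
	 * that handle_rt_signal32() chains onto the frame for TM tasks. */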
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're in a
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad thing at RFID
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
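	/* On TM kernels, the top half of the MSR saved in the transactional
	 * mcontext tells us whether a transaction was active when the frame
	 * was built; the save path writes 0 there for non-TM signals. */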
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);

			force_sig(SIGSEGV);
			return 0;
		}
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);

	force_sig(SIGSEGV);
	return 0;
}