/*
 * Common signal handling code for both 32 and 64 bits
 *
 * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
 * Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
#include <linux/context_tracking.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <asm/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>

#include "signal.h"

#ifdef CONFIG_VSX
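/*
 * With VSX enabled, the FP and VSX user-space register images are
 * marshalled through a local buffer: the FP image is ELF_NFPREG - 1
 * registers followed by FPSCR in the last slot, and the VSX image is
 * the low doubleword (TS_VSRLOWOFFSET) of each of the ELF_NVSRHALFREG
 * vector-scalar registers.
 */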
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the FPRs and FPSCR to a local buffer, then out to user space */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the low halves of the VSRs to a local buffer, then out to user space */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
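/*
 * The ck* variants below operate on the checkpointed FP/VSX state
 * (ckfp_state / TS_CKFPR) saved while a transaction is active, so the
 * transactional-memory signal-frame code can expose both the live and
 * the checkpointed register images.
 */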
unsigned long copy_ckfpr_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the checkpointed FPRs and FPSCR to a local buffer, then out to user space */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_CKFPR(i);
	buf[i] = task->thread.ckfp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_ckfpr_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_CKFPR(i) = buf[i];
	task->thread.ckfp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_ckvsx_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the low halves of the checkpointed VSRs to a local buffer, then out to user space */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_ckvsx_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl.
 */

int show_unhandled_signals = 1;

static unsigned long get_tm_stackpointer(struct task_struct *tsk);

/*
 * Allocate space for the signal frame: carve frame_size bytes out of the
 * user stack (honouring any configured sigaltstack) and round the result
 * down to a 16-byte boundary.
 */
void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
			  size_t frame_size, int is_32)
{
	unsigned long oldsp, newsp;
	unsigned long sp = get_tm_stackpointer(tsk);

	/* Default to using normal stack */
	if (is_32)
		oldsp = sp & 0x0ffffffffUL;
	else
		oldsp = sp;
	oldsp = sigsp(oldsp, ksig);
	newsp = (oldsp - frame_size) & ~0xFUL;

	return (void __user *)newsp;
}

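/*
 * Decide whether an interrupted system call should be restarted. scv
 * syscalls report errors as negative values in r3, while the older sc
 * ABI sets CR0[SO] (0x10000000) and leaves a positive errno in r3.
 * Depending on the errno, and on whether a handler will run and whether
 * it was installed with SA_RESTART, either arrange for the syscall to
 * be re-issued or force an EINTR return.
 */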
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (!trap_is_syscall(regs))
		return;

	if (trap_norestart(regs))
		return;

	/* error signalled ? */
	if (trap_is_scv(regs)) {
		/* 32-bit compat mode sign extend? */
		if (!IS_ERR_VALUE(ret))
			return;
		ret = -ret;
	} else if (!(regs->ccr & 0x10000000)) {
		return;
	}

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we don't restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		return;
	}
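	/*
	 * To restart, rewind NIP by one instruction so the sc/scv is
	 * re-executed: for ERESTART_RESTARTBLOCK, point r0 at
	 * __NR_restart_syscall; otherwise restore r3 to the original first
	 * argument. If not restarting, report EINTR in the way each syscall
	 * ABI expects.
	 */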
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		regs->nip -= 4;
		regs->result = 0;
	} else {
		if (trap_is_scv(regs)) {
			regs->result = -EINTR;
			regs->gpr[3] = -EINTR;
		} else {
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			regs->ccr |= 0x10000000;
		}
	}
}

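/*
 * Deliver one pending signal, if any. Called from do_notify_resume() on
 * the way back to user mode: dequeue a signal, sort out syscall restart,
 * re-arm hardware breakpoints and build the user-space signal frame.
 */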
static void do_signal(struct task_struct *tsk)
{
	sigset_t *oldset = sigmask_to_save();
	struct ksignal ksig = { .sig = 0 };
	int ret;

	BUG_ON(tsk != current);

	get_signal(&ksig);

	/* Is there any syscall restart business here ? */
	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);

	if (ksig.sig <= 0) {
		/* No signal to deliver -- put the saved sigmask back */
		restore_saved_sigmask();
		set_trap_norestart(tsk->thread.regs);
		return;		/* no signals delivered */
	}

	/*
	 * Reenable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
				__set_breakpoint(i, &tsk->thread.hw_brk[i]);
		}
	}

	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(tsk, tsk->thread.regs);

	rseq_signal_deliver(&ksig, tsk->thread.regs);

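	/*
	 * Set up the signal frame: 32-bit tasks get either the RT or the
	 * legacy frame depending on SA_SIGINFO, 64-bit tasks always get
	 * the RT frame.
	 */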
	if (is_32bit_task()) {
		if (ksig.ka.sa.sa_flags & SA_SIGINFO)
			ret = handle_rt_signal32(&ksig, oldset, tsk);
		else
			ret = handle_signal32(&ksig, oldset, tsk);
	} else {
		ret = handle_rt_signal64(&ksig, oldset, tsk);
	}

	set_trap_norestart(tsk->thread.regs);
	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}

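/*
 * Handle the TIF work flags that are pending on return to user mode:
 * uprobes, live-patch transitions, signal delivery and notify-resume
 * callbacks, bracketed by context-tracking user exit/enter.
 */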
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	if (thread_info_flags & _TIF_PATCH_PENDING)
		klp_update_patch_state(current);

	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
		BUG_ON(regs != current->thread.regs);
		do_signal(current);
	}

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(NULL, regs);
	}

	user_enter();
}

static unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack. It's possible that the stack has moved back
	 * up after the tbegin. The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend. In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non-transactionally or in suspend, we are in
	 * trouble because if we get a TM abort, the program counter and stack
	 * pointer will be back at the tbegin but our in-memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state. This ensures that the signal context
	 * (written while TM suspended) will be written below the stack
	 * required for the rollback. The transaction is aborted because of
	 * the treclaim, so any memory written between the tbegin and the
	 * signal will be rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */

	unsigned long ret = tsk->thread.regs->gpr[1];

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BUG_ON(tsk != current);

	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
		preempt_disable();
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
			ret = tsk->thread.ckpt_regs.gpr[1];

		/*
		 * If we treclaim, we must clear the current thread's TM bits
		 * before re-enabling preemption. Otherwise we might be
		 * preempted and have the live MSR[TS] changed behind our back
		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
		 * enter the signal handler in non-transactional state.
		 */
		tsk->thread.regs->msr &= ~MSR_TS_MASK;
		preempt_enable();
	}
#endif
	return ret;
}

static const char fm32[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %08lx lr %08lx\n";
static const char fm64[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %016lx lr %016lx\n";

void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
		  const char *where, void __user *ptr)
{
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm,
				   task_pid_nr(tsk), where, ptr, regs->nip, regs->link);
}