/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <linux/module.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

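/*
 * Validate the probe point and prepare the out-of-line single-step
 * copy: reject instructions that cannot be probed and odd (unaligned)
 * addresses, then copy the original instruction into an insn slot.
 */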
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
		return -EINVAL;

	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Use the get_insn_slot() facility for correctness */
	if (!(p->ainsn.insn = get_insn_slot()))
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	get_instruction_type(&p->ainsn);
	p->opcode = *p->addr;
	return 0;
}

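/*
 * Opcodes that must not be probed because they cannot safely be
 * single-stepped from the out-of-line copy: addressing-mode and
 * address-space changing instructions, execute (ex) and diagnose
 * (diag); the mnemonics are noted next to each opcode below.
 */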
int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
	switch (*(__u8 *) instruction) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag  */
	case 0x44:	/* ex	 */
		return -EINVAL;
	}
	switch (*(__u16 *) instruction) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr  */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
		return -EINVAL;
	}
	return 0;
}

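/*
 * Decode the copied instruction: record its length (taken from the two
 * leftmost opcode bits), its r1 operand, and which fixup of the PSW
 * and/or return register is needed after single-stepping the copy.
 */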
void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
{
	/* default fixup method */
	ainsn->fixup = FIXUP_PSW_NORMAL;

	/* save r1 operand */
	ainsn->reg = (*ainsn->insn & 0xf0) >> 4;

	/* save the instruction length (pop 5-5) in bytes */
	switch (*(__u8 *) (ainsn->insn) >> 6) {
	case 0:
		ainsn->ilen = 2;
		break;
	case 1:
	case 2:
		ainsn->ilen = 4;
		break;
	case 3:
		ainsn->ilen = 6;
		break;
	}

	switch (*(__u8 *) ainsn->insn) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr */
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((*ainsn->insn & 0x0f) == 0)
			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
			ainsn->fixup = FIXUP_NOT_REQUIRED;
		}
		break;
	case 0xa7:	/* bras	*/
		if ((*ainsn->insn & 0x0f) == 0x05) {
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		}
		break;
	case 0xc0:
		if ((*ainsn->insn & 0x0f) == 0x00	/* larl  */
		    || (*ainsn->insn & 0x0f) == 0x05)	/* brasl */
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		if (*(((__u8 *) ainsn->insn) + 5) == 0x44 ||	/* bxhg  */
		    *(((__u8 *) ainsn->insn) + 5) == 0x45) {	/* bxleg */
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		}
		break;
	case 0xe3:	/* bctg	*/
		if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		}
		break;
	}
}

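/*
 * Swap the instruction at args->ptr.  Runs via stop_machine() so no
 * other CPU executes while the text is patched; probe_kernel_write()
 * reports a fault instead of oopsing, which kprobe_fault_handler()
 * recognizes through the KPROBE_SWAP_INST state.
 */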
static int __kprobes swap_instruction(void *aref)
{
	struct ins_replace_args *args = aref;

	return probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
}

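/*
 * Arm the probe: replace the original opcode at p->addr with the
 * breakpoint instruction, serialized through stop_machine().
 */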
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = p->opcode;
	args.new = BREAKPOINT_INSTRUCTION;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine(swap_instruction, &args, NULL);
	kcb->kprobe_status = status;
}

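/*
 * Disarm the probe: restore the original opcode, again serialized
 * through stop_machine().
 */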
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = BREAKPOINT_INSTRUCTION;
	args.new = p->opcode;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine(swap_instruction, &args, NULL);
	kcb->kprobe_status = status;
}

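/*
 * Release the out-of-line instruction slot allocated by
 * arch_prepare_kprobe().
 */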
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

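/*
 * Set up the single step of the out-of-line copy: point the PSW at
 * p->ainsn.insn and program PER (program event recording, control
 * registers 9-11) to report instruction fetches in that range, with
 * I/O, external and machine-check interrupts masked for the step.
 */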
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	per_cr_bits kprobe_per_regs[1];

	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;

	/* Set up the per control reg info, will pass to lctl */
	kprobe_per_regs[0].em_instruction_fetch = 1;
	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;

	/* Set the PER control regs, turns on single step for this address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
}

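/*
 * Per-CPU probe state handling.  save_previous_kprobe() and
 * restore_previous_kprobe() stack the active probe state so a probe
 * hit from within a handler (reentrancy) can be single-stepped without
 * losing the outer probe's saved PSW mask and PER control registers;
 * set_current_kprobe() records the state for a fresh hit.
 */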
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	/* Save the interrupt and per flags */
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
	/* Save the control regs that govern PER */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}

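/*
 * Prepare a return probe: save the original return address (r14 on
 * s390) in the kretprobe instance and redirect r14 to the kretprobe
 * trampoline.
 */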
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
}

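/*
 * Breakpoint handler, reached via kprobe_exceptions_notify(DIE_BPT).
 * The PSW already points past the 2-byte breakpoint, so step back to
 * locate the probe, handle reentrancy, run the pre_handler and set up
 * the single step of the copied instruction.  Returns non-zero if the
 * trap was consumed by kprobes.
 */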
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->psw.mask &= ~PSW_MASK_PER;
				regs->psw.mask |= kcb->kprobe_saved_imask;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		/*
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	regs->psw.addr &= PSW_ADDR_INSN;

	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr = (unsigned long)p->addr +
			((unsigned long)regs->psw.addr -
			 (unsigned long)p->ainsn.insn);

	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
		if ((unsigned long)regs->psw.addr -
		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
					    (regs->gprs[p->ainsn.reg] -
					     (unsigned long)p->ainsn.insn))
					   | PSW_ADDR_AMODE;

	regs->psw.addr |= PSW_ADDR_AMODE;
	/* turn off PER mode */
	regs->psw.mask &= ~PSW_MASK_PER;
	/* Restore the original per control regs */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask |= kcb->kprobe_saved_imask;
}

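/*
 * PER single-step event handler, reached via
 * kprobe_exceptions_notify(DIE_SSTEP).  Runs the post_handler, fixes
 * up the PSW and registers through resume_execution() and re-enables
 * preemption.  Returns non-zero when the event was fully handled by
 * kprobes.
 */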
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER) {
		return 0;
	}

	return 1;
}

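/*
 * Called when a fault occurs while a kprobe is active.  Depending on
 * where the fault happened (instruction replacement, the single step,
 * or a user handler) either back out of the probe or defer to the
 * user fault_handler and the kernel exception tables.
 */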
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the PSW back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
		regs->psw.mask &= ~PSW_MASK_PER;
		regs->psw.mask |= kcb->kprobe_saved_imask;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

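/*
 * jprobe support: save the register set and a window of the stack,
 * then redirect the PSW to the jprobe entry function so it runs in
 * place of the probed function with the original register contents.
 */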
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}

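/*
 * Hit when the jprobe entry function executes the breakpoint planted
 * by jprobe_return(): restore the saved registers and stack so that
 * execution continues in the probed function as if it had never been
 * diverted.
 */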
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

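/*
 * Lets the generic kprobes code recognize the kretprobe trampoline
 * probe registered above.
 */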
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;
	return 0;
}