/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

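/*
 * Validate and set up a probe: the probe address must be word-aligned and
 * must not be an mtmsr[d] or rfi[d] instruction, since those cannot be
 * safely single-stepped out of line.  The probed instruction is copied to
 * a slot on a special executable page so it can be single-stepped there.
 */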
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}

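/*
 * Arm the probe: overwrite the probed instruction with the breakpoint
 * (trap) instruction and flush the icache so the change is visible.
 */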
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

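/* Disarm the probe: put the original instruction back and flush the icache. */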
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

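/* Give the out-of-line single-step slot back when the probe goes away. */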
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

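/*
 * Set MSR[SE] and point NIP at the out-of-line copy, so that exactly one
 * instruction is single-stepped before we get control back.
 */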
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->msr |= MSR_SE;

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as the values in regs could determine
	 * whether the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

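/*
 * Helpers for a probe hit while another probe's handler is running: stash
 * the current kprobe state so it can be restored once the nested probe has
 * been single-stepped.
 */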
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}

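/*
 * Hijack the return path of the probed function: record the real return
 * address (the link register) in a kretprobe_instance and replace it with
 * the address of kretprobe_trampoline, so the trampoline probe below fires
 * when the function returns.
 */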
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->link;

		/* Replace the return addr with trampoline addr */
		regs->link = (unsigned long)kretprobe_trampoline;
		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}

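/*
 * Main breakpoint handler, reached via the DIE_BPT notifier.  It identifies
 * the kprobe that owns the trap, runs its pre_handler, and then either
 * emulates the probed instruction (the boosted fast path) or arranges to
 * single-step the out-of-line copy.
 */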
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe, without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If it's a trap variant, it doesn't belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * So we should never get here... but it's still
			 * good to catch them, just in case.
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

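/*
 * Illustrative sketch only (not part of the original file): a probe that
 * ends up in kprobe_handler() above is registered through the generic
 * kprobes API, roughly as below; my_pre_handler, my_kprobe and
 * target_function are hypothetical names.
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at nip=0x%lx\n", regs->nip);
 *		return 0;	// let the single-step/boost path run
 *	}
 *
 *	static struct kprobe my_kprobe = {
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	my_kprobe.addr = (kprobe_opcode_t *)target_function;
 *	register_kprobe(&my_kprobe);
 */
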
/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

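/*
 * Illustrative sketch only (not part of the original file): a return probe
 * that rides on the trampoline above is registered roughly as below, using
 * the generic register_kretprobe() interface; my_ret_handler, my_kretprobe
 * and target_function are hypothetical names.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("returned 0x%lx\n", regs->gpr[3]);	// r3 = return value
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler   = my_ret_handler,
 *		.maxactive = 16,
 *	};
 *
 *	my_kretprobe.kp.addr = (kprobe_opcode_t *)target_function;
 *	register_kretprobe(&my_kretprobe);
 */
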
/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have a
	 * return probe installed on them, and/or more than one return probe
	 * was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that was replaced by the "breakpoint" instruction.
 * To avoid the SMP problems that can occur when we temporarily
 * put back the original opcode to single-step, we single-stepped
 * a copy of the instruction.  The address of this copy is
 * p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}

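/*
 * Called via the DIE_SSTEP notifier after the out-of-line copy has been
 * single-stepped: run the post_handler, fix up regs->nip via
 * resume_execution(), restore the saved MSR, and pop any saved kprobe
 * state if this was a reentrant hit.
 */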
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}

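/*
 * Called when the instruction being single-stepped, or one of the user
 * handlers, takes a fault: restore enough state for the fault to be
 * handled normally, or let an exception-table fixup deal with it.
 */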
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the nip back at the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * The exception-table fixup could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

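/*
 * Jprobe support: setjmp_pre_handler() saves the register state and
 * redirects NIP (and, on ppc64, the TOC pointer) to the user's handler,
 * which must mirror the probed function's prototype.  The handler finishes
 * by calling jprobe_return(), whose trap lands in longjmp_break_handler()
 * where the saved registers are restored.
 */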
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
#ifdef CONFIG_PPC64
	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#else
	regs->nip = (unsigned long)jp->entry;
#endif

	return 1;
}

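/*
 * Illustrative sketch only (not part of the original file): a jprobe built
 * on the handlers in this file is registered roughly as below, using the
 * generic register_jprobe() interface; my_wrapper, my_jprobe and
 * target_function are hypothetical names, and the wrapper must have the
 * same prototype as the probed function.
 *
 *	static long my_wrapper(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk("called with %lu, %lu\n", arg0, arg1);
 *		jprobe_return();	// never returns normally
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry = JPROBE_ENTRY(my_wrapper),
 *	};
 *
 *	my_jprobe.kp.addr = (kprobe_opcode_t *)target_function;
 *	register_jprobe(&my_jprobe);
 */
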
void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
};

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here because
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}