/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);
static void *alloc_dmainsn_page(void)
{
	void *page;

	page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (page)
		set_memory_x((unsigned long) page, 1);
	return page;
}

static void free_dmainsn_page(void *page)
{
	set_memory_nx((unsigned long) page, 1);
	free_page((unsigned long) page);
}
struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};
static void copy_instruction(struct kprobe *p)
{
	unsigned long ip = (unsigned long) p->addr;
	s64 disp, new_disp;
	u64 addr, new_addr;

	if (ftrace_location(ip) == ip) {
		/*
		 * If kprobes patches the instruction that is morphed by
		 * ftrace make sure that kprobes always sees the branch
		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
		 * in case of hotpatch.
		 */
		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
		p->ainsn.is_ftrace_insn = 1;
	} else
		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
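
	/*
	 * Worked example with made-up addresses: a RIL-b branch at
	 * p->addr = 0x1000 with disp = 0x100 targets
	 * 0x1000 + 0x100 * 2 = 0x1200. With the copy at
	 * p->ainsn.insn = 0x3000 the formula above gives
	 * new_disp = ((0x1000 + 0x200) - 0x3000) / 2 = -0xf00,
	 * and 0x3000 + (-0xf00) * 2 = 0x1200, so the patched copy
	 * still branches to the original target.
	 */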
}
NOKPROBE_SYMBOL(copy_instruction);
static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}
static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);
static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);
int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
int arch_check_ftrace_location(struct kprobe *p)
{
	return 0;
}
struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};
static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct ftrace_insn new_insn, *insn;
	struct kprobe *p = args->p;
	size_t len;

	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	len = sizeof(new_insn.opc);
	if (!p->ainsn.is_ftrace_insn)
		goto skip_ftrace;
	len = sizeof(new_insn);
	insn = (struct ftrace_insn *) p->addr;
	if (args->arm_kprobe) {
		if (is_ftrace_nop(insn))
			new_insn.disp = KPROBE_ON_FTRACE_NOP;
		else
			new_insn.disp = KPROBE_ON_FTRACE_CALL;
	} else {
		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
skip_ftrace:
	s390_kernel_write(p->addr, &new_insn, len);
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
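
	/*
	 * Note: PER is now programmed for an instruction-fetch event on
	 * exactly one address (start == end == ip), i.e. the out-of-line
	 * copy in the insn slot. Executing that single instruction raises
	 * a PER program interruption, which reaches post_kprobe_handler()
	 * via the DIE_SSTEP notifier, while the cleared I/O and external
	 * mask bits keep interrupts from running in between.
	 */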
}
NOKPROBE_SYMBOL(enable_singlestep);
static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);
/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
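
	/*
	 * In the s390 ABI %r14 holds the return address, so ri->ret_addr
	 * now remembers the real caller while the probed function will
	 * "return" (br %r14) into kretprobe_trampoline, where
	 * trampoline_probe_handler() runs the registered handlers and
	 * puts the real return address back into the PSW.
	 */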
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
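	/*
	 * The kprobes breakpoint is the 2-byte opcode 0x0002 and the PSW
	 * already points past the instruction that caused the program
	 * interruption, so the probe is looked up at psw.addr - 2.
	 */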
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe saves and restores
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /*
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /*
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Check if the kprobes location is an enabled ftrace caller */
	if (p->ainsn.is_ftrace_insn) {
		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
		struct ftrace_insn call_insn;

		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
		/*
		 * A kprobe on an enabled ftrace call site actually single
		 * stepped an unconditional branch (ftrace nop equivalent).
		 * Now we need to fixup things and pretend that a brasl r0,...
		 * was executed instead.
		 */
		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
		}
	}

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}
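
	/*
	 * Worked example with made-up addresses: for a probed
	 * "brasl %r14,..." at p->addr = 0x1000 whose copy sits at
	 * p->ainsn.insn = 0x3000, single-stepping the 6-byte copy
	 * leaves %r14 = 0x3006. FIXUP_RETURN_REGISTER adds
	 * 0x1000 - 0x3000, so %r14 becomes 0x1006, the address right
	 * after the original instruction, as the probed code expects.
	 */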

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);
static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr);
		if (entry) {
			regs->psw.addr = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));

	/*
	 * jprobes use jprobe_return() which skips the normal return
	 * path of the function, and this causes the function graph
	 * tracer accounting to get messed up.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
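
/*
 * The ".word 0x0002" below is the same opcode as BREAKPOINT_INSTRUCTION:
 * executing it traps back into kprobe_handler(), which finds no kprobe
 * registered at that address and therefore calls the active kprobe's
 * break_handler, i.e. longjmp_break_handler() below, to restore the
 * registers and stack saved by setjmp_pre_handler().
 */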
void jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	/* It's OK to start function graph tracing again */
	unpause_graph_tracing();

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);