/*
 * arch/tile/kernel/kprobes.c
 * Kprobes on TILE-Gx
 *
 * Some portions copied from the MIPS version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#include <arch/opcode.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
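
/*
 * breakpoint_insn is what gets patched over the probed bundle;
 * breakpoint2_insn is the same bpt bundle tagged with DIE_SSTEPBP and
 * is placed after the copied instruction in the slot, so finishing the
 * out-of-line single step traps straight back into the kprobes code.
 */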

/*
 * Check whether instruction is branch or jump, or if executing it
 * has different results depending on where it is executed (e.g. lnk).
 */
static int __kprobes insn_has_control(kprobe_opcode_t insn)
{
	if (get_Mode(insn) != 0) {	/* Y-format bundle */
		if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
		    get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
			return 0;

		switch (get_UnaryOpcodeExtension_Y1(insn)) {
		case JALRP_UNARY_OPCODE_Y1:
		case JALR_UNARY_OPCODE_Y1:
		case JRP_UNARY_OPCODE_Y1:
		case JR_UNARY_OPCODE_Y1:
		case LNK_UNARY_OPCODE_Y1:
			return 1;
		default:
			return 0;
		}
	}

	switch (get_Opcode_X1(insn)) {
	case BRANCH_OPCODE_X1:	/* branch instructions */
	case JUMP_OPCODE_X1:	/* jump instructions: j and jal */
		return 1;

	case RRR_0_OPCODE_X1:	/* other jump instructions */
		if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
			return 0;

		switch (get_UnaryOpcodeExtension_X1(insn)) {
		case JALRP_UNARY_OPCODE_X1:
		case JALR_UNARY_OPCODE_X1:
		case JRP_UNARY_OPCODE_X1:
		case JR_UNARY_OPCODE_X1:
		case LNK_UNARY_OPCODE_X1:
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}
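
/*
 * Prepare the out-of-line slot.  The probed address must be
 * bundle-aligned, and control-flow bundles are rejected because they
 * would not behave the same when executed from the slot's address.
 */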
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (addr & (sizeof(kprobe_opcode_t) - 1))
		return -EINVAL;

	if (insn_has_control(*p->addr)) {
		pr_notice("Kprobes for control instructions are not supported\n");
		return -EINVAL;
	}

	/* insn: must be on special executable page on tile. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 */
	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

	return 0;
}
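
/*
 * Arming and disarming patch the probed bundle through
 * ktext_writable_addr(), since the primary kernel-text mapping may be
 * read-only.
 */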
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = ktext_writable_addr(p->addr);

	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
		sizeof(breakpoint_insn)))
		pr_err("%s: failed to enable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *kp)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = ktext_writable_addr(kp->addr);

	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
		sizeof(kp->opcode)))
		pr_err("%s: failed to disarm kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(kp);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
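
/*
 * The save/restore pair below lets kprobe_handler() nest exactly one
 * level deep (KPROBE_REENTER) when a second probe is hit while the
 * first is still being single-stepped.
 */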
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_pc = regs->pc;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* Single step inline if the instruction is a break. */
	if (p->opcode == breakpoint_insn ||
	    p->opcode == breakpoint2_insn)
		regs->pc = (unsigned long)p->addr;
	else
		regs->pc = (unsigned long)&p->ainsn.insn[0];
}
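
/*
 * Breakpoint trap entry point, reached via kprobe_exceptions_notify().
 * Dispatches user pre_handlers and sets up the out-of-line single
 * step; returns nonzero when the trap has been consumed.
 */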
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)regs->pc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn[0] == breakpoint_insn) {
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != breakpoint_insn) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != breakpoint_insn) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* Handler has already set things up, so skip ss setup. */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint. To avoid the
 * SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	unsigned long orig_pc = kcb->kprobe_saved_pc;

	/* TILE-Gx instruction bundles are 8 bytes; step past the probed one. */
	regs->pc = orig_pc + 8;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		resume_execution(cur, regs, kcb);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id(). */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
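
/*
 * jprobe support: the pre-handler saves the register file and a window
 * of the stack, then redirects the pc to the jprobe entry; when the
 * entry function calls jprobe_return(), longjmp_break_handler()
 * restores both and resumes as if nothing had happened.
 */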
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->sp;

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	asm volatile(
		"bpt\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->pc >= (unsigned long)jprobe_return &&
	    regs->pc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		: : : "memory");
}

void kretprobe_trampoline(void);
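
/*
 * On TILE-Gx the return address of a function call lives in the lr
 * register, so hijacking the return path is a matter of saving lr and
 * pointing it at the trampoline.
 */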
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->lr;

	/* Replace the return addr with trampoline addr */
	regs->lr = (unsigned long)kretprobe_trampoline;
}

/*
 * Called when the probe at kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
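
/*
 * Report whether a probe sits on the kretprobe trampoline itself, so
 * the generic kprobes code can special-case it.
 */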
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
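
/*
 * Example (sketch; names below are illustrative only): a module
 * exercising this port goes through the generic kprobes API, and the
 * arch hooks above do the bundle patching and single-step plumbing:
 *
 *	static int pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("hit %p\n", kp->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "schedule",
 *		.pre_handler = pre,
 *	};
 *
 *	register_kprobe(&kp);    arms via arch_arm_kprobe()
 *	...
 *	unregister_kprobe(&kp);  restores the original bundle
 */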