/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
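
/*
 * Copy the probed instruction into its out-of-line (XOL) single-step slot
 * and record where execution has to resume once the step completes.
 */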
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
			   (uintptr_t) (p->ainsn.api.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * Needs restoring of return address after stepping xol.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed xol. No need to adjust the PC */
	p->ainsn.api.restore = 0;
}
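
/*
 * Instructions that cannot safely be stepped out-of-line (e.g. PC-relative
 * ones) are emulated instead, via the handler chosen at decode time.
 */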
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}
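
/*
 * Validate the probe address, save a copy of the probed instruction and
 * decide whether it will be single-stepped from a slot or simulated.
 */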
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;
	extern char __start_rodata[];
	extern char __end_rodata[];

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (in_exception_text(probe_addr))
		return -EINVAL;
	if (probe_addr >= (unsigned long) __start_rodata &&
	    probe_addr <= (unsigned long) __end_rodata)
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
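
/*
 * Swap a single instruction in kernel text through the arm64 instruction
 * patching helpers, which take care of cache maintenance and
 * synchronisation with other CPUs.
 */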
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = (void *)addr;
	insns[0] = (u32)opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}
/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}
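
/*
 * Nesting support: when a probe is hit from within another probe's handler,
 * the state of the interrupted kprobe is saved here and restored afterwards.
 */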
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * When PSTATE.D is set (masked), software step exceptions cannot be
 * triggered.
 *
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken. PSTATE.D is set while entering into any exception
 * mode, however software clears it for any normal (non-debug-exception)
 * mode in the exception entry. Therefore, when we are entering into a kprobe
 * breakpoint handler from any normal mode the SPSR.D bit is already
 * cleared, however it is set when we are entering from any debug exception
 * mode.
 * Since we always need to generate a single-step exception after a kprobe
 * breakpoint exception, we clear it unconditionally once we are sure that
 * the current breakpoint exception is for a kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, there is a chance of an
 * interrupt occurring between the exception return and the start of the
 * out-of-line single-step, which would result in wrongly single-stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
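
/*
 * Arrange for the probed instruction to be executed: either hardware
 * single-step it from the XOL slot with IRQs masked, or fall back to
 * software simulation when no slot was set up.
 */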
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
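
/*
 * A breakpoint hit while a kprobe is already being handled: account the
 * missed probe and single-step it, or treat the situation as unrecoverable.
 */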
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/* post_handler can hit breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
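
/*
 * Main entry point from the BRK64 exception: look up the kprobe registered
 * for the faulting address and run its pre-handler before single-stepping.
 * (A client typically reaches this path by filling in a struct kprobe,
 * e.g. .symbol_name and .pre_handler, and calling register_kprobe().)
 */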
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return, keep PSTATE D-flag enabled until
			 * pre_handler returns back.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
				return;
			}
		}
	} else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
	    BRK64_OPCODE_KPROBES) && cur_kprobe) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur_kprobe->break_handler &&
		    cur_kprobe->break_handler(cur_kprobe, regs)) {
			setup_singlestep(cur_kprobe, regs, kcb, 0);
			return;
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}
static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}
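
/*
 * The two hooks below are invoked from the debug exception code: one for
 * the single-step exception raised after stepping the slot, one for the
 * BRK64 breakpoint planted at the probe address.
 */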
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}
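
/*
 * Jprobe support: setjmp_pre_handler diverts execution to the jprobe entry
 * with a saved copy of pt_regs, jprobe_return() traps back through a
 * dedicated BRK, and longjmp_break_handler restores the original state.
 */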
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	/*
	 * Since we can't be sure where in the stack frame "stacked"
	 * pass-by-value arguments are stored we just don't try to
	 * duplicate any of the stack. Do not use jprobes on functions that
	 * use more than 64 bytes (after padding each to an 8 byte boundary)
	 * of arguments, or pass individual arguments larger than 16 bytes.
	 */

	instruction_pointer_set(regs, (unsigned long) jp->entry);
	preempt_disable();
	pause_graph_tracing();
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * Jprobe handler returns by entering a break exception,
	 * encoded same as a kprobe, but with the following conditions:
	 * - a special PC to identify it from the other kprobes.
	 * - the stack addr is restored to the original saved pt_regs.
	 */
	asm volatile("				mov sp, %0	\n"
		     "jprobe_return_break:	brk %1		\n"
		     :
		     : "r" (kcb->jprobe_saved_regs.sp),
		       "I" (BRK64_ESR_KPROBES)
		     : "memory");

	unreachable();
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.sp;
	long orig_sp = kernel_stack_pointer(regs);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	extern const char jprobe_return_break[];

	if (instruction_pointer(regs) != (u64) jprobe_return_break)
		return 0;

	if (orig_sp != stack_addr) {
		struct pt_regs *saved_regs =
			(struct pt_regs *)kcb->jprobe_saved_regs.sp;
		pr_err("current sp %lx does not match saved sp %lx\n",
		       orig_sp, stack_addr);
		pr_err("Saved registers for jprobe %p\n", jp);
		show_regs(saved_regs);
		pr_err("Current registers\n");
		show_regs(regs);
		BUG();
	}
	unpause_graph_tracing();
	*regs = kcb->jprobe_saved_regs;
	preempt_enable_no_resched();
	return 1;
}
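
/*
 * Refuse probes in code regions where taking a BRK or single-step exception
 * would be unsafe: kprobes, entry, idmap and hyp text, plus anything covered
 * by an exception table entry.
 */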
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	if ((addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	    addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	    addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		if ((addr >= (unsigned long)__hyp_text_start &&
		    addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		    addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}
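
/*
 * Kretprobe support: arch_prepare_kretprobe() below replaces the function's
 * return address (x30) with kretprobe_trampoline, so this handler runs when
 * the probed function returns, invokes the registered return handlers and
 * hands back the real return address.
 */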
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always pushed into the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the (chronologically) first instance's ret_addr
	 *	  will be the real return address, and all the rest will
	 *	  point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}