/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

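/*
 * Per-CPU kprobe state: current_kprobe is the probe whose handlers are
 * running on this CPU (if any), and kprobe_ctlblk carries the per-CPU
 * bookkeeping (saved probe status, single-step context, saved IRQ flag)
 * used while a probe is being serviced.
 */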
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
			   (uintptr_t) (p->ainsn.api.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * Needs restoring of return address after stepping xol.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}

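/*
 * Note: the probed instruction is stepped "out of line" (XOL): it is
 * copied into the per-probe slot above, the CPU single-steps it there,
 * and ainsn.api.restore records where to resume in the original text
 * (probe address + one instruction).
 */
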
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed xol. No need to adjust the PC */
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}

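/*
 * Instructions that cannot safely execute out of line (e.g. PC-relative
 * loads and branches) are emulated in software instead: the decoder
 * attaches a handler, and this path completes the probe without ever
 * entering hardware single-step.
 */
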
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;
	extern char __start_rodata[];
	extern char __end_rodata[];

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (in_exception_text(probe_addr))
		return -EINVAL;
	if (probe_addr >= (unsigned long) __start_rodata &&
	    probe_addr <= (unsigned long) __end_rodata)
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

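/*
 * Typical usage from a module, shown only as an illustrative sketch
 * (the symbol name and handler below are hypothetical, not part of
 * this file):
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", (void *)instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_sys_open",
 *		.pre_handler = my_pre,
 *	};
 *	...
 *	ret = register_kprobe(&kp);	// ends up in arch_prepare_kprobe()
 */
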
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = (void *)addr;
	insns[0] = (u32)opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}

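/*
 * aarch64_insn_patch_text() takes care of modifying live kernel text
 * safely; depending on the instruction it may patch in place with the
 * required cache maintenance, or serialize all CPUs via stop_machine().
 */
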
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * When PSTATE.D is set (masked), then software step exceptions can not be
 * triggered.
 *
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken. PSTATE.D is set while entering into any exception
 * mode, however software clears it for any normal (non-debug-exception)
 * mode in the exception entry. Therefore, when we are entering into kprobe
 * breakpoint handler from any normal mode then SPSR.D bit is already
 * cleared, however it is set when we are entering from any debug exception
 * mode.
 * Since we always need to generate single step exception after a kprobe
 * breakpoint exception therefore we need to clear it unconditionally, when
 * we become sure that the current breakpoint exception is for kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, there is a chance of an
 * interrupt occurring between the exception return and the start of the
 * out-of-line single-step, which would result in wrongly single stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}

static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

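/*
 * Flow from here: the exception return lands on the XOL slot with
 * single-step armed, the copied instruction executes, the resulting
 * step exception is claimed by kprobe_single_step_handler(), and
 * post_kprobe_handler() restores the PC and runs the post handler.
 */
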
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/* post_handler can hit breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return, keep PSTATE D-flag enabled until
			 * pre_handler return back.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
				return;
			}
		}
	} else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
	    BRK64_OPCODE_KPROBES) && cur_kprobe) {
		/* We probably hit a jprobe. Call its break handler. */
		if (cur_kprobe->break_handler &&
		    cur_kprobe->break_handler(cur_kprobe, regs)) {
			setup_singlestep(cur_kprobe, regs, kcb, 0);
			return;
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}

static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

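/*
 * The two handlers above are the entry points from the debug exception
 * code: the BRK instruction planted by arch_arm_kprobe() lands in
 * kprobe_breakpoint_handler(), and the step over the XOL slot lands in
 * kprobe_single_step_handler(), which finishes the probe.
 */
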
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	/*
	 * Since we can't be sure where in the stack frame "stacked"
	 * pass-by-value arguments are stored we just don't try to
	 * duplicate any of the stack. Do not use jprobes on functions that
	 * use more than 64 bytes (after padding each to an 8 byte boundary)
	 * of arguments, or pass individual arguments larger than 16 bytes.
	 */

	instruction_pointer_set(regs, (unsigned long) jp->entry);
	preempt_disable();
	pause_graph_tracing();
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * Jprobe handler return by entering break exception,
	 * encoded same as kprobe, but with following conditions
	 * -a special PC to identify it from the other kprobes.
	 * -restore stack addr to original saved pt_regs
	 */
	asm volatile("				mov sp, %0	\n"
		     "jprobe_return_break:	brk %1		\n"
		     :
		     : "r" (kcb->jprobe_saved_regs.sp),
		       "I" (BRK64_ESR_KPROBES)
		     : "memory");

	unreachable();
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.sp;
	long orig_sp = kernel_stack_pointer(regs);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	extern const char jprobe_return_break[];

	if (instruction_pointer(regs) != (u64) jprobe_return_break)
		return 0;

	if (orig_sp != stack_addr) {
		struct pt_regs *saved_regs =
			(struct pt_regs *)kcb->jprobe_saved_regs.sp;
		pr_err("current sp %lx does not match saved sp %lx\n",
		       orig_sp, stack_addr);
		pr_err("Saved registers for jprobe %p\n", jp);
		show_regs(saved_regs);
		pr_err("Current registers\n");
		show_regs(regs);
		BUG();
	}
	unpause_graph_tracing();
	*regs = kcb->jprobe_saved_regs;
	preempt_enable_no_resched();
	return 1;
}

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	if ((addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	    addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	    addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		if ((addr >= (unsigned long)__hyp_text_start &&
		    addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		    addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}

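/*
 * These ranges are off limits because a BRK planted there could not be
 * handled safely: kprobes' own text and the exception entry paths would
 * recurse, and without VHE the hyp/idmap sections run at EL2 or under
 * different mappings, where the kernel breakpoint handler is unusable.
 */
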
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

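/*
 * Kretprobe flow: arch_prepare_kretprobe() below hijacks the return
 * address, so the probed function "returns" into kretprobe_trampoline
 * (assembly), which saves a pt_regs frame and calls the handler above;
 * the address returned there is where the trampoline finally branches
 * back to.
 */
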
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}

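/*
 * Illustrative kretprobe registration sketch (symbol and handler names
 * are hypothetical, not part of this file):
 *
 *	static int my_ret(struct kretprobe_instance *ri,
 *			  struct pt_regs *regs)
 *	{
 *		pr_info("returned %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe rp = {
 *		.kp.symbol_name	= "_do_fork",
 *		.handler	= my_ret,
 *	};
 *	...
 *	ret = register_kretprobe(&rp);
 */
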
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}