/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static inline int is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Copy the original instruction so it can be single-stepped out of line */
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	return 0;
}
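/*
 * Usage sketch (illustrative only, not part of this file): a module arms a
 * probe by filling in a struct kprobe and calling register_kprobe(), which
 * ends up in the arch_prepare_kprobe()/arch_arm_kprobe() routines here.
 * The probed symbol and handler name below are placeholders.
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("probe hit at eip %lx\n", regs->eip);
 *		return 0;	// let kprobe_handler() single-step the insn
 *	}
 *
 *	static struct kprobe my_probe = {
 *		.addr		= (kprobe_opcode_t *) some_kernel_function,
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	register_kprobe(&my_probe);
 *	...
 *	unregister_kprobe(&my_probe);
 */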
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
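/*
 * The next two helpers keep a one-deep stack of per-CPU kprobe state.  They
 * are used by kprobe_handler() below when a second probe is hit while a
 * handler for another probe is still running (the KPROBE_REENTER case), so
 * the original probe's status and saved eflags can be restored afterwards.
 */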
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}
static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
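/*
 * Record p as the probe currently being handled on this CPU and snapshot the
 * TF and IF bits of the interrupted context.  post_kprobe_handler() later
 * ORs kprobe_saved_eflags back into regs->eflags; if the probed opcode
 * itself modifies the interrupt flag (cli/sti/popf/iret), IF is dropped from
 * the saved copy so that restoring it cannot undo the instruction's effect.
 */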
static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}
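/*
 * Arrange for the probed instruction to be single-stepped: set TF, keep
 * interrupts off, and point eip at the copy in p->ainsn.insn so the original
 * text never has to be restored (see the comment above resume_execution()
 * for why the copy is stepped instead of the original).  If the probed
 * instruction is itself an int3, it is stepped in place instead.
 */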
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)&p->ainsn.insn;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
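/*
 * Usage sketch for the return-probe side (illustrative only): a kretprobe
 * names the function to probe via its embedded kprobe and supplies a handler
 * that runs from trampoline_probe_handler() below when the function returns.
 * The probed symbol and handler name are placeholders.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk("returning to %p in task %d\n",
 *		       ri->ret_addr, ri->task->pid);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.addr	= (kprobe_opcode_t *) some_kernel_function,
 *		.handler	= my_ret_handler,
 *		.maxactive	= 4,	// concurrent instances to allocate
 *	};
 *
 *	register_kretprobe(&my_rp);
 */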
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	unsigned long *lp;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check if the application is using LDT entry for its code segment and
	 * calculate the address by reading the base address from the LDT entry.
	 */
	if ((regs->xcs & 4) && (current->mm)) {
		lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8)
					+ (char *) current->mm->context.ldt);
		addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip -
						sizeof(kprobe_opcode_t));
	} else {
		addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
	}
	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (regs->eflags & VM_MASK) {
				/* We are in virtual-8086 mode. Return 0 */
				goto no_kprobe;
			}
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				regs->eip -= sizeof(kprobe_opcode_t);
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (regs->eflags & VM_MASK) {
			/* We are in virtual-8086 mode. Return 0 */
			goto no_kprobe;
		}

		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	nop\n");
}
/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->eip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long next_eip = 0;
	unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
		regs->eflags &= ~TF_MASK;
		/* eip is already adjusted, no more changes required */
		return;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; eip is correct. */
			next_eip = regs->eip;
			*tos = orig_eip + (*tos - copy_eip);
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. */
			next_eip = regs->eip;
		}
		break;
	case 0xea:		/* jmp absolute -- eip is correct */
		next_eip = regs->eip;
		break;
	default:
		break;
	}

	regs->eflags &= ~TF_MASK;
	if (next_eip) {
		regs->eip = next_eip;
	} else {
		regs->eip = orig_eip + (regs->eip - copy_eip);
	}
}
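/*
 * Worked example of the fixups above (addresses are hypothetical): suppose a
 * probe sits on a 5-byte "call rel32" at orig_eip 0xc0123450 and the copy in
 * p->ainsn.insn lives at copy_eip 0xc08ff000.  After the copy is stepped,
 * the pushed return address is copy_eip + 5 = 0xc08ff005, so the 0xe8 case
 * rewrites *tos to orig_eip + (0xc08ff005 - copy_eip) = 0xc0123455, and the
 * final statement relocates regs->eip by the same delta before execution
 * resumes at the call target.
 */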
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->eflags |= kcb->kprobe_old_eflags;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
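/*
 * The three routines below implement jprobes on i386: setjmp_pre_handler()
 * saves the register state and enough of the stack, then points eip at the
 * user's jp->entry function; that function runs with the same arguments as
 * the probed function and ends by calling jprobe_return(), whose int3 brings
 * us into longjmp_break_handler(), which restores the saved state and lets
 * the original function continue as if nothing happened.
 */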
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
			MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					    struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
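/*
 * Usage sketch for jprobes (illustrative only): the entry function must
 * mirror the probed function's signature and must finish with
 * jprobe_return().  Function and symbol names below are placeholders.
 *
 *	static int my_entry(int some_arg, void *some_ptr)
 *	{
 *		printk("called with %d, %p\n", some_arg, some_ptr);
 *		jprobe_return();
 *		return 0;	// never reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= (kprobe_opcode_t *) my_entry,
 *		.kp.addr	= (kprobe_opcode_t *) some_kernel_function,
 *	};
 *
 *	register_jprobe(&my_jp);
 */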
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
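/*
 * arch_init_kprobes() is invoked from the generic kprobes initialization at
 * boot; registering trampoline_p is what plants the probepoint on
 * kretprobe_trampoline that the function-return machinery above relies on.
 */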