/*
 * Kernel Probes (KProbes)
 * arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>

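/*
 * State of the probe currently being handled: the kprobe itself, its
 * status, and the TF/IF bits of eflags saved at probe hit.  The *_prev
 * copies hold one level of backup state so a single reentrant probe hit
 * can be handled (see save_previous_kprobe() below).
 */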
static struct kprobe *current_kprobe;
static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
static struct kprobe *kprobe_prev;
static unsigned long kprobe_status_prev, kprobe_old_eflags_prev,
		     kprobe_saved_eflags_prev;
static struct pt_regs jprobe_saved_regs;
static long *jprobe_saved_esp;
/* copy of the kernel stack at the probe fire time */
static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
void jprobe_return_end(void);

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static inline int is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}

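/*
 * No architecture-specific preparation is needed on i386 when a probe
 * is registered, so this is a no-op that always reports success.
 */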
int arch_prepare_kprobe(struct kprobe *p)
{
	return 0;
}

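/*
 * Keep a copy of the original instruction: p->ainsn.insn gets the full
 * (maximum-length) instruction bytes so it can be single-stepped out of
 * line, and p->opcode saves the first byte that the breakpoint replaces.
 */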
void arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}

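/*
 * Arming writes the breakpoint opcode into kernel text; disarming writes
 * the saved original byte back.  Both flush the icache range so that a
 * stale copy of the instruction is not executed.
 */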
void arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void arch_remove_kprobe(struct kprobe *p)
{
	/* nothing to tear down on i386 */
}

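/*
 * A probe can be hit while another probe is being handled (e.g. a probed
 * function is called from a kprobe handler).  These helpers keep a
 * one-deep backup of the kprobe state so such a reentrant hit can be
 * single-stepped past.
 */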
static inline void save_previous_kprobe(void)
{
	kprobe_prev = current_kprobe;
	kprobe_status_prev = kprobe_status;
	kprobe_old_eflags_prev = kprobe_old_eflags;
	kprobe_saved_eflags_prev = kprobe_saved_eflags;
}

static inline void restore_previous_kprobe(void)
{
	current_kprobe = kprobe_prev;
	kprobe_status = kprobe_status_prev;
	kprobe_old_eflags = kprobe_old_eflags_prev;
	kprobe_saved_eflags = kprobe_saved_eflags_prev;
}

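/*
 * Make p the probe being handled and save the TF/IF bits of eflags.
 * If the probed instruction itself modifies IF, the saved IF bit must
 * not be restored afterwards, so clear it from the saved copy.
 */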
static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	current_kprobe = p;
	kprobe_saved_eflags = kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kprobe_saved_eflags &= ~IF_MASK;
}

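/*
 * Set TF (and clear IF) so the next instruction traps into do_debug,
 * then point eip at the instruction to be stepped: the out-of-line copy,
 * or the original address if the probed instruction is itself an int3.
 */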
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)&p->ainsn.insn;
}

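/*
 * Given any address within a task's kernel stack, mask down to the
 * thread_info at the base of the stack to recover the owning task.
 */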
struct task_struct *arch_get_kprobe_task(void *ptr)
{
	return ((struct thread_info *) (((unsigned long) ptr) &
					(~(THREAD_SIZE - 1))))->task;
}

void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *) &regs->esp;
	struct kretprobe_instance *ri;
	static void *orig_ret_addr;

	/*
	 * Save the return address when the return probe hits the function
	 * for the first time, and use it to populate the (kretprobe
	 * instance)->ret_addr for subsequent return probes at the same
	 * address, since the stack slot will already hold the address of
	 * kretprobe_trampoline by then.
	 */
	if (((void *) *sara) != kretprobe_trampoline)
		orig_ret_addr = (void *) *sara;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->stack_addr = sara;
		ri->ret_addr = orig_ret_addr;
		add_rp_inst(ri);
		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;
	} else {
		rp->nmissed++;
	}
}

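/*
 * Flush all kretprobe instances owned by task tk: put each saved return
 * address back into its stack slot and recycle the instance, so the
 * trampoline is never returned to on this task's behalf again.
 */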
void arch_kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;

	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
		*((unsigned long *)(ri->stack_addr)) =
					(unsigned long) ri->ret_addr;
		recycle_rp_inst(ri);
	}
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	unsigned long *lp;

	/* We're in an interrupt, but this is clear and BUG()-safe. */
	preempt_disable();
	/* Check if the application is using an LDT entry for its code
	 * segment and calculate the address by reading the base address
	 * from the LDT entry.
	 */
	if ((regs->xcs & 4) && (current->mm)) {
		lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8)
					+ (char *) current->mm->context.ldt);
		addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip -
					    sizeof(kprobe_opcode_t));
	} else {
		addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
	}
	/* Check that we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding the lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kprobe_saved_eflags;
				unlock_kprobes();
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe();
			set_current_kprobe(p, regs);
			p->nmissed++;
			prepare_singlestep(p, regs);
			kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		/* If it's not ours, it can't be a delete race (we hold the lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (regs->eflags & VM_MASK) {
			/* We are in virtual-8086 mode. Return 0 */
			goto no_kprobe;
		}

		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs);

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile (".global kretprobe_trampoline\n"
		      "kretprobe_trampoline:\n"
		      "nop\n");
}

/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node;
	unsigned long *sara = ((unsigned long *) &regs->esp) - 1;

	tsk = arch_get_kprobe_task(sara);
	head = kretprobe_inst_table_head(tsk);

	hlist_for_each_entry(ri, node, head, hlist) {
		if (ri->stack_addr == sara && ri->rp) {
			if (ri->rp->handler)
				ri->rp->handler(ri, regs);
		}
	}
	return 0;
}

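/*
 * Post-handler for the trampoline probe: point eip back at the original
 * return address, recycle the matching kretprobe instance(s), and clear
 * TF so single-stepping ends here.
 */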
void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
			     unsigned long flags)
{
	struct kretprobe_instance *ri;
	/* RA already popped */
	unsigned long *sara = ((unsigned long *) &regs->esp) - 1;

	while ((ri = get_rp_inst(sara))) {
		regs->eip = (unsigned long) ri->ret_addr;
		recycle_rp_inst(ri);
	}
	regs->eflags &= ~TF_MASK;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long *tos = (unsigned long *) &regs->esp;
	unsigned long next_eip = 0;
	unsigned long copy_eip = (unsigned long) &p->ainsn.insn;
	unsigned long orig_eip = (unsigned long) p->addr;

	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kprobe_old_eflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
		regs->eflags &= ~TF_MASK;
		/* eip is already adjusted, no more changes required */
		return;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; eip is correct. */
			next_eip = regs->eip;
			*tos = orig_eip + (*tos - copy_eip);
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. */
			next_eip = regs->eip;
		}
		break;
	case 0xea:		/* jmp absolute -- eip is correct */
		next_eip = regs->eip;
		break;
	default:
		break;
	}

	regs->eflags &= ~TF_MASK;
	if (next_eip) {
		regs->eip = next_eip;
	} else {
		regs->eip = orig_eip + (regs->eip - copy_eip);
	}
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.  We also hold the kprobe lock.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
		kprobe_status = KPROBE_HIT_SSDONE;
		current_kprobe->post_handler(current_kprobe, regs, 0);
	}

	if (current_kprobe->post_handler != trampoline_post_handler)
		resume_execution(current_kprobe, regs);
	regs->eflags |= kprobe_saved_eflags;

	/* Restore the previously saved kprobe state and continue. */
	if (kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe();
		goto out;
	}
	unlock_kprobes();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}

/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (current_kprobe->fault_handler
	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		/* the fault interrupted single-stepping: abandon the step */
		resume_execution(current_kprobe, regs);
		regs->eflags |= kprobe_old_eflags;

		unlock_kprobes();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = (struct die_args *) data;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			return NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			return NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			return NOTIFY_STOP;
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

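/*
 * jprobe entry point: save the register state and enough of the stack to
 * cover the argument area, then divert eip to the user's jprobe handler,
 * which runs with the same register and stack contents as the probed
 * function.
 */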
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;

	jprobe_saved_regs = *regs;
	jprobe_saved_esp = &regs->esp;
	addr = (unsigned long) jprobe_saved_esp;

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->eip = (unsigned long) jp->entry;
	return 1;
}

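/*
 * Called at the end of the user's jprobe handler: switch esp back to the
 * stack pointer saved at entry and execute an int3 so that
 * longjmp_break_handler() can restore the original register state.
 */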
void jprobe_return(void)
{
	preempt_enable_no_resched();
	asm volatile ("	xchgl %%ebx,%%esp\n"
		      "	int3\n"
		      "	.globl jprobe_return_end\n"
		      "jprobe_return_end:\n"
		      "	nop\n"
		      : : "b" (jprobe_saved_esp) : "memory");
}

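/*
 * Handles the int3 raised by jprobe_return(): after sanity-checking the
 * stack pointer against the value saved at entry, restore the saved
 * registers and stack bytes so execution resumes in the probed function.
 */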
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long) jprobe_saved_esp;
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != jprobe_saved_esp) {
			struct pt_regs *saved_regs =
				container_of(jprobe_saved_esp,
					     struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		return 1;
	}
	return 0;
}