/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct    Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *             Probes initial implementation (includes contributions from
 *             Rusty Russell).
 * 2004-July   Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *             interface to access function arguments.
 * 2005-May    Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *             <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *             <prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/uaccess.h>

void jprobe_return_end(void);

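/*
 * Per-CPU state: the kprobe currently being handled on this CPU and the
 * control block holding its saved eflags and any preempted previous probe.
 */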
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
{
        struct __arch_jmp_op {
                char op;
                long raddr;
        } __attribute__((packed)) *jop;
        jop = (struct __arch_jmp_op *)from;
        jop->raddr = (long)(to) - ((long)(from) + 5);
        jop->op = RELATIVEJUMP_INSTRUCTION;
}

/*
 * returns non-zero if the opcode can be boosted.
 */
static __always_inline int can_boost(kprobe_opcode_t opcode)
{
        switch (opcode & 0xf0) {
        case 0x70:
                return 0;       /* can't boost conditional jump */
        case 0x90:
                /* can't boost call and pushf */
                return opcode != 0x9a && opcode != 0x9c;
        case 0xc0:
                /* can't boost undefined opcodes and soft-interruptions */
                return (0xc1 < opcode && opcode < 0xc6) ||
                        (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf;
        case 0xd0:
                /* can boost AA* and XLAT */
                return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
        case 0xe0:
                /* can boost in/out and (maybe) jmps */
                return (0xe3 < opcode && opcode != 0xe8);
        case 0xf0:
                /* clear and set flags can be boosted */
                return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
        default:
                /* currently, can't boost two-byte opcodes */
                return opcode != 0x0f;
        }
}

/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
        switch (opcode) {
        case 0xfa:              /* cli */
        case 0xfb:              /* sti */
        case 0xcf:              /* iret/iretd */
        case 0x9d:              /* popf/popfd */
                return 1;
        }
        return 0;
}

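/*
 * Copy the probed instruction into an executable insn slot, remember the
 * original opcode, and record whether the copy is a candidate for
 * "boosting" (direct execution without single-stepping).
 */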
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        /* insn: must be on special executable page on i386. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
                return -ENOMEM;

        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        p->opcode = *p->addr;
        if (can_boost(p->opcode)) {
                p->ainsn.boostable = 0;
        } else {
                p->ainsn.boostable = -1;
        }
        return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        mutex_lock(&kprobe_mutex);
        free_insn_slot(p->ainsn.insn);
        mutex_unlock(&kprobe_mutex);
}

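/*
 * Stash the state of the currently running kprobe so that a probe hit
 * while we are already handling one (re-entry) can be single-stepped and
 * the original state put back by restore_previous_kprobe().
 */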
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
        kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
        kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}

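/*
 * Record p as this CPU's active kprobe and save the TF/IF bits of eflags;
 * if the probed opcode itself modifies the interrupt flag, IF is not
 * restored on its behalf later.
 */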
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
                = (regs->eflags & (TF_MASK | IF_MASK));
        if (is_IF_modifier(p->opcode))
                kcb->kprobe_saved_eflags &= ~IF_MASK;
}

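/*
 * Set up single-stepping of the copied instruction: enable TF, disable
 * interrupts, and point eip at the out-of-line copy (or at the original
 * address if the probed instruction is itself an int3).
 */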
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        regs->eflags |= TF_MASK;
        regs->eflags &= ~IF_MASK;
        /* single-step inline if the instruction is an int3 */
        if (p->opcode == BREAKPOINT_INSTRUCTION)
                regs->eip = (unsigned long)p->addr;
        else
                regs->eip = (unsigned long)p->ainsn.insn;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                      struct pt_regs *regs)
{
        unsigned long *sara = (unsigned long *)&regs->esp;
        struct kretprobe_instance *ri;

        if ((ri = get_free_rp_inst(rp)) != NULL) {
                ri->rp = rp;
                ri->task = current;
                ri->ret_addr = (kprobe_opcode_t *) *sara;

                /* Replace the return addr with trampoline addr */
                *sara = (unsigned long) &kretprobe_trampoline;

                add_rp_inst(ri);
        } else {
                rp->nmissed++;
        }
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr;
        struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
        unsigned pre_preempt_count = preempt_count();
#endif /* CONFIG_PREEMPT */

        addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                                *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
                                regs->eflags &= ~TF_MASK;
                                regs->eflags |= kcb->kprobe_saved_eflags;
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobes variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                } else {
                        if (*addr != BREAKPOINT_INSTRUCTION) {
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit, no further
                                 * handling of this interrupt is appropriate
                                 */
                                regs->eip -= sizeof(kprobe_opcode_t);
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         * Back up over the (now missing) int3 and run
                         * the original instruction.
                         */
                        regs->eip -= sizeof(kprobe_opcode_t);
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

        if (p->ainsn.boostable == 1 &&
#ifdef CONFIG_PREEMPT
            !(pre_preempt_count) &&     /*
                                         * This enables booster when the direct
                                         * execution path isn't preempted.
                                         */
#endif /* CONFIG_PREEMPT */
            !p->post_handler && !p->break_handler) {
                /* Boost up -- we can execute copied instructions directly */
                reset_current_kprobe();
                regs->eip = (unsigned long)p->ainsn.insn;
                preempt_enable_no_resched();
                return 1;
        }

ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_handler() runs, calling the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
        asm volatile ( ".global kretprobe_trampoline\n"
                        "kretprobe_trampoline: \n"
                        "       pushf\n"
                        /* skip cs, eip, orig_eax, es, ds */
                        "       subl $20, %esp\n"
                        "       pushl %eax\n"
                        "       pushl %ebp\n"
                        "       pushl %edi\n"
                        "       pushl %esi\n"
                        "       pushl %edx\n"
                        "       pushl %ecx\n"
                        "       pushl %ebx\n"
                        "       movl %esp, %eax\n"
                        "       call trampoline_handler\n"
                        /* move eflags to cs */
                        "       movl 48(%esp), %edx\n"
                        "       movl %edx, 44(%esp)\n"
                        /* save true return address on eflags */
                        "       movl %eax, 48(%esp)\n"
                        "       popl %ebx\n"
                        "       popl %ecx\n"
                        "       popl %edx\n"
                        "       popl %esi\n"
                        "       popl %edi\n"
                        "       popl %ebp\n"
                        "       popl %eax\n"
                        /* skip eip, orig_eax, es, ds */
                        "       addl $16, %esp\n"
                        "       popf\n"
                        "       ret\n");
}

/*
 * Called from kretprobe_trampoline
 */
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(current);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler) {
                        __get_cpu_var(current_kprobe) = &ri->rp->kp;
                        ri->rp->handler(ri, regs);
                        __get_cpu_var(current_kprobe) = NULL;
                }

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));

        spin_unlock_irqrestore(&kretprobe_lock, flags);

        return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        unsigned long *tos = (unsigned long *)&regs->esp;
        unsigned long copy_eip = (unsigned long)p->ainsn.insn;
        unsigned long orig_eip = (unsigned long)p->addr;

        regs->eflags &= ~TF_MASK;
        switch (p->ainsn.insn[0]) {
        case 0x9c:              /* pushfl */
                *tos &= ~(TF_MASK | IF_MASK);
                *tos |= kcb->kprobe_old_eflags;
                break;
        case 0xc2:              /* iret/ret/lret */
        case 0xc3:
        case 0xca:
        case 0xcb:
        case 0xcf:
        case 0xea:              /* jmp absolute -- eip is correct */
                /* eip is already adjusted, no more changes required */
                p->ainsn.boostable = 1;
                goto no_change;
        case 0xe8:              /* call relative - Fix return addr */
                *tos = orig_eip + (*tos - copy_eip);
                break;
        case 0x9a:              /* call absolute -- same as call absolute, indirect */
                *tos = orig_eip + (*tos - copy_eip);
                goto no_change;
        case 0xff:
                if ((p->ainsn.insn[1] & 0x30) == 0x10) {
                        /*
                         * call absolute, indirect
                         * Fix return addr; eip is correct.
                         * But this is not boostable
                         */
                        *tos = orig_eip + (*tos - copy_eip);
                        goto no_change;
                } else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||       /* jmp near, absolute indirect */
                           ((p->ainsn.insn[1] & 0x31) == 0x21)) {       /* jmp far, absolute indirect */
                        /* eip is correct. And this is boostable */
                        p->ainsn.boostable = 1;
                        goto no_change;
                }
        default:
                break;
        }

        if (p->ainsn.boostable == 0) {
                if ((regs->eip > copy_eip) &&
                    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
                        /*
                         * The copied instruction can be executed directly
                         * if it jumps back to the correct address.
                         */
                        set_jmp_op((void *)regs->eip,
                                   (void *)orig_eip + (regs->eip - copy_eip));
                        p->ainsn.boostable = 1;
                } else {
                        p->ainsn.boostable = -1;
                }
        }

        regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
        return;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);
        regs->eflags |= kcb->kprobe_saved_eflags;

        /* Restore back the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * if somebody else is singlestepping across a probe point, eflags
         * will have TF set, in which case, continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (regs->eflags & TF_MASK)
                return 0;

        return 1;
}

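/*
 * Handle a fault taken while a kprobe is active: either back out of the
 * single step, or give the user-specified fault handler and the exception
 * fixup table a chance to resolve a fault raised by a pre/post handler.
 */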
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the eip points back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->eip = (unsigned long)cur->addr;
                regs->eflags |= kcb->kprobe_old_eflags;
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * the npre/npostfault counts can also be used for
                 * accounting these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault; this could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;

                /*
                 * fixup_exception() could not handle it,
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode_vm(args->regs))
                return ret;

        switch (val) {
        case DIE_INT3:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_GPF:
        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}

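/*
 * jprobe entry: save the register state and enough of the stack to cover
 * the argument area, then divert execution to the jprobe's entry function.
 */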
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        kcb->jprobe_saved_regs = *regs;
        kcb->jprobe_saved_esp = &regs->esp;
        addr = (unsigned long)(kcb->jprobe_saved_esp);

        /*
         * TBD: As Linus pointed out, gcc assumes that the callee
         * owns the argument space and could overwrite it, e.g.
         * tailcall optimization. So, to be absolutely safe
         * we also save and restore enough stack bytes to cover
         * the argument area.
         */
        memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
               MIN_STACK_SIZE(addr));
        regs->eflags &= ~IF_MASK;
        regs->eip = (unsigned long)(jp->entry);
        return 1;
}

void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        asm volatile ("       xchgl   %%ebx,%%esp     \n"
                      "       int3                    \n"
                      "       .globl jprobe_return_end\n"
                      "       jprobe_return_end:      \n"
                      "       nop                     \n"::"b"
                      (kcb->jprobe_saved_esp):"memory");
}

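/*
 * Hit via the int3 issued by jprobe_return(): sanity-check the stack
 * pointer, then restore the registers and stack bytes saved by
 * setjmp_pre_handler() and let the probed function continue normally.
 */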
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        u8 *addr = (u8 *) (regs->eip - 1);
        unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
        struct jprobe *jp = container_of(p, struct jprobe, kp);

        if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
                if (&regs->esp != kcb->jprobe_saved_esp) {
                        struct pt_regs *saved_regs =
                            container_of(kcb->jprobe_saved_esp,
                                            struct pt_regs, esp);
                        printk("current esp %p does not match saved esp %p\n",
                               &regs->esp, kcb->jprobe_saved_esp);
                        printk("Saved registers for jprobe %p\n", jp);
                        show_registers(saved_regs);
                        printk("Current registers\n");
                        show_registers(regs);
                        BUG();
                }
                *regs = kcb->jprobe_saved_regs;
                memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
                       MIN_STACK_SIZE(stack_addr));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}