[PATCH] kprobes: fix single-step out of line - take2
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
static struct kprobe *curr_kprobe;

/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped.  x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip) {
		return NULL;
	}

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}
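
/*
 * Illustrative sketch (not part of the original kprobes code): how an
 * architecture's probe-preparation hooks might pair get_insn_slot() and
 * free_insn_slot() to hold the out-of-line copy of the probed
 * instruction.  The ainsn.insn field name follows the x86_64
 * arch_specific_insn layout; treat the bodies below as an assumption,
 * not a reference implementation.
 */
#if 0
int example_arch_prepare_kprobe(struct kprobe *p)
{
	/* Grab a slot on an executable page for the single-step copy. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}

void example_arch_remove_kprobe(struct kprobe *p)
{
	/* Return the slot so its page can be reused (or freed). */
	free_insn_slot(p->ainsn.insn);
}
#endif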

void free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}

/* Locks kprobe: irqs must be disabled */
void lock_kprobes(void)
{
	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();
}

void unlock_kprobes(void)
{
	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);
}

/* You have to be holding the kprobe_lock */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
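
/*
 * Illustrative sketch (not part of the original kprobes code): the
 * pattern an architecture's breakpoint handler uses to find the probe
 * for a faulting address.  lock_kprobes() must run with irqs disabled,
 * and get_kprobe() is only valid while the lock is held; in the real
 * flow the lock stays held across the single-step.  The handler body
 * here is invented for illustration.
 */
#if 0
static int example_trap_lookup(struct pt_regs *regs, void *fault_addr)
{
	struct kprobe *p;

	lock_kprobes();			/* irqs already disabled in a trap */
	p = get_kprobe(fault_addr);
	if (!p) {
		unlock_kprobes();
		return 0;	/* not ours; let the kernel handle the trap */
	}
	/* ... call p->pre_handler(p, regs), set up single-step, ... */
	return 1;
}
#endif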

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		curr_kprobe = NULL;
	}
	return 0;
}

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
	return;
}

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp = curr_kprobe;
	if (curr_kprobe && kp->break_handler) {
		if (kp->break_handler(kp, regs)) {
			curr_kprobe = NULL;
			return 1;
		}
	}
	curr_kprobe = NULL;
	return 0;
}

struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler,
	.post_handler = trampoline_post_handler
};

struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

struct kretprobe_instance *get_rp_inst(void *sara)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct task_struct *tsk;
	struct kretprobe_instance *ri;

	tsk = arch_get_kprobe_task(sara);
	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
	hlist_for_each_entry(ri, node, head, hlist) {
		if (ri->stack_addr == sara)
			return ri;
	}
	return NULL;
}

void add_rp_inst(struct kretprobe_instance *ri)
{
	struct task_struct *tsk;
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);
	tsk = arch_get_kprobe_task(ri->stack_addr);
	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

void recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}
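
/*
 * Illustrative sketch (not part of the original kprobes code): the life
 * cycle of a kretprobe_instance as arch code is meant to drive the
 * helpers above.  The entry/return hooks and the stack_addr assignment
 * are assumptions for illustration; only get_free_rp_inst(),
 * add_rp_inst(), get_rp_inst() and recycle_rp_inst() are real.
 */
#if 0
/* At function entry (the job of arch_prepare_kretprobe): */
void example_on_entry(struct kretprobe *rp, void *return_addr_slot)
{
	struct kretprobe_instance *ri = get_free_rp_inst(rp);
	if (ri) {
		ri->rp = rp;
		ri->stack_addr = return_addr_slot;	/* where the RA lived */
		add_rp_inst(ri);	/* free list -> hash table + used list */
	} else
		rp->nmissed++;		/* no instance left; count the miss */
}

/* At function return (the job of the trampoline handler): */
void example_on_return(void *return_addr_slot, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = get_rp_inst(return_addr_slot);
	if (ri) {
		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);	/* user callback */
		recycle_rp_inst(ri);	/* back to the free list (or kfree) */
	}
}
#endif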

struct hlist_head *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
{
	struct task_struct *tsk;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];

	hlist_for_each_entry(ri, node, head, hlist) {
		tsk = arch_get_kprobe_task(ri->stack_addr);
		if (tsk == tk)
			return ri;
	}
	return NULL;
}

/*
 * This function is called from do_exit or do_execve when task tk's stack is
 * about to be recycled.  Recycle any function-return probe instances
 * associated with this task.  These represent probed functions that have
 * been called but may never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	unsigned long flags = 0;
	spin_lock_irqsave(&kprobe_lock, flags);
	arch_kprobe_flush_task(tk);
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

/*
 * This kprobe pre_handler is registered with every kretprobe.  When a
 * probe hits, it sets up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
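
/*
 * Illustrative sketch (not part of the original kprobes code):
 * registering two kprobes at the same address is what triggers the
 * aggregate path above.  The second register_kprobe() call finds old_p
 * in the hash table, allocates a "manager" kprobe whose aggr_* handlers
 * fan out to both probes, and chains the user probes on its ->list.
 * The handler bodies are invented for illustration.
 */
#if 0
static int first_pre(struct kprobe *p, struct pt_regs *regs)  { return 0; }
static int second_pre(struct kprobe *p, struct pt_regs *regs) { return 0; }

static struct kprobe kp1 = { .pre_handler = first_pre };
static struct kprobe kp2 = { .pre_handler = second_pre };

static int example_register_pair(kprobe_opcode_t *addr)
{
	int ret;

	kp1.addr = addr;
	kp2.addr = addr;
	ret = register_kprobe(&kp1);	/* plain kprobe in the hash table */
	if (ret)
		return ret;
	return register_kprobe(&kp2);	/* takes the register_aggr_kprobe path */
}
#endif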

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
				       struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = arch_prepare_kprobe(p)) != 0) {
		goto rm_kprobe;
	}
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	p->nmissed = 0;
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}
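
/*
 * Illustrative sketch (not part of the original kprobes code): a minimal
 * module that registers a kprobe through the API above.  The probed
 * address must be resolved by the caller; some_resolved_address is a
 * hypothetical placeholder, and the module boilerplate is an assumption
 * about the target kernel.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* 0 = continue with the normal single-step flow */
}

static void my_post(struct kprobe *p, struct pt_regs *regs,
		    unsigned long flags)
{
	/* runs after the original instruction has been single-stepped */
}

static struct kprobe my_kp = {
	.pre_handler = my_pre,
	.post_handler = my_post,
};

static int __init my_init(void)
{
	my_kp.addr = (kprobe_opcode_t *) some_resolved_address; /* hypothetical */
	return register_kprobe(&my_kp);
}

static void __exit my_exit(void)
{
	unregister_kprobe(&my_kp);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
#endif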

void unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
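
/*
 * Illustrative sketch (not part of the original kprobes code): a jprobe
 * handler must have the same signature as the probed function and must
 * end with jprobe_return().  The probed function and its signature here
 * are assumptions for illustration.
 */
#if 0
/* Suppose we probe: long probed_fn(unsigned long a, unsigned long b); */
static long my_jprobe_handler(unsigned long a, unsigned long b)
{
	printk(KERN_INFO "probed_fn(%lu, %lu)\n", a, b);
	jprobe_return();	/* mandatory: never returns normally */
	return 0;		/* unreached; keeps the signature honest */
}

static struct jprobe my_jp = {
	.entry = (kprobe_opcode_t *) my_jprobe_handler,
};

static int example_register_jprobe(kprobe_opcode_t *probed_fn_addr)
{
	my_jp.kp.addr = probed_fn_addr;
	return register_jprobe(&my_jp);
}
#endif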

#ifdef ARCH_SUPPORTS_KRETPROBES

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
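
/*
 * Illustrative sketch (not part of the original kprobes code):
 * registering a kretprobe.  The return handler runs when the probed
 * function returns, via the kretprobe_trampoline probe registered in
 * init_kprobes().  The probed address and handler body are assumptions
 * for illustration.
 */
#if 0
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "probed function returned (task %p)\n",
	       arch_get_kprobe_task(ri->stack_addr));
	return 0;
}

static struct kretprobe my_krp = {
	.handler = my_ret_handler,
	.maxactive = 20,	/* instances for concurrent activations */
};

static int example_register_kretprobe(kprobe_opcode_t *probed_fn_addr)
{
	my_krp.kp.addr = probed_fn_addr;
	return register_kretprobe(&my_krp);
}
#endif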

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = register_die_notifier(&kprobe_exceptions_nb);
	/* Register the trampoline probe for return probe */
	register_kprobe(&trampoline_p);
	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);