/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
 *
 * For such cases, we now have a blacklist
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}		/* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

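/*
 * Worked example of the arithmetic above (illustrative numbers, assuming
 * x86-style values: 4096-byte pages, MAX_INSN_SIZE == 16 and a one-byte
 * kprobe_opcode_t): 4096 / (16 * 1) == 256 instruction slots per page.
 */
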
enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is still single-stepping on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
#endif

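/*
 * Sketch of the intended consumers of this slot cache: an architecture's
 * arch_prepare_kprobe() takes a slot for the single-step copy of the
 * probed instruction, and arch_remove_kprobe() returns it. The outline
 * below is illustrative only, not any particular architecture's
 * implementation (those live in arch/<arch>/kernel/kprobes.c):
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void __kprobes arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn) {
 *			free_insn_slot(p->ainsn.insn, 0);
 *			p->ainsn.insn = NULL;
 *		}
 *	}
 */
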
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

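/*
 * The lock/unlock pairs above are consumed by each architecture's
 * kretprobe trampoline handler, which walks the per-task hash bucket to
 * recover the saved return addresses. An illustrative outline only (the
 * real walker lives in arch code, e.g. x86's trampoline_handler()):
 *
 *	struct kretprobe_instance *ri;
 *	struct hlist_head *head;
 *	struct hlist_node *node, *tmp;
 *	unsigned long flags;
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;	(an instance of another task)
 *		... consume ri and recover its saved return address ...
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 */
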
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() adds to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area which has
		 * already been freed. So, the instruction slot has already
		 * been released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if it succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

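/*
 * For example (illustrative; "do_fork" and the helper variable fork_addr
 * are assumptions for the sketch, not names defined here), these two
 * probe specifications resolve to the same address:
 *
 *	struct kprobe kp1 = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0x10,
 *	};
 *	struct kprobe kp2 = {
 *		.addr	= (kprobe_opcode_t *)((char *)fork_addr + 0x10),
 *	};
 *
 * Supplying both .symbol_name and .addr is ambiguous and makes this
 * function return NULL, which register_kprobe() turns into -EINVAL.
 */
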
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

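/*
 * Example usage, a sketch modelled on Documentation/kprobes.txt (the
 * handler name and the probed symbol "do_fork" are illustrative):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = 0x%p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init probe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit probe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 */
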
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

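/*
 * Example usage, a sketch after Documentation/kprobes.txt: a jprobe
 * handler must mirror the probed function's signature and end with
 * jprobe_return(). The probed function "do_fork" and the prototype
 * shown here are illustrative:
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	(never returns to the caller)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry			= jdo_fork,
 *		.kp.symbol_name		= "do_fork",
 *	};
 *
 *	...
 *	ret = register_jprobe(&my_jprobe);
 */
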
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider only swapping the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			/* Put the instance back on the free list; otherwise it leaks. */
			spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

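/*
 * Example usage, a sketch after Documentation/kprobes.txt (the handler
 * name, the probed symbol "do_fork" and the maxactive value are all
 * illustrative): report the return value of the probed function.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned %lu\n",
 *		       ri->rp->kp.symbol_name, regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler		= ret_handler,
 *		.kp.symbol_name		= "do_fork",
 *		.maxactive		= 20,
 *	};
 *
 *	...
 *	ret = register_kretprobe(&my_kretprobe);
 */
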
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections are about to be freed. When MODULE_STATE_LIVE
	 * is notified, only the .init.text section is freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr at which this probe is installed
				 * will soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

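/*
 * Example (sketch; "kp" is a probe already registered elsewhere): a
 * registered probe can be parked and resumed without the full
 * unregister/register round trip:
 *
 *	disable_kprobe(&kp);	(breakpoint removed, probe stays registered)
 *	...
 *	enable_kprobe(&kp);	(breakpoint re-inserted)
 */
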
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};

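/*
 * From userspace, this file lands in debugfs (sketch, assuming the
 * conventional /sys/kernel/debug mount point):
 *
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	1
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 */
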
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);