/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
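
/*
 * Both hash tables below are sized by these bits: kprobes are bucketed
 * by hash_ptr() of the probed address (kprobe_table), and kretprobe
 * instances by hash_ptr() of the probed task (kretprobe_inst_table).
 */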

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
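
/*
 * Slot life cycle: a CLEAN slot can be handed out and becomes USED
 * (__get_insn_slot()); on release (__free_insn_slot()) a USED slot is
 * either recycled to CLEAN immediately or marked DIRTY and swept back
 * to CLEAN later by collect_garbage_slots(), which first waits for all
 * CPUs to quiesce so nothing can still be executing out of the slot.
 */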

static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction
 * slots. This is used for checking whether an address on the stack
 * is in a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
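/*
 * OPTIMIZE_DELAY is in jiffies (it is passed straight to
 * schedule_delayed_work() by kick_kprobe_optimizer() below); the short
 * delay lets closely spaced (un)registrations be batched into a single
 * pass of the optimizer.
 */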

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), while cpu-hotplug modifies online_cpus; at the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex while stop_machine() cannot proceed because
	 * online_cpus has changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug,
	 * preventing cpu-hotplug outside of text_mutex locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done at any time */
	if (list_empty(&unoptimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all running
	 * interrupts are done. Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted. In that case, a running interrupt can return into
	 * the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is an unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize an
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (meaning there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize an ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fall back to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* There should be no unused kprobes that can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif

/* Arm a kprobe with text_mutex */
static void arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}

/* Disarm a kprobe with text_mutex */
static void disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
NOKPROBE_SYMBOL(aggr_break_handler);

/* Walk the list and increment the nmissed count for the multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);
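
/*
 * Active kretprobe instances are hashed by the probed task (hash_ptr()
 * of the task_struct pointer), so a task's bucket and the lock guarding
 * it must both be derived from that same hash, as the helpers below do.
 */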

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area which has
		 * already been freed. So the instruction slot has already
		 * been released; we need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
			const char *symbol_name, unsigned int offset)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}
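
/*
 * Illustration (hypothetical values, not part of this file): registering
 * with .symbol_name = "vfs_read" and .offset = 4 resolves to the address
 * of vfs_read plus 4 bytes, while supplying both .addr and .symbol_name
 * (or neither) fails with -EINVAL above.
 */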

/* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* The given address is not on an instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in a reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we cannot insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
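
/*
 * Minimal usage sketch (illustrative only; the probed symbol and the
 * handler below are hypothetical, not part of this file):
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;	(returning 0 resumes normal execution)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "vfs_read",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	int ret = register_kprobe(&my_kp);	(0 on success, -errno on error)
 */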
1da177e4 1588
6f0f1dd7 1589/* Check if all probes on the aggrprobe are disabled */
55479f64 1590static int aggr_kprobe_disabled(struct kprobe *ap)
6f0f1dd7
MH
1591{
1592 struct kprobe *kp;
1593
1594 list_for_each_entry_rcu(kp, &ap->list, list)
1595 if (!kprobe_disabled(kp))
1596 /*
1597 * There is an active probe on the list.
1598 * We can't disable this ap.
1599 */
1600 return 0;
1601
1602 return 1;
1603}
1604
1605/* Disable one kprobe: Make sure called under kprobe_mutex is locked */
55479f64 1606static struct kprobe *__disable_kprobe(struct kprobe *p)
6f0f1dd7
MH
1607{
1608 struct kprobe *orig_p;
1609
1610 /* Get an original kprobe for return */
1611 orig_p = __get_valid_kprobe(p);
1612 if (unlikely(orig_p == NULL))
1613 return NULL;
1614
1615 if (!kprobe_disabled(p)) {
1616 /* Disable probe if it is a child probe */
1617 if (p != orig_p)
1618 p->flags |= KPROBE_FLAG_DISABLED;
1619
1620 /* Try to disarm and disable this/parent probe */
1621 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
69d54b91
WN
1622 /*
1623 * If kprobes_all_disarmed is set, orig_p
1624 * should have already been disarmed, so
1625 * skip unneed disarming process.
1626 */
1627 if (!kprobes_all_disarmed)
1628 disarm_kprobe(orig_p, true);
6f0f1dd7
MH
1629 orig_p->flags |= KPROBE_FLAG_DISABLED;
1630 }
1631 }
1632
1633 return orig_p;
1634}
1635
de5bd88d
MH
1636/*
1637 * Unregister a kprobe without a scheduler synchronization.
1638 */
55479f64 1639static int __unregister_kprobe_top(struct kprobe *p)
de5bd88d 1640{
6d8e40a8 1641 struct kprobe *ap, *list_p;
de5bd88d 1642
6f0f1dd7
MH
1643 /* Disable kprobe. This will disarm it if needed. */
1644 ap = __disable_kprobe(p);
6d8e40a8 1645 if (ap == NULL)
de5bd88d
MH
1646 return -EINVAL;
1647
6f0f1dd7 1648 if (ap == p)
bf8f6e5b 1649 /*
6f0f1dd7
MH
1650 * This probe is an independent(and non-optimized) kprobe
1651 * (not an aggrprobe). Remove from the hash list.
bf8f6e5b 1652 */
6f0f1dd7
MH
1653 goto disarmed;
1654
1655 /* Following process expects this probe is an aggrprobe */
1656 WARN_ON(!kprobe_aggrprobe(ap));
1657
6274de49
MH
1658 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1659 /*
1660 * !disarmed could be happen if the probe is under delayed
1661 * unoptimizing.
1662 */
6f0f1dd7
MH
1663 goto disarmed;
1664 else {
1665 /* If disabling probe has special handlers, update aggrprobe */
e8386a0c 1666 if (p->break_handler && !kprobe_gone(p))
6d8e40a8 1667 ap->break_handler = NULL;
e8386a0c 1668 if (p->post_handler && !kprobe_gone(p)) {
6d8e40a8 1669 list_for_each_entry_rcu(list_p, &ap->list, list) {
9861668f
MH
1670 if ((list_p != p) && (list_p->post_handler))
1671 goto noclean;
1672 }
6d8e40a8 1673 ap->post_handler = NULL;
9861668f
MH
1674 }
1675noclean:
6f0f1dd7
MH
1676 /*
1677 * Remove from the aggrprobe: this path will do nothing in
1678 * __unregister_kprobe_bottom().
1679 */
49a2a1b8 1680 list_del_rcu(&p->list);
6f0f1dd7
MH
1681 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1682 /*
1683 * Try to optimize this probe again, because post
1684 * handler may have been changed.
1685 */
1686 optimize_kprobe(ap);
49a2a1b8 1687 }
9861668f 1688 return 0;
6f0f1dd7
MH
1689
1690disarmed:
6274de49 1691 BUG_ON(!kprobe_disarmed(ap));
6f0f1dd7
MH
1692 hlist_del_rcu(&ap->hlist);
1693 return 0;
9861668f 1694}
3516a460 1695
55479f64 1696static void __unregister_kprobe_bottom(struct kprobe *p)
9861668f 1697{
6d8e40a8 1698 struct kprobe *ap;
b3e55c72 1699
e8386a0c 1700 if (list_empty(&p->list))
6274de49 1701 /* This is an independent kprobe */
0498b635 1702 arch_remove_kprobe(p);
e8386a0c 1703 else if (list_is_singular(&p->list)) {
6274de49 1704 /* This is the last child of an aggrprobe */
6d8e40a8 1705 ap = list_entry(p->list.next, struct kprobe, list);
e8386a0c 1706 list_del(&p->list);
6d8e40a8 1707 free_aggr_kprobe(ap);
9861668f 1708 }
6274de49 1709 /* Otherwise, do nothing. */
9861668f
MH
1710}
1711
55479f64 1712int register_kprobes(struct kprobe **kps, int num)
9861668f
MH
1713{
1714 int i, ret = 0;
1715
1716 if (num <= 0)
1717 return -EINVAL;
1718 for (i = 0; i < num; i++) {
49ad2fd7 1719 ret = register_kprobe(kps[i]);
67dddaad
MH
1720 if (ret < 0) {
1721 if (i > 0)
1722 unregister_kprobes(kps, i);
9861668f 1723 break;
36721656 1724 }
49a2a1b8 1725 }
9861668f
MH
1726 return ret;
1727}
99081ab5 1728EXPORT_SYMBOL_GPL(register_kprobes);
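
/*
 * Usage sketch (illustrative only, not compiled as part of this file;
 * modeled on samples/kprobes/kprobe_example.c). The probed symbol and
 * all "example_*" names below are assumptions for illustration; a
 * pre_handler returning 0 lets the single-step proceed as usual:
 *
 *	#include <linux/kprobes.h>
 *	#include <linux/module.h>
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "_do_fork",
 *		.pre_handler	= example_pre,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_kprobe(&example_kp);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_kprobe(&example_kp);
 *	}
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 */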
9861668f 1729
55479f64 1730void unregister_kprobe(struct kprobe *p)
9861668f
MH
1731{
1732 unregister_kprobes(&p, 1);
1733}
99081ab5 1734EXPORT_SYMBOL_GPL(unregister_kprobe);
9861668f 1735
55479f64 1736void unregister_kprobes(struct kprobe **kps, int num)
9861668f
MH
1737{
1738 int i;
1739
1740 if (num <= 0)
1741 return;
1742 mutex_lock(&kprobe_mutex);
1743 for (i = 0; i < num; i++)
1744 if (__unregister_kprobe_top(kps[i]) < 0)
1745 kps[i]->addr = NULL;
1746 mutex_unlock(&kprobe_mutex);
1747
1748 synchronize_sched();
1749 for (i = 0; i < num; i++)
1750 if (kps[i]->addr)
1751 __unregister_kprobe_bottom(kps[i]);
1da177e4 1752}
99081ab5 1753EXPORT_SYMBOL_GPL(unregister_kprobes);
1da177e4 1754
5f6bee34
NR
1755int __weak kprobe_exceptions_notify(struct notifier_block *self,
1756 unsigned long val, void *data)
fc62d020
NR
1757{
1758 return NOTIFY_DONE;
1759}
5f6bee34 1760NOKPROBE_SYMBOL(kprobe_exceptions_notify);
fc62d020 1761
1da177e4 1762static struct notifier_block kprobe_exceptions_nb = {
3d5631e0
AK
1763 .notifier_call = kprobe_exceptions_notify,
1764 .priority = 0x7fffffff /* we need to be notified first */
1765};
1766
3d7e3382
ME
1767unsigned long __weak arch_deref_entry_point(void *entry)
1768{
1769 return (unsigned long)entry;
1770}
1da177e4 1771
55479f64 1772int register_jprobes(struct jprobe **jps, int num)
1da177e4 1773{
26b31c19 1774 int ret = 0, i;
3d7e3382 1775
26b31c19 1776 if (num <= 0)
3d7e3382 1777 return -EINVAL;
0f73ff80 1778
26b31c19 1779 for (i = 0; i < num; i++) {
0f73ff80 1780 ret = register_jprobe(jps[i]);
edbaadbe 1781
67dddaad
MH
1782 if (ret < 0) {
1783 if (i > 0)
1784 unregister_jprobes(jps, i);
26b31c19
MH
1785 break;
1786 }
1787 }
0f73ff80 1788
26b31c19
MH
1789 return ret;
1790}
99081ab5 1791EXPORT_SYMBOL_GPL(register_jprobes);
3d7e3382 1792
55479f64 1793int register_jprobe(struct jprobe *jp)
26b31c19 1794{
0f73ff80
NR
1795 unsigned long addr, offset;
1796 struct kprobe *kp = &jp->kp;
1797
dbf58062
NR
1798 /*
1799	 * Verify that both the probepoint and the jprobe handler are
1800 * valid function entry points.
1801 */
0f73ff80
NR
1802 addr = arch_deref_entry_point(jp->entry);
1803
dbf58062
NR
1804 if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
1805 kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
0f73ff80
NR
1806 kp->pre_handler = setjmp_pre_handler;
1807 kp->break_handler = longjmp_break_handler;
1808 return register_kprobe(kp);
1809 }
1810
1811 return -EINVAL;
1da177e4 1812}
99081ab5 1813EXPORT_SYMBOL_GPL(register_jprobe);
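
/*
 * Usage sketch (illustrative only; in the spirit of
 * samples/kprobes/jprobe_example.c). A jprobe handler must mirror the
 * probed function's prototype and must finish with jprobe_return(),
 * which never returns normally, so the trailing return is unreachable.
 * Assume a hypothetical probed function: long some_function(long arg);
 *
 *	static long example_jhandler(long arg)
 *	{
 *		pr_info("entered with arg = 0x%lx\n", arg);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe example_jp = {
 *		.entry	= example_jhandler,
 *		.kp	= { .symbol_name = "some_function" },
 *	};
 *
 *	register_jprobe(&example_jp);
 *	...
 *	unregister_jprobe(&example_jp);
 */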
1da177e4 1814
55479f64 1815void unregister_jprobe(struct jprobe *jp)
1da177e4 1816{
26b31c19
MH
1817 unregister_jprobes(&jp, 1);
1818}
99081ab5 1819EXPORT_SYMBOL_GPL(unregister_jprobe);
26b31c19 1820
55479f64 1821void unregister_jprobes(struct jprobe **jps, int num)
26b31c19
MH
1822{
1823 int i;
1824
1825 if (num <= 0)
1826 return;
1827 mutex_lock(&kprobe_mutex);
1828 for (i = 0; i < num; i++)
1829 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1830 jps[i]->kp.addr = NULL;
1831 mutex_unlock(&kprobe_mutex);
1832
1833 synchronize_sched();
1834 for (i = 0; i < num; i++) {
1835 if (jps[i]->kp.addr)
1836 __unregister_kprobe_bottom(&jps[i]->kp);
1837 }
1da177e4 1838}
99081ab5 1839EXPORT_SYMBOL_GPL(unregister_jprobes);
1da177e4 1840
9edddaa2 1841#ifdef CONFIG_KRETPROBES
e65cefe8
AB
1842/*
1843 * This kprobe pre_handler is registered with every kretprobe. When the
1844 * probe hits, it sets up the return probe.
1845 */
820aede0 1846static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
e65cefe8
AB
1847{
1848 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
ef53d9c5
S
1849 unsigned long hash, flags = 0;
1850 struct kretprobe_instance *ri;
e65cefe8 1851
f96f5678
MH
1852 /*
1853 * To avoid deadlocks, prohibit return probing in NMI contexts,
1854 * just skip the probe and increase the (inexact) 'nmissed'
1855 * statistical counter, so that the user is informed that
1856 * something happened:
1857 */
1858 if (unlikely(in_nmi())) {
1859 rp->nmissed++;
1860 return 0;
1861 }
1862
1863	/* TODO: consider swapping the return address only after the last pre_handler has fired */
ef53d9c5 1864 hash = hash_ptr(current, KPROBE_HASH_BITS);
ec484608 1865 raw_spin_lock_irqsave(&rp->lock, flags);
4c4308cb 1866 if (!hlist_empty(&rp->free_instances)) {
4c4308cb 1867 ri = hlist_entry(rp->free_instances.first,
ef53d9c5
S
1868 struct kretprobe_instance, hlist);
1869 hlist_del(&ri->hlist);
ec484608 1870 raw_spin_unlock_irqrestore(&rp->lock, flags);
ef53d9c5 1871
4c4308cb
CH
1872 ri->rp = rp;
1873 ri->task = current;
f47cd9b5 1874
55ca6140
JL
1875 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1876 raw_spin_lock_irqsave(&rp->lock, flags);
1877 hlist_add_head(&ri->hlist, &rp->free_instances);
1878 raw_spin_unlock_irqrestore(&rp->lock, flags);
f47cd9b5 1879 return 0;
55ca6140 1880 }
f47cd9b5 1881
4c4308cb
CH
1882 arch_prepare_kretprobe(ri, regs);
1883
1884 /* XXX(hch): why is there no hlist_move_head? */
ef53d9c5
S
1885 INIT_HLIST_NODE(&ri->hlist);
1886 kretprobe_table_lock(hash, &flags);
1887 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1888 kretprobe_table_unlock(hash, &flags);
1889 } else {
4c4308cb 1890 rp->nmissed++;
ec484608 1891 raw_spin_unlock_irqrestore(&rp->lock, flags);
ef53d9c5 1892 }
e65cefe8
AB
1893 return 0;
1894}
820aede0 1895NOKPROBE_SYMBOL(pre_handler_kretprobe);
e65cefe8 1896
659b957f 1897bool __weak arch_kprobe_on_func_entry(unsigned long offset)
90ec5e89
NR
1898{
1899 return !offset;
1900}
1901
659b957f 1902bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1d585e70
NR
1903{
1904 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1905
1906 if (IS_ERR(kp_addr))
1907 return false;
1908
1909 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
659b957f 1910 !arch_kprobe_on_func_entry(offset))
1d585e70
NR
1911 return false;
1912
1913 return true;
1914}
1915
55479f64 1916int register_kretprobe(struct kretprobe *rp)
b94cce92
HN
1917{
1918 int ret = 0;
1919 struct kretprobe_instance *inst;
1920 int i;
b2a5cd69 1921 void *addr;
90ec5e89 1922
659b957f 1923 if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
90ec5e89 1924 return -EINVAL;
f438d914
MH
1925
1926 if (kretprobe_blacklist_size) {
b2a5cd69 1927 addr = kprobe_addr(&rp->kp);
bc81d48d
MH
1928 if (IS_ERR(addr))
1929 return PTR_ERR(addr);
f438d914
MH
1930
1931 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1932 if (kretprobe_blacklist[i].addr == addr)
1933 return -EINVAL;
1934 }
1935 }
b94cce92
HN
1936
1937 rp->kp.pre_handler = pre_handler_kretprobe;
7522a842
AM
1938 rp->kp.post_handler = NULL;
1939 rp->kp.fault_handler = NULL;
1940 rp->kp.break_handler = NULL;
b94cce92
HN
1941
1942 /* Pre-allocate memory for max kretprobe instances */
1943 if (rp->maxactive <= 0) {
1944#ifdef CONFIG_PREEMPT
c2ef6661 1945 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
b94cce92 1946#else
4dae560f 1947 rp->maxactive = num_possible_cpus();
b94cce92
HN
1948#endif
1949 }
ec484608 1950 raw_spin_lock_init(&rp->lock);
b94cce92
HN
1951 INIT_HLIST_HEAD(&rp->free_instances);
1952 for (i = 0; i < rp->maxactive; i++) {
f47cd9b5
AS
1953 inst = kmalloc(sizeof(struct kretprobe_instance) +
1954 rp->data_size, GFP_KERNEL);
b94cce92
HN
1955 if (inst == NULL) {
1956 free_rp_inst(rp);
1957 return -ENOMEM;
1958 }
ef53d9c5
S
1959 INIT_HLIST_NODE(&inst->hlist);
1960 hlist_add_head(&inst->hlist, &rp->free_instances);
b94cce92
HN
1961 }
1962
1963 rp->nmissed = 0;
1964 /* Establish function entry probe point */
49ad2fd7 1965 ret = register_kprobe(&rp->kp);
4a296e07 1966 if (ret != 0)
b94cce92
HN
1967 free_rp_inst(rp);
1968 return ret;
1969}
99081ab5 1970EXPORT_SYMBOL_GPL(register_kretprobe);
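
/*
 * Usage sketch (illustrative only; modeled on
 * samples/kprobes/kretprobe_example.c). The probed symbol and the
 * "example_*" names are assumptions; the return handler fires when the
 * probed function returns:
 *
 *	static int example_ret_handler(struct kretprobe_instance *ri,
 *				       struct pt_regs *regs)
 *	{
 *		pr_info("return value: %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.handler	= example_ret_handler,
 *		.kp		= { .symbol_name = "_do_fork" },
 *		.maxactive	= 20,
 *	};
 *
 *	register_kretprobe(&example_rp);
 *
 * maxactive bounds how many invocations can be probed concurrently;
 * the defaulting logic above applies when it is left <= 0.
 */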
b94cce92 1971
55479f64 1972int register_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
1973{
1974 int ret = 0, i;
1975
1976 if (num <= 0)
1977 return -EINVAL;
1978 for (i = 0; i < num; i++) {
49ad2fd7 1979 ret = register_kretprobe(rps[i]);
67dddaad
MH
1980 if (ret < 0) {
1981 if (i > 0)
1982 unregister_kretprobes(rps, i);
4a296e07
MH
1983 break;
1984 }
1985 }
1986 return ret;
1987}
99081ab5 1988EXPORT_SYMBOL_GPL(register_kretprobes);
4a296e07 1989
55479f64 1990void unregister_kretprobe(struct kretprobe *rp)
4a296e07
MH
1991{
1992 unregister_kretprobes(&rp, 1);
1993}
99081ab5 1994EXPORT_SYMBOL_GPL(unregister_kretprobe);
4a296e07 1995
55479f64 1996void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
1997{
1998 int i;
1999
2000 if (num <= 0)
2001 return;
2002 mutex_lock(&kprobe_mutex);
2003 for (i = 0; i < num; i++)
2004 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2005 rps[i]->kp.addr = NULL;
2006 mutex_unlock(&kprobe_mutex);
2007
2008 synchronize_sched();
2009 for (i = 0; i < num; i++) {
2010 if (rps[i]->kp.addr) {
2011 __unregister_kprobe_bottom(&rps[i]->kp);
2012 cleanup_rp_inst(rps[i]);
2013 }
2014 }
2015}
99081ab5 2016EXPORT_SYMBOL_GPL(unregister_kretprobes);
4a296e07 2017
9edddaa2 2018#else /* CONFIG_KRETPROBES */
55479f64 2019int register_kretprobe(struct kretprobe *rp)
b94cce92
HN
2020{
2021 return -ENOSYS;
2022}
99081ab5 2023EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce92 2024
55479f64 2025int register_kretprobes(struct kretprobe **rps, int num)
346fd59b 2026{
4a296e07 2027 return -ENOSYS;
346fd59b 2028}
99081ab5
MH
2029EXPORT_SYMBOL_GPL(register_kretprobes);
2030
55479f64 2031void unregister_kretprobe(struct kretprobe *rp)
b94cce92 2032{
4a296e07 2033}
99081ab5 2034EXPORT_SYMBOL_GPL(unregister_kretprobe);
b94cce92 2035
55479f64 2036void unregister_kretprobes(struct kretprobe **rps, int num)
4a296e07
MH
2037{
2038}
99081ab5 2039EXPORT_SYMBOL_GPL(unregister_kretprobes);
4c4308cb 2040
820aede0 2041static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
4a296e07
MH
2042{
2043 return 0;
b94cce92 2044}
820aede0 2045NOKPROBE_SYMBOL(pre_handler_kretprobe);
b94cce92 2046
4a296e07
MH
2047#endif /* CONFIG_KRETPROBES */
2048
e8386a0c 2049/* Mark the kprobe as gone and remove its instruction buffer. */
55479f64 2050static void kill_kprobe(struct kprobe *p)
e8386a0c
MH
2051{
2052 struct kprobe *kp;
de5bd88d 2053
e8386a0c 2054 p->flags |= KPROBE_FLAG_GONE;
afd66255 2055 if (kprobe_aggrprobe(p)) {
e8386a0c
MH
2056 /*
2057 * If this is an aggr_kprobe, we have to list all the
2058 * chained probes and mark them GONE.
2059 */
2060 list_for_each_entry_rcu(kp, &p->list, list)
2061 kp->flags |= KPROBE_FLAG_GONE;
2062 p->post_handler = NULL;
2063 p->break_handler = NULL;
afd66255 2064 kill_optimized_kprobe(p);
e8386a0c
MH
2065 }
2066 /*
2067 * Here, we can remove insn_slot safely, because no thread calls
2068 * the original probed function (which will be freed soon) any more.
2069 */
2070 arch_remove_kprobe(p);
2071}
2072
c0614829 2073/* Disable one kprobe */
55479f64 2074int disable_kprobe(struct kprobe *kp)
c0614829
MH
2075{
2076 int ret = 0;
c0614829
MH
2077
2078 mutex_lock(&kprobe_mutex);
2079
6f0f1dd7
MH
2080 /* Disable this kprobe */
2081 if (__disable_kprobe(kp) == NULL)
c0614829 2082 ret = -EINVAL;
c0614829 2083
c0614829
MH
2084 mutex_unlock(&kprobe_mutex);
2085 return ret;
2086}
2087EXPORT_SYMBOL_GPL(disable_kprobe);
2088
2089/* Enable one kprobe */
55479f64 2090int enable_kprobe(struct kprobe *kp)
c0614829
MH
2091{
2092 int ret = 0;
2093 struct kprobe *p;
2094
2095 mutex_lock(&kprobe_mutex);
2096
2097 /* Check whether specified probe is valid. */
2098 p = __get_valid_kprobe(kp);
2099 if (unlikely(p == NULL)) {
2100 ret = -EINVAL;
2101 goto out;
2102 }
2103
2104 if (kprobe_gone(kp)) {
2105		/* This kprobe has gone; we can't enable it. */
2106 ret = -EINVAL;
2107 goto out;
2108 }
2109
2110 if (p != kp)
2111 kp->flags &= ~KPROBE_FLAG_DISABLED;
2112
2113 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2114 p->flags &= ~KPROBE_FLAG_DISABLED;
2115 arm_kprobe(p);
2116 }
2117out:
2118 mutex_unlock(&kprobe_mutex);
2119 return ret;
2120}
2121EXPORT_SYMBOL_GPL(enable_kprobe);
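
/*
 * Usage sketch: disable_kprobe()/enable_kprobe() mute and re-arm an
 * already-registered probe without the full unregister/register cycle
 * (example_kp below stands for a hypothetical registered kprobe):
 *
 *	disable_kprobe(&example_kp);	breakpoint removed, registration kept
 *	...
 *	enable_kprobe(&example_kp);	probe re-armed
 */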
2122
820aede0 2123void dump_kprobe(struct kprobe *kp)
24851d24
FW
2124{
2125 printk(KERN_WARNING "Dumping kprobe:\n");
2126 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2127 kp->symbol_name, kp->addr, kp->offset);
2128}
820aede0 2129NOKPROBE_SYMBOL(dump_kprobe);
24851d24 2130
376e2424
MH
2131/*
2132 * Lookup and populate the kprobe_blacklist.
2133 *
2134 * Unlike the kretprobe blacklist, we'll need to determine
2135 * the range of addresses that belong to these functions,
2136 * since a kprobe need not be at the beginning
2137 * of a function.
2138 */
2139static int __init populate_kprobe_blacklist(unsigned long *start,
2140 unsigned long *end)
2141{
2142 unsigned long *iter;
2143 struct kprobe_blacklist_entry *ent;
d81b4253 2144 unsigned long entry, offset = 0, size = 0;
376e2424
MH
2145
2146 for (iter = start; iter < end; iter++) {
d81b4253
MH
2147 entry = arch_deref_entry_point((void *)*iter);
2148
2149 if (!kernel_text_address(entry) ||
2150 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2151 pr_err("Failed to find blacklist at %p\n",
2152 (void *)entry);
376e2424
MH
2153 continue;
2154 }
2155
2156 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2157 if (!ent)
2158 return -ENOMEM;
d81b4253
MH
2159 ent->start_addr = entry;
2160 ent->end_addr = entry + size;
376e2424
MH
2161 INIT_LIST_HEAD(&ent->list);
2162 list_add_tail(&ent->list, &kprobe_blacklist);
2163 }
2164 return 0;
2165}
2166
e8386a0c 2167/* Module notifier callback: check for kprobes in the module */
55479f64
MH
2168static int kprobes_module_callback(struct notifier_block *nb,
2169 unsigned long val, void *data)
e8386a0c
MH
2170{
2171 struct module *mod = data;
2172 struct hlist_head *head;
e8386a0c
MH
2173 struct kprobe *p;
2174 unsigned int i;
f24659d9 2175 int checkcore = (val == MODULE_STATE_GOING);
e8386a0c 2176
f24659d9 2177 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
e8386a0c
MH
2178 return NOTIFY_DONE;
2179
2180 /*
f24659d9
MH
2181	 * When MODULE_STATE_GOING is notified, both the module's .text and
2182	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2183	 * notified, only the .init.text section will be freed. We need to
2184	 * disable the kprobes that have been inserted in those sections.
e8386a0c
MH
2185 */
2186 mutex_lock(&kprobe_mutex);
2187 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2188 head = &kprobe_table[i];
b67bfe0d 2189 hlist_for_each_entry_rcu(p, head, hlist)
f24659d9
MH
2190 if (within_module_init((unsigned long)p->addr, mod) ||
2191 (checkcore &&
2192 within_module_core((unsigned long)p->addr, mod))) {
e8386a0c
MH
2193 /*
2194				 * The vaddr at which this probe is installed will soon
2195				 * be vfreed but not synced to disk. Hence,
2196				 * disarming the breakpoint isn't needed.
545a0281
SRV
2197 *
2198 * Note, this will also move any optimized probes
2199 * that are pending to be removed from their
2200 * corresponding lists to the freeing_list and
2201 * will not be touched by the delayed
2202 * kprobe_optimizer work handler.
e8386a0c
MH
2203 */
2204 kill_kprobe(p);
2205 }
2206 }
2207 mutex_unlock(&kprobe_mutex);
2208 return NOTIFY_DONE;
2209}
2210
2211static struct notifier_block kprobe_module_nb = {
2212 .notifier_call = kprobes_module_callback,
2213 .priority = 0
2214};
2215
376e2424
MH
2216/* Markers of _kprobe_blacklist section */
2217extern unsigned long __start_kprobe_blacklist[];
2218extern unsigned long __stop_kprobe_blacklist[];
2219
1da177e4
LT
2220static int __init init_kprobes(void)
2221{
2222 int i, err = 0;
2223
2224 /* FIXME allocate the probe table, currently defined statically */
2225 /* initialize all list heads */
b94cce92 2226 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1da177e4 2227 INIT_HLIST_HEAD(&kprobe_table[i]);
b94cce92 2228 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
ec484608 2229 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
b94cce92 2230 }
1da177e4 2231
376e2424
MH
2232 err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2233 __stop_kprobe_blacklist);
2234 if (err) {
2235 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2236 pr_err("Please take care of using kprobes.\n");
3d8d996e
SD
2237 }
2238
f438d914
MH
2239 if (kretprobe_blacklist_size) {
2240 /* lookup the function address from its name */
2241 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
49e0b465 2242 kretprobe_blacklist[i].addr =
290e3070 2243 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
f438d914
MH
2244 if (!kretprobe_blacklist[i].addr)
2245 printk("kretprobe: lookup failed: %s\n",
2246 kretprobe_blacklist[i].name);
2247 }
2248 }
2249
b2be84df
MH
2250#if defined(CONFIG_OPTPROBES)
2251#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
afd66255
MH
2252 /* Init kprobe_optinsn_slots */
2253 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2254#endif
b2be84df
MH
2255 /* By default, kprobes can be optimized */
2256 kprobes_allow_optimization = true;
2257#endif
afd66255 2258
e579abeb
MH
2259 /* By default, kprobes are armed */
2260 kprobes_all_disarmed = false;
bf8f6e5b 2261
6772926b 2262 err = arch_init_kprobes();
802eae7c
RL
2263 if (!err)
2264 err = register_die_notifier(&kprobe_exceptions_nb);
e8386a0c
MH
2265 if (!err)
2266 err = register_module_notifier(&kprobe_module_nb);
2267
ef53d9c5 2268 kprobes_initialized = (err == 0);
802eae7c 2269
8c1c9356
AM
2270 if (!err)
2271 init_test_probes();
1da177e4
LT
2272 return err;
2273}
2274
346fd59b 2275#ifdef CONFIG_DEBUG_FS
55479f64 2276static void report_probe(struct seq_file *pi, struct kprobe *p,
afd66255 2277 const char *sym, int offset, char *modname, struct kprobe *pp)
346fd59b
SD
2278{
2279 char *kprobe_type;
2280
2281 if (p->pre_handler == pre_handler_kretprobe)
2282 kprobe_type = "r";
2283 else if (p->pre_handler == setjmp_pre_handler)
2284 kprobe_type = "j";
2285 else
2286 kprobe_type = "k";
afd66255 2287
346fd59b 2288 if (sym)
afd66255 2289 seq_printf(pi, "%p %s %s+0x%x %s ",
de5bd88d 2290 p->addr, kprobe_type, sym, offset,
afd66255 2291 (modname ? modname : " "));
346fd59b 2292 else
afd66255
MH
2293 seq_printf(pi, "%p %s %p ",
2294 p->addr, kprobe_type, p->addr);
2295
2296 if (!pp)
2297 pp = p;
ae6aa16f 2298 seq_printf(pi, "%s%s%s%s\n",
afd66255
MH
2299 (kprobe_gone(p) ? "[GONE]" : ""),
2300 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
ae6aa16f
MH
2301 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2302 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
346fd59b
SD
2303}
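
/*
 * With the format above, lines in the debugfs "list" file look roughly
 * like the following (addresses are illustrative; the bracketed state
 * tags are appended only when they apply):
 *
 *	c015d71a  k  vfs_read+0x0
 *	c011a316  j  do_fork+0x0
 *	c03dedc5  r  tcp_v4_rcv+0x0  [DISABLED]
 */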
2304
55479f64 2305static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
346fd59b
SD
2306{
2307 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2308}
2309
55479f64 2310static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
346fd59b
SD
2311{
2312 (*pos)++;
2313 if (*pos >= KPROBE_TABLE_SIZE)
2314 return NULL;
2315 return pos;
2316}
2317
55479f64 2318static void kprobe_seq_stop(struct seq_file *f, void *v)
346fd59b
SD
2319{
2320 /* Nothing to do */
2321}
2322
55479f64 2323static int show_kprobe_addr(struct seq_file *pi, void *v)
346fd59b
SD
2324{
2325 struct hlist_head *head;
346fd59b
SD
2326 struct kprobe *p, *kp;
2327 const char *sym = NULL;
2328 unsigned int i = *(loff_t *) v;
ffb45122 2329 unsigned long offset = 0;
ab767865 2330 char *modname, namebuf[KSYM_NAME_LEN];
346fd59b
SD
2331
2332 head = &kprobe_table[i];
2333 preempt_disable();
b67bfe0d 2334 hlist_for_each_entry_rcu(p, head, hlist) {
ffb45122 2335 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
346fd59b 2336 &offset, &modname, namebuf);
afd66255 2337 if (kprobe_aggrprobe(p)) {
346fd59b 2338 list_for_each_entry_rcu(kp, &p->list, list)
afd66255 2339 report_probe(pi, kp, sym, offset, modname, p);
346fd59b 2340 } else
afd66255 2341 report_probe(pi, p, sym, offset, modname, NULL);
346fd59b
SD
2342 }
2343 preempt_enable();
2344 return 0;
2345}
2346
88e9d34c 2347static const struct seq_operations kprobes_seq_ops = {
346fd59b
SD
2348 .start = kprobe_seq_start,
2349 .next = kprobe_seq_next,
2350 .stop = kprobe_seq_stop,
2351 .show = show_kprobe_addr
2352};
2353
55479f64 2354static int kprobes_open(struct inode *inode, struct file *filp)
346fd59b
SD
2355{
2356 return seq_open(filp, &kprobes_seq_ops);
2357}
2358
828c0950 2359static const struct file_operations debugfs_kprobes_operations = {
346fd59b
SD
2360 .open = kprobes_open,
2361 .read = seq_read,
2362 .llseek = seq_lseek,
2363 .release = seq_release,
2364};
2365
63724740
MH
2366/* kprobes/blacklist -- shows which functions cannot be probed */
2367static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2368{
2369 return seq_list_start(&kprobe_blacklist, *pos);
2370}
2371
2372static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2373{
2374 return seq_list_next(v, &kprobe_blacklist, pos);
2375}
2376
2377static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2378{
2379 struct kprobe_blacklist_entry *ent =
2380 list_entry(v, struct kprobe_blacklist_entry, list);
2381
2382 seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2383 (void *)ent->end_addr, (void *)ent->start_addr);
2384 return 0;
2385}
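
/*
 * Each blacklist entry prints as "0x<start>-0x<end>\t<symbol>", for
 * example (address range and symbol illustrative):
 *
 *	0xffffffff81027000-0xffffffff81027030	kprobe_int3_handler
 */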
2386
2387static const struct seq_operations kprobe_blacklist_seq_ops = {
2388 .start = kprobe_blacklist_seq_start,
2389 .next = kprobe_blacklist_seq_next,
2390 .stop = kprobe_seq_stop, /* Reuse void function */
2391 .show = kprobe_blacklist_seq_show,
2392};
2393
2394static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2395{
2396 return seq_open(filp, &kprobe_blacklist_seq_ops);
2397}
2398
2399static const struct file_operations debugfs_kprobe_blacklist_ops = {
2400 .open = kprobe_blacklist_open,
2401 .read = seq_read,
2402 .llseek = seq_lseek,
2403 .release = seq_release,
2404};
2405
55479f64 2406static void arm_all_kprobes(void)
bf8f6e5b
AM
2407{
2408 struct hlist_head *head;
bf8f6e5b
AM
2409 struct kprobe *p;
2410 unsigned int i;
2411
2412 mutex_lock(&kprobe_mutex);
2413
e579abeb
MH
2414 /* If kprobes are armed, just return */
2415 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2416 goto already_enabled;
2417
977ad481
WN
2418 /*
2419 * optimize_kprobe() called by arm_kprobe() checks
2420 * kprobes_all_disarmed, so set kprobes_all_disarmed before
2421 * arm_kprobe.
2422 */
2423 kprobes_all_disarmed = false;
afd66255 2424	/* Arming a kprobe doesn't optimize the kprobe itself */
bf8f6e5b
AM
2425 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2426 head = &kprobe_table[i];
b67bfe0d 2427 hlist_for_each_entry_rcu(p, head, hlist)
de5bd88d 2428 if (!kprobe_disabled(p))
ae6aa16f 2429 arm_kprobe(p);
bf8f6e5b
AM
2430 }
2431
bf8f6e5b
AM
2432 printk(KERN_INFO "Kprobes globally enabled\n");
2433
2434already_enabled:
2435 mutex_unlock(&kprobe_mutex);
2436 return;
2437}
2438
55479f64 2439static void disarm_all_kprobes(void)
bf8f6e5b
AM
2440{
2441 struct hlist_head *head;
bf8f6e5b
AM
2442 struct kprobe *p;
2443 unsigned int i;
2444
2445 mutex_lock(&kprobe_mutex);
2446
e579abeb 2447 /* If kprobes are already disarmed, just return */
6274de49
MH
2448 if (kprobes_all_disarmed) {
2449 mutex_unlock(&kprobe_mutex);
2450 return;
2451 }
bf8f6e5b 2452
e579abeb 2453 kprobes_all_disarmed = true;
bf8f6e5b 2454 printk(KERN_INFO "Kprobes globally disabled\n");
afd66255 2455
bf8f6e5b
AM
2456 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2457 head = &kprobe_table[i];
b67bfe0d 2458 hlist_for_each_entry_rcu(p, head, hlist) {
de5bd88d 2459 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
ae6aa16f 2460 disarm_kprobe(p, false);
bf8f6e5b
AM
2461 }
2462 }
bf8f6e5b 2463 mutex_unlock(&kprobe_mutex);
bf8f6e5b 2464
6274de49
MH
2465	/* Wait for the optimizer to finish disarming all kprobes */
2466 wait_for_kprobe_optimizer();
bf8f6e5b
AM
2467}
2468
2469/*
2470 * XXX: The debugfs bool file interface doesn't allow for callbacks
2471 * when the bool state is switched. We can switch to that facility
2472 * when it becomes available.
2473 */
2474static ssize_t read_enabled_file_bool(struct file *file,
2475 char __user *user_buf, size_t count, loff_t *ppos)
2476{
2477 char buf[3];
2478
e579abeb 2479 if (!kprobes_all_disarmed)
bf8f6e5b
AM
2480 buf[0] = '1';
2481 else
2482 buf[0] = '0';
2483 buf[1] = '\n';
2484 buf[2] = 0x00;
2485 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2486}
2487
2488static ssize_t write_enabled_file_bool(struct file *file,
2489 const char __user *user_buf, size_t count, loff_t *ppos)
2490{
2491 char buf[32];
efeb156e 2492 size_t buf_size;
bf8f6e5b
AM
2493
2494 buf_size = min(count, (sizeof(buf)-1));
2495 if (copy_from_user(buf, user_buf, buf_size))
2496 return -EFAULT;
2497
10fb46d5 2498 buf[buf_size] = '\0';
bf8f6e5b
AM
2499 switch (buf[0]) {
2500 case 'y':
2501 case 'Y':
2502 case '1':
e579abeb 2503 arm_all_kprobes();
bf8f6e5b
AM
2504 break;
2505 case 'n':
2506 case 'N':
2507 case '0':
e579abeb 2508 disarm_all_kprobes();
bf8f6e5b 2509 break;
10fb46d5
MK
2510 default:
2511 return -EINVAL;
bf8f6e5b
AM
2512 }
2513
2514 return count;
2515}
2516
828c0950 2517static const struct file_operations fops_kp = {
bf8f6e5b
AM
2518 .read = read_enabled_file_bool,
2519 .write = write_enabled_file_bool,
6038f373 2520 .llseek = default_llseek,
bf8f6e5b
AM
2521};
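
/*
 * Usage sketch: with debugfs mounted at /sys/kernel/debug, all kprobes
 * can be disarmed and re-armed from user space through this file:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 */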
2522
55479f64 2523static int __init debugfs_kprobe_init(void)
346fd59b
SD
2524{
2525 struct dentry *dir, *file;
bf8f6e5b 2526 unsigned int value = 1;
346fd59b
SD
2527
2528 dir = debugfs_create_dir("kprobes", NULL);
2529 if (!dir)
2530 return -ENOMEM;
2531
e3869792 2532 file = debugfs_create_file("list", 0444, dir, NULL,
346fd59b 2533 &debugfs_kprobes_operations);
63724740
MH
2534 if (!file)
2535 goto error;
346fd59b 2536
bf8f6e5b
AM
2537 file = debugfs_create_file("enabled", 0600, dir,
2538 &value, &fops_kp);
63724740
MH
2539 if (!file)
2540 goto error;
2541
2542 file = debugfs_create_file("blacklist", 0444, dir, NULL,
2543 &debugfs_kprobe_blacklist_ops);
2544 if (!file)
2545 goto error;
bf8f6e5b 2546
346fd59b 2547 return 0;
63724740
MH
2548
2549error:
2550 debugfs_remove(dir);
2551 return -ENOMEM;
346fd59b
SD
2552}
2553
2554late_initcall(debugfs_kprobe_init);
2555#endif /* CONFIG_DEBUG_FS */
2556
2557module_init(init_kprobes);
1da177e4 2558
99081ab5 2559/* defined in arch/.../kernel/kprobes.c */
1da177e4 2560EXPORT_SYMBOL_GPL(jprobe_return);