/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from anywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
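/*
 * Worked example (illustrative only, values are arch-dependent): on x86,
 * where kprobe_opcode_t is one byte and MAX_INSN_SIZE is 16, a 4096-byte
 * page yields 4096 / (16 * 1) = 256 slots per page for the regular
 * instruction cache.
 */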

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	mutex_lock(&c->mutex);
 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}
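/*
 * Usage sketch (how arch code typically consumes this cache; a minimal
 * sketch, assuming the get_insn_slot()/free_insn_slot() wrappers that
 * <linux/kprobes.h> generates around kprobe_insn_slots):
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr,
 *	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);	// 0: slot is clean, reuse now
 */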

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is running on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&c->mutex);
	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			goto out;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
out:
	mutex_unlock(&c->mutex);
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
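/*
 * Example (illustrative sketch): a typical lookup from arch fault handling,
 * done with preemption disabled so the RCU-protected hash list is safe
 * to walk:
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)instruction_pointer(regs));
 *	if (p)
 *		... dispatch to p->pre_handler ...
 *	preempt_enable_no_resched();
 */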

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from arch-dep optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int __kprobes kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static __kprobes void do_optimize_kprobes(void)
{
	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex will be held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex, but stop_machine can not be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we need to call get_online_cpus()
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need be) kprobes listed on unoptimizing_list.
 */
static __kprobes void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must always be done, even when disarmed */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on the freeing_list */
static __kprobes void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static __kprobes void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all running
	 * interrupts are done. Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted. In that case, the running interrupt can return
	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
static __kprobes void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is an unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize
			 * an unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (meaning there is still a relative jump in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize an ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
}

#ifdef CONFIG_SYSCTL
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
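/*
 * This handler backs /proc/sys/debug/kprobes-optimization (see
 * Documentation/kprobes.txt). For example, from a shell (assuming
 * CONFIG_OPTPROBES and CONFIG_SYSCTL are enabled):
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization	  (unoptimize all)
 *	# echo 1 > /proc/sys/debug/kprobes-optimization	  (re-allow optimization)
 */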

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	unoptimize_kprobe(p, false);	/* Try to unoptimize */

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* Without optimization, there should be no unused kprobes to reuse */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int __kprobes prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_unlock(unsigned long hash,
       unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
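/*
 * Usage sketch (illustrative only): the arch kretprobe trampoline handler
 * brackets its walk of the per-task instance list with the hash lock:
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;
 *		... recover the return address, run ri->rp->handler ...
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 */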

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	/*
	 * Get online CPUs to avoid a text_mutex deadlock with stop_machine,
	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location that
		 * had a probe in the module vaddr area which has already been
		 * freed. So, the instruction slot has already been released.
		 * We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify against it and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
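/*
 * For reference: a function can opt out of probing by carrying the
 * __kprobes annotation, which places it in the .kprobes.text section
 * checked above, e.g. (my_helper is a made-up name for illustration):
 *
 *	static int __kprobes my_helper(void) { ... }
 */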

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}
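/*
 * Example: these two initializers resolve to the same probe point
 * (do_fork_addr is a hypothetical variable holding do_fork's address,
 * for illustration only):
 *
 *	struct kprobe kp1 = { .symbol_name = "do_fork", .offset = 0x10 };
 *	struct kprobe kp2 = { .addr = (kprobe_opcode_t *)(do_fork_addr + 0x10) };
 *
 * Setting both .symbol_name and .addr (or neither) is rejected with
 * -EINVAL above.
 */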

/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

static __kprobes int check_kprobe_address_safe(struct kprobe *p,
					       struct module **probed_mod)
{
	int ret = 0;
	unsigned long ftrace_addr;

	/*
	 * If the address is located on an ftrace nop, set the
	 * breakpoint to the following instruction.
	 */
	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}

	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in a reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);	/* Avoiding text modification */
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
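/*
 * Example usage: a minimal module sketch in the spirit of
 * samples/kprobes/kprobe_example.c (the probed symbol is an
 * illustrative choice):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre_handler: addr = %p\n", p->addr);
 *		return 0;	// 0: let the probed instruction run
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init kprobe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit kprobe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *	module_init(kprobe_init);
 *	module_exit(kprobe_exit);
 */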

/* Check if all probes on the aggrprobe are disabled */
static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Make sure this is called with kprobe_mutex held */
static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get the original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove it from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the disabling probe has special handlers, update the aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
1da177e4 1723
d0aaff97 1724void __kprobes unregister_jprobe(struct jprobe *jp)
1da177e4 1725{
26b31c19
MH
1726 unregister_jprobes(&jp, 1);
1727}
99081ab5 1728EXPORT_SYMBOL_GPL(unregister_jprobe);
26b31c19 1729
26b31c19
MH
1730void __kprobes unregister_jprobes(struct jprobe **jps, int num)
1731{
1732 int i;
1733
1734 if (num <= 0)
1735 return;
1736 mutex_lock(&kprobe_mutex);
1737 for (i = 0; i < num; i++)
1738 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1739 jps[i]->kp.addr = NULL;
1740 mutex_unlock(&kprobe_mutex);
1741
1742 synchronize_sched();
1743 for (i = 0; i < num; i++) {
1744 if (jps[i]->kp.addr)
1745 __unregister_kprobe_bottom(&jps[i]->kp);
1746 }
1da177e4 1747}
99081ab5 1748EXPORT_SYMBOL_GPL(unregister_jprobes);
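
/*
 * Example (illustrative sketch, not part of this file): a jprobe entry
 * handler must mirror the prototype of the probed function and must end
 * with jprobe_return(). The do_fork() prototype shown matches this
 * kernel series but should be checked against the actual tree:
 *
 *	static long my_do_fork(unsigned long clone_flags,
 *			       unsigned long stack_start,
 *			       unsigned long stack_size,
 *			       int __user *parent_tidptr,
 *			       int __user *child_tidptr)
 *	{
 *		pr_info("fork: clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();	(mandatory; never returns past here)
 *		return 0;		(unreachable, keeps the compiler happy)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_do_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_jprobe(&my_jp);
 *	...
 *	unregister_jprobe(&my_jp);
 */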

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
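
/*
 * Example (illustrative sketch, not part of this file): a kretprobe that
 * measures function latency, patterned after the in-tree kretprobe
 * sample. The names and probed symbol are hypothetical; ri->data gives
 * data_size bytes of per-instance storage shared between the entry and
 * return handlers, and a nonzero return from the entry handler recycles
 * the instance unused:
 *
 *	struct my_data {
 *		ktime_t entry_stamp;
 *	};
 *
 *	static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		struct my_data *data = (struct my_data *)ri->data;
 *
 *		data->entry_stamp = ktime_get();
 *		return 0;
 *	}
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		struct my_data *data = (struct my_data *)ri->data;
 *		unsigned long retval = regs_return_value(regs);
 *		s64 delta = ktime_to_ns(ktime_sub(ktime_get(),
 *						  data->entry_stamp));
 *
 *		pr_info("returned %lu after %lld ns\n", retval, delta);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.entry_handler	= my_entry,
 *		.handler	= my_ret,
 *		.data_size	= sizeof(struct my_data),
 *		.maxactive	= 20,
 *	};
 *
 *	ret = register_kretprobe(&my_rp);
 */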

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Mark the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to walk all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
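
/*
 * Example (illustrative sketch): a registered probe can be parked and
 * revived without a full unregister/register round trip; my_kp is a
 * hypothetical, already-registered kprobe:
 *
 *	ret = disable_kprobe(&my_kp);	(stop firing, stay registered)
 *	...
 *	ret = enable_kprobe(&my_kp);	(re-arm; -EINVAL if the probe is gone)
 */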

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable the kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Look up and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
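
/*
 * For reference, a debugfs "list" line reads "<address> <type>
 * <symbol+offset> [module] [flags]", where the type letter is k
 * (kprobe), r (kretprobe) or j (jprobe). Addresses below are
 * illustrative:
 *
 *	c015d71a k vfs_read+0x0
 *	c03dedc5 r tcp_v4_rcv+0x0  [DISABLED]
 */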

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize the kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,
	.llseek = default_llseek,
};
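
/*
 * Usage sketch (the path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 *
 * Writing '0' (or 'n'/'N') disarms every kprobe; writing '1' (or
 * 'y'/'Y') re-arms them. Probes stay registered while disarmed; only
 * the breakpoints are removed.
 */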

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);