kernel/trace/trace_kprobe.c
1 /*
2 * Kprobes-based tracing events
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define pr_fmt(fmt) "trace_kprobe: " fmt
20
21 #include <linux/module.h>
22 #include <linux/uaccess.h>
23
24 #include "trace_probe.h"
25
26 #define KPROBE_EVENT_SYSTEM "kprobes"
27
28 /*
29 * Kprobe event core functions
30 */
31 struct trace_kprobe {
32 struct list_head list;
33 struct kretprobe rp; /* Use rp.kp for kprobe use */
34 unsigned long __percpu *nhit;
35 const char *symbol; /* symbol name */
36 struct trace_probe tp;
37 };
38
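/*
 * Total allocation size of a trace_kprobe carrying 'n' probe arguments:
 * tp.args is the flexible probe_arg array at the end of the embedded
 * struct trace_probe, so the size is its offset plus n probe_arg entries.
 */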
39 #define SIZEOF_TRACE_KPROBE(n) \
40 (offsetof(struct trace_kprobe, tp.args) + \
41 (sizeof(struct probe_arg) * (n)))
42
43
44 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
45 {
46 return tk->rp.handler != NULL;
47 }
48
49 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
50 {
51 return tk->symbol ? tk->symbol : "unknown";
52 }
53
54 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
55 {
56 return tk->rp.kp.offset;
57 }
58
59 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
60 {
61 return !!(kprobe_gone(&tk->rp.kp));
62 }
63
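/*
 * Probes on module symbols use the "MODNAME:SYMBOL" form (the [MOD:]KSYM
 * syntax accepted by create_trace_kprobe() below), so the module name can
 * be matched against the prefix of the symbol string.
 */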
64 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
65 struct module *mod)
66 {
67 int len = strlen(mod->name);
68 const char *name = trace_kprobe_symbol(tk);
69 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
70 }
71
72 static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
73 {
74 return !!strchr(trace_kprobe_symbol(tk), ':');
75 }
76
77 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
78 {
79 unsigned long nhit = 0;
80 int cpu;
81
82 for_each_possible_cpu(cpu)
83 nhit += *per_cpu_ptr(tk->nhit, cpu);
84
85 return nhit;
86 }
87
88 static int register_kprobe_event(struct trace_kprobe *tk);
89 static int unregister_kprobe_event(struct trace_kprobe *tk);
90
91 static DEFINE_MUTEX(probe_lock);
92 static LIST_HEAD(probe_list);
93
94 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
95 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
96 struct pt_regs *regs);
97
98 /* Memory fetching by symbol */
99 struct symbol_cache {
100 char *symbol;
101 long offset;
102 unsigned long addr;
103 };
104
105 unsigned long update_symbol_cache(struct symbol_cache *sc)
106 {
107 sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
108
109 if (sc->addr)
110 sc->addr += sc->offset;
111
112 return sc->addr;
113 }
114
115 void free_symbol_cache(struct symbol_cache *sc)
116 {
117 kfree(sc->symbol);
118 kfree(sc);
119 }
120
121 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
122 {
123 struct symbol_cache *sc;
124
125 if (!sym || strlen(sym) == 0)
126 return NULL;
127
128 sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
129 if (!sc)
130 return NULL;
131
132 sc->symbol = kstrdup(sym, GFP_KERNEL);
133 if (!sc->symbol) {
134 kfree(sc);
135 return NULL;
136 }
137 sc->offset = offset;
138 update_symbol_cache(sc);
139
140 return sc;
141 }
142
143 /*
144 * Kprobes-specific fetch functions
145 */
146 #define DEFINE_FETCH_stack(type) \
147 static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
148 void *offset, void *dest) \
149 { \
150 *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
151 (unsigned int)((unsigned long)offset)); \
152 } \
153 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
154
155 DEFINE_BASIC_FETCH_FUNCS(stack)
156 /* String fetch is not supported for stack entries */
157 #define fetch_stack_string NULL
158 #define fetch_stack_string_size NULL
159
160 #define DEFINE_FETCH_memory(type) \
161 static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
162 void *addr, void *dest) \
163 { \
164 type retval; \
165 if (probe_kernel_address(addr, retval)) \
166 *(type *)dest = 0; \
167 else \
168 *(type *)dest = retval; \
169 } \
170 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
171
172 DEFINE_BASIC_FETCH_FUNCS(memory)
173 /*
174 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
175 * length and relative data location.
176 */
177 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
178 void *addr, void *dest)
179 {
180 int maxlen = get_rloc_len(*(u32 *)dest);
181 u8 *dst = get_rloc_data(dest);
182 long ret;
183
184 if (!maxlen)
185 return;
186
187 /*
188 * Try to get string again, since the string can be changed while
189 * probing.
190 */
191 ret = strncpy_from_unsafe(dst, addr, maxlen);
192
193 if (ret < 0) { /* Failed to fetch string */
194 dst[0] = '\0';
195 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
196 } else {
197 *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
198 }
199 }
200 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
201
202 /* Return the length of the string, including the terminating NUL byte */
203 static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
204 void *addr, void *dest)
205 {
206 mm_segment_t old_fs;
207 int ret, len = 0;
208 u8 c;
209
210 old_fs = get_fs();
211 set_fs(KERNEL_DS);
212 pagefault_disable();
213
214 do {
215 ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
216 len++;
217 } while (c && ret == 0 && len < MAX_STRING_SIZE);
218
219 pagefault_enable();
220 set_fs(old_fs);
221
222 if (ret < 0) /* Failed to check the length */
223 *(u32 *)dest = 0;
224 else
225 *(u32 *)dest = len;
226 }
227 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
228
229 #define DEFINE_FETCH_symbol(type) \
230 void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
231 { \
232 struct symbol_cache *sc = data; \
233 if (sc->addr) \
234 fetch_memory_##type(regs, (void *)sc->addr, dest); \
235 else \
236 *(type *)dest = 0; \
237 } \
238 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
239
240 DEFINE_BASIC_FETCH_FUNCS(symbol)
241 DEFINE_FETCH_symbol(string)
242 DEFINE_FETCH_symbol(string_size)
243
244 /* kprobes don't support file_offset fetch methods */
245 #define fetch_file_offset_u8 NULL
246 #define fetch_file_offset_u16 NULL
247 #define fetch_file_offset_u32 NULL
248 #define fetch_file_offset_u64 NULL
249 #define fetch_file_offset_string NULL
250 #define fetch_file_offset_string_size NULL
251
252 /* Fetch type information table */
253 static const struct fetch_type kprobes_fetch_type_table[] = {
254 /* Special types */
255 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
256 sizeof(u32), 1, "__data_loc char[]"),
257 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
258 string_size, sizeof(u32), 0, "u32"),
259 /* Basic types */
260 ASSIGN_FETCH_TYPE(u8, u8, 0),
261 ASSIGN_FETCH_TYPE(u16, u16, 0),
262 ASSIGN_FETCH_TYPE(u32, u32, 0),
263 ASSIGN_FETCH_TYPE(u64, u64, 0),
264 ASSIGN_FETCH_TYPE(s8, u8, 1),
265 ASSIGN_FETCH_TYPE(s16, u16, 1),
266 ASSIGN_FETCH_TYPE(s32, u32, 1),
267 ASSIGN_FETCH_TYPE(s64, u64, 1),
268 ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
269 ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
270 ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
271 ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
272
273 ASSIGN_FETCH_TYPE_END
274 };
275
276 /*
277 * Allocate new trace_probe and initialize it (including kprobes).
278 */
279 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
280 const char *event,
281 void *addr,
282 const char *symbol,
283 unsigned long offs,
284 int nargs, bool is_return)
285 {
286 struct trace_kprobe *tk;
287 int ret = -ENOMEM;
288
289 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
290 if (!tk)
291 return ERR_PTR(ret);
292
293 tk->nhit = alloc_percpu(unsigned long);
294 if (!tk->nhit)
295 goto error;
296
297 if (symbol) {
298 tk->symbol = kstrdup(symbol, GFP_KERNEL);
299 if (!tk->symbol)
300 goto error;
301 tk->rp.kp.symbol_name = tk->symbol;
302 tk->rp.kp.offset = offs;
303 } else
304 tk->rp.kp.addr = addr;
305
306 if (is_return)
307 tk->rp.handler = kretprobe_dispatcher;
308 else
309 tk->rp.kp.pre_handler = kprobe_dispatcher;
310
311 if (!event || !is_good_name(event)) {
312 ret = -EINVAL;
313 goto error;
314 }
315
316 tk->tp.call.class = &tk->tp.class;
317 tk->tp.call.name = kstrdup(event, GFP_KERNEL);
318 if (!tk->tp.call.name)
319 goto error;
320
321 if (!group || !is_good_name(group)) {
322 ret = -EINVAL;
323 goto error;
324 }
325
326 tk->tp.class.system = kstrdup(group, GFP_KERNEL);
327 if (!tk->tp.class.system)
328 goto error;
329
330 INIT_LIST_HEAD(&tk->list);
331 INIT_LIST_HEAD(&tk->tp.files);
332 return tk;
333 error:
334 kfree(tk->tp.call.name);
335 kfree(tk->symbol);
336 free_percpu(tk->nhit);
337 kfree(tk);
338 return ERR_PTR(ret);
339 }
340
341 static void free_trace_kprobe(struct trace_kprobe *tk)
342 {
343 int i;
344
345 for (i = 0; i < tk->tp.nr_args; i++)
346 traceprobe_free_probe_arg(&tk->tp.args[i]);
347
348 kfree(tk->tp.call.class->system);
349 kfree(tk->tp.call.name);
350 kfree(tk->symbol);
351 free_percpu(tk->nhit);
352 kfree(tk);
353 }
354
355 static struct trace_kprobe *find_trace_kprobe(const char *event,
356 const char *group)
357 {
358 struct trace_kprobe *tk;
359
360 list_for_each_entry(tk, &probe_list, list)
361 if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
362 strcmp(tk->tp.call.class->system, group) == 0)
363 return tk;
364 return NULL;
365 }
366
367 /*
368 * Enable trace_probe
369 * If file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
370 */
371 static int
372 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
373 {
374 int ret = 0;
375
376 if (file) {
377 struct event_file_link *link;
378
379 link = kmalloc(sizeof(*link), GFP_KERNEL);
380 if (!link) {
381 ret = -ENOMEM;
382 goto out;
383 }
384
385 link->file = file;
386 list_add_tail_rcu(&link->list, &tk->tp.files);
387
388 tk->tp.flags |= TP_FLAG_TRACE;
389 } else
390 tk->tp.flags |= TP_FLAG_PROFILE;
391
392 if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
393 if (trace_kprobe_is_return(tk))
394 ret = enable_kretprobe(&tk->rp);
395 else
396 ret = enable_kprobe(&tk->rp.kp);
397 }
398 out:
399 return ret;
400 }
401
402 /*
403 * Disable trace_probe
404 * If file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
405 */
406 static int
407 disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
408 {
409 struct event_file_link *link = NULL;
410 int wait = 0;
411 int ret = 0;
412
413 if (file) {
414 link = find_event_file_link(&tk->tp, file);
415 if (!link) {
416 ret = -EINVAL;
417 goto out;
418 }
419
420 list_del_rcu(&link->list);
421 wait = 1;
422 if (!list_empty(&tk->tp.files))
423 goto out;
424
425 tk->tp.flags &= ~TP_FLAG_TRACE;
426 } else
427 tk->tp.flags &= ~TP_FLAG_PROFILE;
428
429 if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
430 if (trace_kprobe_is_return(tk))
431 disable_kretprobe(&tk->rp);
432 else
433 disable_kprobe(&tk->rp.kp);
434 wait = 1;
435 }
436 out:
437 if (wait) {
438 /*
439 * Synchronize with kprobe_trace_func/kretprobe_trace_func to ensure
440 * the probe is fully disabled (all running handlers have finished).
441 * This matters not only for the kfree() below, but also for the
442 * caller: trace_remove_event_call() relies on it before releasing
443 * event_call related objects, which kprobe_trace_func/
444 * kretprobe_trace_func may still access.
445 */
446 synchronize_sched();
447 kfree(link); /* Ignored if link == NULL */
448 }
449
450 return ret;
451 }
452
453 /* Internal register function - just handle k*probes and flags */
454 static int __register_trace_kprobe(struct trace_kprobe *tk)
455 {
456 int i, ret;
457
458 if (trace_probe_is_registered(&tk->tp))
459 return -EINVAL;
460
461 for (i = 0; i < tk->tp.nr_args; i++)
462 traceprobe_update_arg(&tk->tp.args[i]);
463
464 /* Set/clear disabled flag according to tp->flags */
465 if (trace_probe_is_enabled(&tk->tp))
466 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
467 else
468 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
469
470 if (trace_kprobe_is_return(tk))
471 ret = register_kretprobe(&tk->rp);
472 else
473 ret = register_kprobe(&tk->rp.kp);
474
475 if (ret == 0)
476 tk->tp.flags |= TP_FLAG_REGISTERED;
477 else {
478 pr_warn("Could not insert probe at %s+%lu: %d\n",
479 trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
480 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
481 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
482 ret = 0;
483 } else if (ret == -EILSEQ) {
484 pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
485 tk->rp.kp.addr);
486 ret = -EINVAL;
487 }
488 }
489
490 return ret;
491 }
492
493 /* Internal unregister function - just handle k*probes and flags */
494 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
495 {
496 if (trace_probe_is_registered(&tk->tp)) {
497 if (trace_kprobe_is_return(tk))
498 unregister_kretprobe(&tk->rp);
499 else
500 unregister_kprobe(&tk->rp.kp);
501 tk->tp.flags &= ~TP_FLAG_REGISTERED;
502 /* Cleanup kprobe for reuse */
503 if (tk->rp.kp.symbol_name)
504 tk->rp.kp.addr = NULL;
505 }
506 }
507
508 /* Unregister a trace_probe and probe_event: must be called with probe_lock held */
509 static int unregister_trace_kprobe(struct trace_kprobe *tk)
510 {
511 /* Enabled event can not be unregistered */
512 if (trace_probe_is_enabled(&tk->tp))
513 return -EBUSY;
514
515 /* Will fail if probe is being used by ftrace or perf */
516 if (unregister_kprobe_event(tk))
517 return -EBUSY;
518
519 __unregister_trace_kprobe(tk);
520 list_del(&tk->list);
521
522 return 0;
523 }
524
525 /* Register a trace_probe and probe_event */
526 static int register_trace_kprobe(struct trace_kprobe *tk)
527 {
528 struct trace_kprobe *old_tk;
529 int ret;
530
531 mutex_lock(&probe_lock);
532
533 /* Delete the old event if one with the same name exists */
534 old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
535 tk->tp.call.class->system);
536 if (old_tk) {
537 ret = unregister_trace_kprobe(old_tk);
538 if (ret < 0)
539 goto end;
540 free_trace_kprobe(old_tk);
541 }
542
543 /* Register new event */
544 ret = register_kprobe_event(tk);
545 if (ret) {
546 pr_warn("Failed to register probe event(%d)\n", ret);
547 goto end;
548 }
549
550 /* Register k*probe */
551 ret = __register_trace_kprobe(tk);
552 if (ret < 0)
553 unregister_kprobe_event(tk);
554 else
555 list_add_tail(&tk->list, &probe_list);
556
557 end:
558 mutex_unlock(&probe_lock);
559 return ret;
560 }
561
562 /* Module notifier callback: update probes on the coming module */
563 static int trace_kprobe_module_callback(struct notifier_block *nb,
564 unsigned long val, void *data)
565 {
566 struct module *mod = data;
567 struct trace_kprobe *tk;
568 int ret;
569
570 if (val != MODULE_STATE_COMING)
571 return NOTIFY_DONE;
572
573 /* Update probes on coming module */
574 mutex_lock(&probe_lock);
575 list_for_each_entry(tk, &probe_list, list) {
576 if (trace_kprobe_within_module(tk, mod)) {
577 /* Don't need to check busy - this should have gone. */
578 __unregister_trace_kprobe(tk);
579 ret = __register_trace_kprobe(tk);
580 if (ret)
581 pr_warn("Failed to re-register probe %s on %s: %d\n",
582 trace_event_name(&tk->tp.call),
583 mod->name, ret);
584 }
585 }
586 mutex_unlock(&probe_lock);
587
588 return NOTIFY_DONE;
589 }
590
591 static struct notifier_block trace_kprobe_module_nb = {
592 .notifier_call = trace_kprobe_module_callback,
593 .priority = 1 /* Invoked after kprobe module callback */
594 };
595
596 static int create_trace_kprobe(int argc, char **argv)
597 {
598 /*
599 * Argument syntax:
600 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
601 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
602 * Fetch args:
603 * $retval : fetch return value
604 * $stack : fetch stack address
605 * $stackN : fetch Nth of stack (N:0-)
606 * $comm : fetch current task comm
607 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
608 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
609 * %REG : fetch register REG
610 * Dereferencing memory fetch:
611 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
612 * Alias name of args:
613 * NAME=FETCHARG : set NAME as alias of FETCHARG.
614 * Type of args:
615 * FETCHARG:TYPE : use TYPE instead of unsigned long.
616 */
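	/*
	 * For illustration only (symbol and register names below are
	 * placeholders and arch-specific), typical commands written to the
	 * kprobe_events file look like:
	 *
	 *   p:myprobe do_sys_open dfd=%ax filename=%dx
	 *   r:myretprobe do_sys_open $retval
	 *   -:myprobe
	 */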
617 struct trace_kprobe *tk;
618 int i, ret = 0;
619 bool is_return = false, is_delete = false;
620 char *symbol = NULL, *event = NULL, *group = NULL;
621 char *arg;
622 unsigned long offset = 0;
623 void *addr = NULL;
624 char buf[MAX_EVENT_NAME_LEN];
625
626 /* argc must be >= 1 */
627 if (argv[0][0] == 'p')
628 is_return = false;
629 else if (argv[0][0] == 'r')
630 is_return = true;
631 else if (argv[0][0] == '-')
632 is_delete = true;
633 else {
634 pr_info("Probe definition must be started with 'p', 'r' or"
635 " '-'.\n");
636 return -EINVAL;
637 }
638
639 if (argv[0][1] == ':') {
640 event = &argv[0][2];
641 if (strchr(event, '/')) {
642 group = event;
643 event = strchr(group, '/') + 1;
644 event[-1] = '\0';
645 if (strlen(group) == 0) {
646 pr_info("Group name is not specified\n");
647 return -EINVAL;
648 }
649 }
650 if (strlen(event) == 0) {
651 pr_info("Event name is not specified\n");
652 return -EINVAL;
653 }
654 }
655 if (!group)
656 group = KPROBE_EVENT_SYSTEM;
657
658 if (is_delete) {
659 if (!event) {
660 pr_info("Delete command needs an event name.\n");
661 return -EINVAL;
662 }
663 mutex_lock(&probe_lock);
664 tk = find_trace_kprobe(event, group);
665 if (!tk) {
666 mutex_unlock(&probe_lock);
667 pr_info("Event %s/%s doesn't exist.\n", group, event);
668 return -ENOENT;
669 }
670 /* delete an event */
671 ret = unregister_trace_kprobe(tk);
672 if (ret == 0)
673 free_trace_kprobe(tk);
674 mutex_unlock(&probe_lock);
675 return ret;
676 }
677
678 if (argc < 2) {
679 pr_info("Probe point is not specified.\n");
680 return -EINVAL;
681 }
682 if (isdigit(argv[1][0])) {
683 if (is_return) {
684 pr_info("Return probe point must be a symbol.\n");
685 return -EINVAL;
686 }
687 /* an address specified */
688 ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
689 if (ret) {
690 pr_info("Failed to parse address.\n");
691 return ret;
692 }
693 } else {
694 /* a symbol specified */
695 symbol = argv[1];
696 /* TODO: support .init module functions */
697 ret = traceprobe_split_symbol_offset(symbol, &offset);
698 if (ret) {
699 pr_info("Failed to parse symbol.\n");
700 return ret;
701 }
702 if (offset && is_return) {
703 pr_info("Return probe must be used without offset.\n");
704 return -EINVAL;
705 }
706 }
707 argc -= 2; argv += 2;
708
709 /* setup a probe */
710 if (!event) {
711 /* Make a new event name */
712 if (symbol)
713 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
714 is_return ? 'r' : 'p', symbol, offset);
715 else
716 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
717 is_return ? 'r' : 'p', addr);
718 event = buf;
719 }
720 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
721 is_return);
722 if (IS_ERR(tk)) {
723 pr_info("Failed to allocate trace_probe.(%d)\n",
724 (int)PTR_ERR(tk));
725 return PTR_ERR(tk);
726 }
727
728 /* parse arguments */
729 ret = 0;
730 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
731 struct probe_arg *parg = &tk->tp.args[i];
732
733 /* Increment count for freeing args in error case */
734 tk->tp.nr_args++;
735
736 /* Parse argument name */
737 arg = strchr(argv[i], '=');
738 if (arg) {
739 *arg++ = '\0';
740 parg->name = kstrdup(argv[i], GFP_KERNEL);
741 } else {
742 arg = argv[i];
743 /* If argument name is omitted, set "argN" */
744 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
745 parg->name = kstrdup(buf, GFP_KERNEL);
746 }
747
748 if (!parg->name) {
749 pr_info("Failed to allocate argument[%d] name.\n", i);
750 ret = -ENOMEM;
751 goto error;
752 }
753
754 if (!is_good_name(parg->name)) {
755 pr_info("Invalid argument[%d] name: %s\n",
756 i, parg->name);
757 ret = -EINVAL;
758 goto error;
759 }
760
761 if (traceprobe_conflict_field_name(parg->name,
762 tk->tp.args, i)) {
763 pr_info("Argument[%d] name '%s' conflicts with "
764 "another field.\n", i, argv[i]);
765 ret = -EINVAL;
766 goto error;
767 }
768
769 /* Parse fetch argument */
770 ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
771 is_return, true,
772 kprobes_fetch_type_table);
773 if (ret) {
774 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
775 goto error;
776 }
777 }
778
779 ret = register_trace_kprobe(tk);
780 if (ret)
781 goto error;
782 return 0;
783
784 error:
785 free_trace_kprobe(tk);
786 return ret;
787 }
788
789 static int release_all_trace_kprobes(void)
790 {
791 struct trace_kprobe *tk;
792 int ret = 0;
793
794 mutex_lock(&probe_lock);
795 /* Ensure no probe is in use. */
796 list_for_each_entry(tk, &probe_list, list)
797 if (trace_probe_is_enabled(&tk->tp)) {
798 ret = -EBUSY;
799 goto end;
800 }
801 /* TODO: Use batch unregistration */
802 while (!list_empty(&probe_list)) {
803 tk = list_entry(probe_list.next, struct trace_kprobe, list);
804 ret = unregister_trace_kprobe(tk);
805 if (ret)
806 goto end;
807 free_trace_kprobe(tk);
808 }
809
810 end:
811 mutex_unlock(&probe_lock);
812
813 return ret;
814 }
815
816 /* Probes listing interfaces */
817 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
818 {
819 mutex_lock(&probe_lock);
820 return seq_list_start(&probe_list, *pos);
821 }
822
823 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
824 {
825 return seq_list_next(v, &probe_list, pos);
826 }
827
828 static void probes_seq_stop(struct seq_file *m, void *v)
829 {
830 mutex_unlock(&probe_lock);
831 }
832
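/*
 * Show one probe per line, in the same syntax that probes_write()
 * accepts, e.g. (illustrative):
 *
 *   r:kprobes/myretprobe do_sys_open arg1=$retval
 */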
833 static int probes_seq_show(struct seq_file *m, void *v)
834 {
835 struct trace_kprobe *tk = v;
836 int i;
837
838 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
839 seq_printf(m, ":%s/%s", tk->tp.call.class->system,
840 trace_event_name(&tk->tp.call));
841
842 if (!tk->symbol)
843 seq_printf(m, " 0x%p", tk->rp.kp.addr);
844 else if (tk->rp.kp.offset)
845 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
846 tk->rp.kp.offset);
847 else
848 seq_printf(m, " %s", trace_kprobe_symbol(tk));
849
850 for (i = 0; i < tk->tp.nr_args; i++)
851 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
852 seq_putc(m, '\n');
853
854 return 0;
855 }
856
857 static const struct seq_operations probes_seq_op = {
858 .start = probes_seq_start,
859 .next = probes_seq_next,
860 .stop = probes_seq_stop,
861 .show = probes_seq_show
862 };
863
864 static int probes_open(struct inode *inode, struct file *file)
865 {
866 int ret;
867
868 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
869 ret = release_all_trace_kprobes();
870 if (ret < 0)
871 return ret;
872 }
873
874 return seq_open(file, &probes_seq_op);
875 }
876
877 static ssize_t probes_write(struct file *file, const char __user *buffer,
878 size_t count, loff_t *ppos)
879 {
880 return traceprobe_probes_write(file, buffer, count, ppos,
881 create_trace_kprobe);
882 }
883
884 static const struct file_operations kprobe_events_ops = {
885 .owner = THIS_MODULE,
886 .open = probes_open,
887 .read = seq_read,
888 .llseek = seq_lseek,
889 .release = seq_release,
890 .write = probes_write,
891 };
892
893 /* Probes profiling interfaces */
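/*
 * Each kprobe_profile line shows the event name, the total hit count and
 * the missed-probe (nmissed) count, e.g. (illustrative, widths approximate):
 *
 *   myprobe                                      12               0
 */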
894 static int probes_profile_seq_show(struct seq_file *m, void *v)
895 {
896 struct trace_kprobe *tk = v;
897
898 seq_printf(m, " %-44s %15lu %15lu\n",
899 trace_event_name(&tk->tp.call),
900 trace_kprobe_nhit(tk),
901 tk->rp.kp.nmissed);
902
903 return 0;
904 }
905
906 static const struct seq_operations profile_seq_op = {
907 .start = probes_seq_start,
908 .next = probes_seq_next,
909 .stop = probes_seq_stop,
910 .show = probes_profile_seq_show
911 };
912
913 static int profile_open(struct inode *inode, struct file *file)
914 {
915 return seq_open(file, &profile_seq_op);
916 }
917
918 static const struct file_operations kprobe_profile_ops = {
919 .owner = THIS_MODULE,
920 .open = profile_open,
921 .read = seq_read,
922 .llseek = seq_lseek,
923 .release = seq_release,
924 };
925
926 /* Kprobe handler */
927 static nokprobe_inline void
928 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
929 struct trace_event_file *trace_file)
930 {
931 struct kprobe_trace_entry_head *entry;
932 struct ring_buffer_event *event;
933 struct ring_buffer *buffer;
934 int size, dsize, pc;
935 unsigned long irq_flags;
936 struct trace_event_call *call = &tk->tp.call;
937
938 WARN_ON(call != trace_file->event_call);
939
940 if (trace_trigger_soft_disabled(trace_file))
941 return;
942
943 local_save_flags(irq_flags);
944 pc = preempt_count();
945
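/*
 * Record layout: the fixed entry header, then tk->tp.size bytes of
 * fetched argument values, then dsize bytes of dynamic data (such as
 * strings referenced via __data_loc entries).
 */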
946 dsize = __get_data_size(&tk->tp, regs);
947 size = sizeof(*entry) + tk->tp.size + dsize;
948
949 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
950 call->event.type,
951 size, irq_flags, pc);
952 if (!event)
953 return;
954
955 entry = ring_buffer_event_data(event);
956 entry->ip = (unsigned long)tk->rp.kp.addr;
957 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
958
959 event_trigger_unlock_commit_regs(trace_file, buffer, event,
960 entry, irq_flags, pc, regs);
961 }
962
963 static void
964 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
965 {
966 struct event_file_link *link;
967
968 list_for_each_entry_rcu(link, &tk->tp.files, list)
969 __kprobe_trace_func(tk, regs, link->file);
970 }
971 NOKPROBE_SYMBOL(kprobe_trace_func);
972
973 /* Kretprobe handler */
974 static nokprobe_inline void
975 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
976 struct pt_regs *regs,
977 struct trace_event_file *trace_file)
978 {
979 struct kretprobe_trace_entry_head *entry;
980 struct ring_buffer_event *event;
981 struct ring_buffer *buffer;
982 int size, pc, dsize;
983 unsigned long irq_flags;
984 struct trace_event_call *call = &tk->tp.call;
985
986 WARN_ON(call != trace_file->event_call);
987
988 if (trace_trigger_soft_disabled(trace_file))
989 return;
990
991 local_save_flags(irq_flags);
992 pc = preempt_count();
993
994 dsize = __get_data_size(&tk->tp, regs);
995 size = sizeof(*entry) + tk->tp.size + dsize;
996
997 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
998 call->event.type,
999 size, irq_flags, pc);
1000 if (!event)
1001 return;
1002
1003 entry = ring_buffer_event_data(event);
1004 entry->func = (unsigned long)tk->rp.kp.addr;
1005 entry->ret_ip = (unsigned long)ri->ret_addr;
1006 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1007
1008 event_trigger_unlock_commit_regs(trace_file, buffer, event,
1009 entry, irq_flags, pc, regs);
1010 }
1011
1012 static void
1013 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1014 struct pt_regs *regs)
1015 {
1016 struct event_file_link *link;
1017
1018 list_for_each_entry_rcu(link, &tk->tp.files, list)
1019 __kretprobe_trace_func(tk, ri, regs, link->file);
1020 }
1021 NOKPROBE_SYMBOL(kretprobe_trace_func);
1022
1023 /* Event entry printers */
1024 static enum print_line_t
1025 print_kprobe_event(struct trace_iterator *iter, int flags,
1026 struct trace_event *event)
1027 {
1028 struct kprobe_trace_entry_head *field;
1029 struct trace_seq *s = &iter->seq;
1030 struct trace_probe *tp;
1031 u8 *data;
1032 int i;
1033
1034 field = (struct kprobe_trace_entry_head *)iter->ent;
1035 tp = container_of(event, struct trace_probe, call.event);
1036
1037 trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1038
1039 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1040 goto out;
1041
1042 trace_seq_putc(s, ')');
1043
1044 data = (u8 *)&field[1];
1045 for (i = 0; i < tp->nr_args; i++)
1046 if (!tp->args[i].type->print(s, tp->args[i].name,
1047 data + tp->args[i].offset, field))
1048 goto out;
1049
1050 trace_seq_putc(s, '\n');
1051 out:
1052 return trace_handle_return(s);
1053 }
1054
1055 static enum print_line_t
1056 print_kretprobe_event(struct trace_iterator *iter, int flags,
1057 struct trace_event *event)
1058 {
1059 struct kretprobe_trace_entry_head *field;
1060 struct trace_seq *s = &iter->seq;
1061 struct trace_probe *tp;
1062 u8 *data;
1063 int i;
1064
1065 field = (struct kretprobe_trace_entry_head *)iter->ent;
1066 tp = container_of(event, struct trace_probe, call.event);
1067
1068 trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1069
1070 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1071 goto out;
1072
1073 trace_seq_puts(s, " <- ");
1074
1075 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1076 goto out;
1077
1078 trace_seq_putc(s, ')');
1079
1080 data = (u8 *)&field[1];
1081 for (i = 0; i < tp->nr_args; i++)
1082 if (!tp->args[i].type->print(s, tp->args[i].name,
1083 data + tp->args[i].offset, field))
1084 goto out;
1085
1086 trace_seq_putc(s, '\n');
1087
1088 out:
1089 return trace_handle_return(s);
1090 }
1091
1092
1093 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1094 {
1095 int ret, i;
1096 struct kprobe_trace_entry_head field;
1097 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1098
1099 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1100 /* Set argument names as fields */
1101 for (i = 0; i < tk->tp.nr_args; i++) {
1102 struct probe_arg *parg = &tk->tp.args[i];
1103
1104 ret = trace_define_field(event_call, parg->type->fmttype,
1105 parg->name,
1106 sizeof(field) + parg->offset,
1107 parg->type->size,
1108 parg->type->is_signed,
1109 FILTER_OTHER);
1110 if (ret)
1111 return ret;
1112 }
1113 return 0;
1114 }
1115
1116 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1117 {
1118 int ret, i;
1119 struct kretprobe_trace_entry_head field;
1120 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1121
1122 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1123 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1124 /* Set argument names as fields */
1125 for (i = 0; i < tk->tp.nr_args; i++) {
1126 struct probe_arg *parg = &tk->tp.args[i];
1127
1128 ret = trace_define_field(event_call, parg->type->fmttype,
1129 parg->name,
1130 sizeof(field) + parg->offset,
1131 parg->type->size,
1132 parg->type->is_signed,
1133 FILTER_OTHER);
1134 if (ret)
1135 return ret;
1136 }
1137 return 0;
1138 }
1139
1140 #ifdef CONFIG_PERF_EVENTS
1141
1142 /* Kprobe profile handler */
1143 static void
1144 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1145 {
1146 struct trace_event_call *call = &tk->tp.call;
1147 struct bpf_prog *prog = call->prog;
1148 struct kprobe_trace_entry_head *entry;
1149 struct hlist_head *head;
1150 int size, __size, dsize;
1151 int rctx;
1152
1153 if (prog && !trace_call_bpf(prog, regs))
1154 return;
1155
1156 head = this_cpu_ptr(call->perf_events);
1157 if (hlist_empty(head))
1158 return;
1159
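/*
 * Align the record size so that the raw sample data, which perf prefixes
 * with a u32 size field, remains u64 aligned.
 */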
1160 dsize = __get_data_size(&tk->tp, regs);
1161 __size = sizeof(*entry) + tk->tp.size + dsize;
1162 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1163 size -= sizeof(u32);
1164
1165 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1166 if (!entry)
1167 return;
1168
1169 entry->ip = (unsigned long)tk->rp.kp.addr;
1170 memset(&entry[1], 0, dsize);
1171 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1172 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1173 head, NULL);
1174 }
1175 NOKPROBE_SYMBOL(kprobe_perf_func);
1176
1177 /* Kretprobe profile handler */
1178 static void
1179 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1180 struct pt_regs *regs)
1181 {
1182 struct trace_event_call *call = &tk->tp.call;
1183 struct bpf_prog *prog = call->prog;
1184 struct kretprobe_trace_entry_head *entry;
1185 struct hlist_head *head;
1186 int size, __size, dsize;
1187 int rctx;
1188
1189 if (prog && !trace_call_bpf(prog, regs))
1190 return;
1191
1192 head = this_cpu_ptr(call->perf_events);
1193 if (hlist_empty(head))
1194 return;
1195
1196 dsize = __get_data_size(&tk->tp, regs);
1197 __size = sizeof(*entry) + tk->tp.size + dsize;
1198 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1199 size -= sizeof(u32);
1200
1201 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1202 if (!entry)
1203 return;
1204
1205 entry->func = (unsigned long)tk->rp.kp.addr;
1206 entry->ret_ip = (unsigned long)ri->ret_addr;
1207 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1208 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1209 head, NULL);
1210 }
1211 NOKPROBE_SYMBOL(kretprobe_perf_func);
1212 #endif /* CONFIG_PERF_EVENTS */
1213
1214 /*
1215 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1216 *
1217 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1218 * lockless, but we can't race with this __init function.
1219 */
1220 static int kprobe_register(struct trace_event_call *event,
1221 enum trace_reg type, void *data)
1222 {
1223 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1224 struct trace_event_file *file = data;
1225
1226 switch (type) {
1227 case TRACE_REG_REGISTER:
1228 return enable_trace_kprobe(tk, file);
1229 case TRACE_REG_UNREGISTER:
1230 return disable_trace_kprobe(tk, file);
1231
1232 #ifdef CONFIG_PERF_EVENTS
1233 case TRACE_REG_PERF_REGISTER:
1234 return enable_trace_kprobe(tk, NULL);
1235 case TRACE_REG_PERF_UNREGISTER:
1236 return disable_trace_kprobe(tk, NULL);
1237 case TRACE_REG_PERF_OPEN:
1238 case TRACE_REG_PERF_CLOSE:
1239 case TRACE_REG_PERF_ADD:
1240 case TRACE_REG_PERF_DEL:
1241 return 0;
1242 #endif
1243 }
1244 return 0;
1245 }
1246
1247 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1248 {
1249 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1250
1251 raw_cpu_inc(*tk->nhit);
1252
1253 if (tk->tp.flags & TP_FLAG_TRACE)
1254 kprobe_trace_func(tk, regs);
1255 #ifdef CONFIG_PERF_EVENTS
1256 if (tk->tp.flags & TP_FLAG_PROFILE)
1257 kprobe_perf_func(tk, regs);
1258 #endif
1259 return 0; /* We don't tweak the kernel, so just return 0 */
1260 }
1261 NOKPROBE_SYMBOL(kprobe_dispatcher);
1262
1263 static int
1264 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1265 {
1266 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1267
1268 raw_cpu_inc(*tk->nhit);
1269
1270 if (tk->tp.flags & TP_FLAG_TRACE)
1271 kretprobe_trace_func(tk, ri, regs);
1272 #ifdef CONFIG_PERF_EVENTS
1273 if (tk->tp.flags & TP_FLAG_PROFILE)
1274 kretprobe_perf_func(tk, ri, regs);
1275 #endif
1276 return 0; /* We don't tweak the kernel, so just return 0 */
1277 }
1278 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1279
1280 static struct trace_event_functions kretprobe_funcs = {
1281 .trace = print_kretprobe_event
1282 };
1283
1284 static struct trace_event_functions kprobe_funcs = {
1285 .trace = print_kprobe_event
1286 };
1287
1288 static int register_kprobe_event(struct trace_kprobe *tk)
1289 {
1290 struct trace_event_call *call = &tk->tp.call;
1291 int ret;
1292
1293 /* Initialize trace_event_call */
1294 INIT_LIST_HEAD(&call->class->fields);
1295 if (trace_kprobe_is_return(tk)) {
1296 call->event.funcs = &kretprobe_funcs;
1297 call->class->define_fields = kretprobe_event_define_fields;
1298 } else {
1299 call->event.funcs = &kprobe_funcs;
1300 call->class->define_fields = kprobe_event_define_fields;
1301 }
1302 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1303 return -ENOMEM;
1304 ret = register_trace_event(&call->event);
1305 if (!ret) {
1306 kfree(call->print_fmt);
1307 return -ENODEV;
1308 }
1309 call->flags = TRACE_EVENT_FL_KPROBE;
1310 call->class->reg = kprobe_register;
1311 call->data = tk;
1312 ret = trace_add_event_call(call);
1313 if (ret) {
1314 pr_info("Failed to register kprobe event: %s\n",
1315 trace_event_name(call));
1316 kfree(call->print_fmt);
1317 unregister_trace_event(&call->event);
1318 }
1319 return ret;
1320 }
1321
1322 static int unregister_kprobe_event(struct trace_kprobe *tk)
1323 {
1324 int ret;
1325
1326 /* tp->event is unregistered in trace_remove_event_call() */
1327 ret = trace_remove_event_call(&tk->tp.call);
1328 if (!ret)
1329 kfree(tk->tp.call.print_fmt);
1330 return ret;
1331 }
1332
1333 /* Make a tracefs interface for controlling probe points */
1334 static __init int init_kprobe_trace(void)
1335 {
1336 struct dentry *d_tracer;
1337 struct dentry *entry;
1338
1339 if (register_module_notifier(&trace_kprobe_module_nb))
1340 return -EINVAL;
1341
1342 d_tracer = tracing_init_dentry();
1343 if (IS_ERR(d_tracer))
1344 return 0;
1345
1346 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1347 NULL, &kprobe_events_ops);
1348
1349 /* Event list interface */
1350 if (!entry)
1351 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1352
1353 /* Profile interface */
1354 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1355 NULL, &kprobe_profile_ops);
1356
1357 if (!entry)
1358 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1359 return 0;
1360 }
1361 fs_initcall(init_kprobe_trace);
1362
1363
1364 #ifdef CONFIG_FTRACE_STARTUP_TEST
1365 /*
1366 * The "__used" keeps gcc from removing the function symbol
1367 * from the kallsyms table. 'noinline' makes sure that there
1368 * isn't an inlined version used by the test method below
1369 */
1370 static __used __init noinline int
1371 kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
1372 {
1373 return a1 + a2 + a3 + a4 + a5 + a6;
1374 }
1375
1376 static __init struct trace_event_file *
1377 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1378 {
1379 struct trace_event_file *file;
1380
1381 list_for_each_entry(file, &tr->events, list)
1382 if (file->event_call == &tk->tp.call)
1383 return file;
1384
1385 return NULL;
1386 }
1387
1388 /*
1389 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1390 * stage, so we can do this locklessly.
1391 */
1392 static __init int kprobe_trace_self_tests_init(void)
1393 {
1394 int ret, warn = 0;
1395 int (*target)(int, int, int, int, int, int);
1396 struct trace_kprobe *tk;
1397 struct trace_event_file *file;
1398
1399 if (tracing_is_disabled())
1400 return -ENODEV;
1401
1402 target = kprobe_trace_selftest_target;
1403
1404 pr_info("Testing kprobe tracing: ");
1405
1406 ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1407 "$stack $stack0 +0($stack)",
1408 create_trace_kprobe);
1409 if (WARN_ON_ONCE(ret)) {
1410 pr_warn("error on probing function entry.\n");
1411 warn++;
1412 } else {
1413 /* Enable trace point */
1414 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1415 if (WARN_ON_ONCE(tk == NULL)) {
1416 pr_warn("error on getting new probe.\n");
1417 warn++;
1418 } else {
1419 file = find_trace_probe_file(tk, top_trace_array());
1420 if (WARN_ON_ONCE(file == NULL)) {
1421 pr_warn("error on getting probe file.\n");
1422 warn++;
1423 } else
1424 enable_trace_kprobe(tk, file);
1425 }
1426 }
1427
1428 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1429 "$retval", create_trace_kprobe);
1430 if (WARN_ON_ONCE(ret)) {
1431 pr_warn("error on probing function return.\n");
1432 warn++;
1433 } else {
1434 /* Enable trace point */
1435 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1436 if (WARN_ON_ONCE(tk == NULL)) {
1437 pr_warn("error on getting 2nd new probe.\n");
1438 warn++;
1439 } else {
1440 file = find_trace_probe_file(tk, top_trace_array());
1441 if (WARN_ON_ONCE(file == NULL)) {
1442 pr_warn("error on getting probe file.\n");
1443 warn++;
1444 } else
1445 enable_trace_kprobe(tk, file);
1446 }
1447 }
1448
1449 if (warn)
1450 goto end;
1451
1452 ret = target(1, 2, 3, 4, 5, 6);
1453
1454 /*
1455 * Not expecting an error here, the check is only to prevent the
1456 * optimizer from removing the call to target() as otherwise there
1457 * are no side-effects and the call is never performed.
1458 */
1459 if (ret != 21)
1460 warn++;
1461
1462 /* Disable trace points before removing them */
1463 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1464 if (WARN_ON_ONCE(tk == NULL)) {
1465 pr_warn("error on getting test probe.\n");
1466 warn++;
1467 } else {
1468 if (trace_kprobe_nhit(tk) != 1) {
1469 pr_warn("incorrect number of testprobe hits\n");
1470 warn++;
1471 }
1472
1473 file = find_trace_probe_file(tk, top_trace_array());
1474 if (WARN_ON_ONCE(file == NULL)) {
1475 pr_warn("error on getting probe file.\n");
1476 warn++;
1477 } else
1478 disable_trace_kprobe(tk, file);
1479 }
1480
1481 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1482 if (WARN_ON_ONCE(tk == NULL)) {
1483 pr_warn("error on getting 2nd test probe.\n");
1484 warn++;
1485 } else {
1486 if (trace_kprobe_nhit(tk) != 1) {
1487 pr_warn("incorrect number of testprobe2 hits\n");
1488 warn++;
1489 }
1490
1491 file = find_trace_probe_file(tk, top_trace_array());
1492 if (WARN_ON_ONCE(file == NULL)) {
1493 pr_warn("error on getting probe file.\n");
1494 warn++;
1495 } else
1496 disable_trace_kprobe(tk, file);
1497 }
1498
1499 ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1500 if (WARN_ON_ONCE(ret)) {
1501 pr_warn("error on deleting a probe.\n");
1502 warn++;
1503 }
1504
1505 ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1506 if (WARN_ON_ONCE(ret)) {
1507 pr_warn("error on deleting a probe.\n");
1508 warn++;
1509 }
1510
1511 end:
1512 release_all_trace_kprobes();
1513 if (warn)
1514 pr_cont("NG: Some tests failed. Please check them.\n");
1515 else
1516 pr_cont("OK\n");
1517 return 0;
1518 }
1519
1520 late_initcall(kprobe_trace_self_tests_init);
1521
1522 #endif