]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - kernel/trace/trace_kprobe.c
tracing: Failed to create system directory
[mirror_ubuntu-bionic-kernel.git] / kernel / trace / trace_kprobe.c
CommitLineData
413d37d1 1/*
77b44d1b 2 * Kprobes-based tracing events
413d37d1
MH
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/uaccess.h>
413d37d1 22
8ab83f56 23#include "trace_probe.h"
1ff511e3 24
8ab83f56 25#define KPROBE_EVENT_SYSTEM "kprobes"
e09c8614 26
413d37d1 27/**
77b44d1b 28 * Kprobe event core functions
413d37d1 29 */
413d37d1
MH
30struct trace_probe {
31 struct list_head list;
4a846b44 32 struct kretprobe rp; /* Use rp.kp for kprobe use */
cd7e7bd5 33 unsigned long nhit;
50d78056 34 unsigned int flags; /* For TP_FLAG_* */
413d37d1 35 const char *symbol; /* symbol name */
2239291a 36 struct ftrace_event_class class;
413d37d1 37 struct ftrace_event_call call;
3d1fc7b0 38 struct ftrace_event_file * __rcu *files;
93ccae7a 39 ssize_t size; /* trace entry size */
a82378d8 40 unsigned int nr_args;
eca0d916 41 struct probe_arg args[];
413d37d1
MH
42};
43
a82378d8
MH
44#define SIZEOF_TRACE_PROBE(n) \
45 (offsetof(struct trace_probe, args) + \
eca0d916 46 (sizeof(struct probe_arg) * (n)))
a82378d8 47
93ccae7a 48
db02038f 49static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
413d37d1 50{
4a846b44 51 return tp->rp.handler != NULL;
413d37d1
MH
52}
53
7143f168 54static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
413d37d1
MH
55{
56 return tp->symbol ? tp->symbol : "unknown";
57}
58
61424318
MH
59static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
60{
61 return tp->rp.kp.offset;
62}
63
64static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
65{
66 return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
67}
68
69static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
70{
71 return !!(tp->flags & TP_FLAG_REGISTERED);
72}
73
74static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
75{
76 return !!(kprobe_gone(&tp->rp.kp));
77}
78
79static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
80 struct module *mod)
81{
82 int len = strlen(mod->name);
83 const char *name = trace_probe_symbol(tp);
84 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
85}
86
87static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
88{
89 return !!strchr(trace_probe_symbol(tp), ':');
90}
91
413d37d1
MH
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

/* probe_lock serializes probe_list and probe (un)registration */
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
4a846b44
MH
102/*
103 * Allocate new trace_probe and initialize it (including kprobes).
104 */
f52487e9
MH
105static struct trace_probe *alloc_trace_probe(const char *group,
106 const char *event,
4a846b44
MH
107 void *addr,
108 const char *symbol,
109 unsigned long offs,
3a6b7666 110 int nargs, bool is_return)
413d37d1
MH
111{
112 struct trace_probe *tp;
6f3cf440 113 int ret = -ENOMEM;
413d37d1 114
a82378d8 115 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
413d37d1 116 if (!tp)
6f3cf440 117 return ERR_PTR(ret);
413d37d1
MH
118
119 if (symbol) {
120 tp->symbol = kstrdup(symbol, GFP_KERNEL);
121 if (!tp->symbol)
122 goto error;
4a846b44
MH
123 tp->rp.kp.symbol_name = tp->symbol;
124 tp->rp.kp.offset = offs;
125 } else
126 tp->rp.kp.addr = addr;
127
128 if (is_return)
50d78056 129 tp->rp.handler = kretprobe_dispatcher;
4a846b44 130 else
50d78056 131 tp->rp.kp.pre_handler = kprobe_dispatcher;
4a846b44 132
da34634f 133 if (!event || !is_good_name(event)) {
6f3cf440 134 ret = -EINVAL;
4263565d 135 goto error;
6f3cf440
MH
136 }
137
2239291a 138 tp->call.class = &tp->class;
4263565d
MH
139 tp->call.name = kstrdup(event, GFP_KERNEL);
140 if (!tp->call.name)
141 goto error;
413d37d1 142
da34634f 143 if (!group || !is_good_name(group)) {
6f3cf440 144 ret = -EINVAL;
f52487e9 145 goto error;
6f3cf440
MH
146 }
147
2239291a
SR
148 tp->class.system = kstrdup(group, GFP_KERNEL);
149 if (!tp->class.system)
f52487e9
MH
150 goto error;
151
413d37d1
MH
152 INIT_LIST_HEAD(&tp->list);
153 return tp;
154error:
f52487e9 155 kfree(tp->call.name);
413d37d1
MH
156 kfree(tp->symbol);
157 kfree(tp);
6f3cf440 158 return ERR_PTR(ret);
413d37d1
MH
159}
160
161static void free_trace_probe(struct trace_probe *tp)
162{
163 int i;
164
165 for (i = 0; i < tp->nr_args; i++)
8ab83f56 166 traceprobe_free_probe_arg(&tp->args[i]);
413d37d1 167
8f082018 168 kfree(tp->call.class->system);
413d37d1
MH
169 kfree(tp->call.name);
170 kfree(tp->symbol);
171 kfree(tp);
172}
173
7143f168 174static struct trace_probe *find_trace_probe(const char *event,
dd004c47 175 const char *group)
413d37d1
MH
176{
177 struct trace_probe *tp;
178
179 list_for_each_entry(tp, &probe_list, list)
dd004c47 180 if (strcmp(tp->call.name, event) == 0 &&
8f082018 181 strcmp(tp->call.class->system, group) == 0)
413d37d1
MH
182 return tp;
183 return NULL;
184}
185
41a7dd42
MH
186static int trace_probe_nr_files(struct trace_probe *tp)
187{
c02c7e65 188 struct ftrace_event_file **file;
41a7dd42
MH
189 int ret = 0;
190
c02c7e65
MH
191 /*
192 * Since all tp->files updater is protected by probe_enable_lock,
193 * we don't need to lock an rcu_read_lock.
194 */
195 file = rcu_dereference_raw(tp->files);
41a7dd42
MH
196 if (file)
197 while (*(file++))
198 ret++;
199
200 return ret;
201}
202
203static DEFINE_MUTEX(probe_enable_lock);
204
205/*
206 * Enable trace_probe
207 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
208 */
209static int
210enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
1538f888
MH
211{
212 int ret = 0;
213
41a7dd42
MH
214 mutex_lock(&probe_enable_lock);
215
216 if (file) {
c02c7e65 217 struct ftrace_event_file **new, **old;
41a7dd42
MH
218 int n = trace_probe_nr_files(tp);
219
c02c7e65 220 old = rcu_dereference_raw(tp->files);
41a7dd42
MH
221 /* 1 is for new one and 1 is for stopper */
222 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
223 GFP_KERNEL);
224 if (!new) {
225 ret = -ENOMEM;
226 goto out_unlock;
227 }
228 memcpy(new, old, n * sizeof(struct ftrace_event_file *));
229 new[n] = file;
230 /* The last one keeps a NULL */
231
232 rcu_assign_pointer(tp->files, new);
233 tp->flags |= TP_FLAG_TRACE;
234
235 if (old) {
236 /* Make sure the probe is done with old files */
237 synchronize_sched();
238 kfree(old);
239 }
240 } else
241 tp->flags |= TP_FLAG_PROFILE;
242
195a84d9 243 if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) {
1538f888
MH
244 if (trace_probe_is_return(tp))
245 ret = enable_kretprobe(&tp->rp);
246 else
247 ret = enable_kprobe(&tp->rp.kp);
248 }
249
41a7dd42
MH
250 out_unlock:
251 mutex_unlock(&probe_enable_lock);
252
1538f888
MH
253 return ret;
254}
255
41a7dd42
MH
256static int
257trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
258{
c02c7e65 259 struct ftrace_event_file **files;
41a7dd42
MH
260 int i;
261
c02c7e65
MH
262 /*
263 * Since all tp->files updater is protected by probe_enable_lock,
264 * we don't need to lock an rcu_read_lock.
265 */
266 files = rcu_dereference_raw(tp->files);
267 if (files) {
268 for (i = 0; files[i]; i++)
269 if (files[i] == file)
41a7dd42
MH
270 return i;
271 }
272
273 return -1;
274}
275
276/*
277 * Disable trace_probe
278 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
279 */
280static int
281disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
1538f888 282{
41a7dd42
MH
283 int ret = 0;
284
285 mutex_lock(&probe_enable_lock);
286
287 if (file) {
c02c7e65 288 struct ftrace_event_file **new, **old;
41a7dd42
MH
289 int n = trace_probe_nr_files(tp);
290 int i, j;
291
c02c7e65 292 old = rcu_dereference_raw(tp->files);
41a7dd42
MH
293 if (n == 0 || trace_probe_file_index(tp, file) < 0) {
294 ret = -EINVAL;
295 goto out_unlock;
296 }
297
298 if (n == 1) { /* Remove the last file */
299 tp->flags &= ~TP_FLAG_TRACE;
300 new = NULL;
301 } else {
302 new = kzalloc(n * sizeof(struct ftrace_event_file *),
303 GFP_KERNEL);
304 if (!new) {
305 ret = -ENOMEM;
306 goto out_unlock;
307 }
308
309 /* This copy & check loop copies the NULL stopper too */
310 for (i = 0, j = 0; j < n && i < n + 1; i++)
311 if (old[i] != file)
312 new[j++] = old[i];
313 }
314
315 rcu_assign_pointer(tp->files, new);
316
317 /* Make sure the probe is done with old files */
318 synchronize_sched();
319 kfree(old);
320 } else
321 tp->flags &= ~TP_FLAG_PROFILE;
322
61424318 323 if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
1538f888
MH
324 if (trace_probe_is_return(tp))
325 disable_kretprobe(&tp->rp);
326 else
327 disable_kprobe(&tp->rp.kp);
328 }
41a7dd42
MH
329
330 out_unlock:
331 mutex_unlock(&probe_enable_lock);
332
333 return ret;
1538f888
MH
334}
335
61424318
MH
336/* Internal register function - just handle k*probes and flags */
337static int __register_trace_probe(struct trace_probe *tp)
413d37d1 338{
7f6878a3 339 int i, ret;
61424318
MH
340
341 if (trace_probe_is_registered(tp))
342 return -EINVAL;
343
7f6878a3 344 for (i = 0; i < tp->nr_args; i++)
8ab83f56 345 traceprobe_update_arg(&tp->args[i]);
7f6878a3 346
61424318
MH
347 /* Set/clear disabled flag according to tp->flag */
348 if (trace_probe_is_enabled(tp))
349 tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
350 else
351 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
352
7143f168 353 if (trace_probe_is_return(tp))
61424318 354 ret = register_kretprobe(&tp->rp);
413d37d1 355 else
61424318
MH
356 ret = register_kprobe(&tp->rp.kp);
357
358 if (ret == 0)
359 tp->flags |= TP_FLAG_REGISTERED;
360 else {
361 pr_warning("Could not insert probe at %s+%lu: %d\n",
362 trace_probe_symbol(tp), trace_probe_offset(tp), ret);
363 if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
364 pr_warning("This probe might be able to register after"
365 "target module is loaded. Continue.\n");
366 ret = 0;
367 } else if (ret == -EILSEQ) {
368 pr_warning("Probing address(0x%p) is not an "
369 "instruction boundary.\n",
370 tp->rp.kp.addr);
371 ret = -EINVAL;
372 }
373 }
374
375 return ret;
376}
377
378/* Internal unregister function - just handle k*probes and flags */
379static void __unregister_trace_probe(struct trace_probe *tp)
380{
381 if (trace_probe_is_registered(tp)) {
382 if (trace_probe_is_return(tp))
383 unregister_kretprobe(&tp->rp);
384 else
385 unregister_kprobe(&tp->rp.kp);
386 tp->flags &= ~TP_FLAG_REGISTERED;
387 /* Cleanup kprobe for reuse */
388 if (tp->rp.kp.symbol_name)
389 tp->rp.kp.addr = NULL;
390 }
391}
392
393/* Unregister a trace_probe and probe_event: call with locking probe_lock */
02ca1521 394static int unregister_trace_probe(struct trace_probe *tp)
61424318 395{
02ca1521
MH
396 /* Enabled event can not be unregistered */
397 if (trace_probe_is_enabled(tp))
398 return -EBUSY;
399
61424318 400 __unregister_trace_probe(tp);
413d37d1 401 list_del(&tp->list);
2d5e067e 402 unregister_probe_event(tp);
02ca1521
MH
403
404 return 0;
413d37d1
MH
405}
406
407/* Register a trace_probe and probe_event */
408static int register_trace_probe(struct trace_probe *tp)
409{
410 struct trace_probe *old_tp;
411 int ret;
412
413 mutex_lock(&probe_lock);
414
61424318 415 /* Delete old (same name) event if exist */
7143f168 416 old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
2d5e067e 417 if (old_tp) {
02ca1521
MH
418 ret = unregister_trace_probe(old_tp);
419 if (ret < 0)
420 goto end;
2d5e067e
MH
421 free_trace_probe(old_tp);
422 }
61424318
MH
423
424 /* Register new event */
2d5e067e
MH
425 ret = register_probe_event(tp);
426 if (ret) {
426d3107 427 pr_warning("Failed to register probe event(%d)\n", ret);
2d5e067e
MH
428 goto end;
429 }
430
61424318
MH
431 /* Register k*probe */
432 ret = __register_trace_probe(tp);
433 if (ret < 0)
2d5e067e 434 unregister_probe_event(tp);
61424318 435 else
2d5e067e 436 list_add_tail(&tp->list, &probe_list);
61424318 437
413d37d1
MH
438end:
439 mutex_unlock(&probe_lock);
440 return ret;
441}
442
61424318
MH
443/* Module notifier call back, checking event on the module */
444static int trace_probe_module_callback(struct notifier_block *nb,
445 unsigned long val, void *data)
446{
447 struct module *mod = data;
448 struct trace_probe *tp;
449 int ret;
450
451 if (val != MODULE_STATE_COMING)
452 return NOTIFY_DONE;
453
454 /* Update probes on coming module */
455 mutex_lock(&probe_lock);
456 list_for_each_entry(tp, &probe_list, list) {
457 if (trace_probe_within_module(tp, mod)) {
02ca1521 458 /* Don't need to check busy - this should have gone. */
61424318
MH
459 __unregister_trace_probe(tp);
460 ret = __register_trace_probe(tp);
461 if (ret)
462 pr_warning("Failed to re-register probe %s on"
463 "%s: %d\n",
464 tp->call.name, mod->name, ret);
465 }
466 }
467 mutex_unlock(&probe_lock);
468
469 return NOTIFY_DONE;
470}
471
472static struct notifier_block trace_probe_module_nb = {
473 .notifier_call = trace_probe_module_callback,
474 .priority = 1 /* Invoked after kprobe module callback */
475};
476
413d37d1
MH
477static int create_trace_probe(int argc, char **argv)
478{
479 /*
480 * Argument syntax:
61424318
MH
481 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
482 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
413d37d1 483 * Fetch args:
2e06ff63
MH
484 * $retval : fetch return value
485 * $stack : fetch stack address
486 * $stackN : fetch Nth of stack (N:0-)
413d37d1
MH
487 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
488 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
489 * %REG : fetch register REG
93ccae7a 490 * Dereferencing memory fetch:
413d37d1 491 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
eca0d916
MH
492 * Alias name of args:
493 * NAME=FETCHARG : set NAME as alias of FETCHARG.
93ccae7a
MH
494 * Type of args:
495 * FETCHARG:TYPE : use TYPE instead of unsigned long.
413d37d1
MH
496 */
497 struct trace_probe *tp;
413d37d1 498 int i, ret = 0;
3a6b7666 499 bool is_return = false, is_delete = false;
93ccae7a 500 char *symbol = NULL, *event = NULL, *group = NULL;
da34634f 501 char *arg;
2fba0c88 502 unsigned long offset = 0;
413d37d1 503 void *addr = NULL;
4a846b44 504 char buf[MAX_EVENT_NAME_LEN];
413d37d1 505
a7c312be 506 /* argc must be >= 1 */
413d37d1 507 if (argv[0][0] == 'p')
3a6b7666 508 is_return = false;
413d37d1 509 else if (argv[0][0] == 'r')
3a6b7666 510 is_return = true;
a7c312be 511 else if (argv[0][0] == '-')
3a6b7666 512 is_delete = true;
e63cc239 513 else {
a7c312be
MH
514 pr_info("Probe definition must be started with 'p', 'r' or"
515 " '-'.\n");
413d37d1 516 return -EINVAL;
e63cc239 517 }
413d37d1
MH
518
519 if (argv[0][1] == ':') {
520 event = &argv[0][2];
f52487e9
MH
521 if (strchr(event, '/')) {
522 group = event;
523 event = strchr(group, '/') + 1;
524 event[-1] = '\0';
525 if (strlen(group) == 0) {
a5efd925 526 pr_info("Group name is not specified\n");
f52487e9
MH
527 return -EINVAL;
528 }
529 }
413d37d1 530 if (strlen(event) == 0) {
a5efd925 531 pr_info("Event name is not specified\n");
413d37d1
MH
532 return -EINVAL;
533 }
534 }
a7c312be
MH
535 if (!group)
536 group = KPROBE_EVENT_SYSTEM;
413d37d1 537
a7c312be
MH
538 if (is_delete) {
539 if (!event) {
540 pr_info("Delete command needs an event name.\n");
541 return -EINVAL;
542 }
9da79ab8 543 mutex_lock(&probe_lock);
7143f168 544 tp = find_trace_probe(event, group);
a7c312be 545 if (!tp) {
9da79ab8 546 mutex_unlock(&probe_lock);
a7c312be
MH
547 pr_info("Event %s/%s doesn't exist.\n", group, event);
548 return -ENOENT;
549 }
550 /* delete an event */
02ca1521
MH
551 ret = unregister_trace_probe(tp);
552 if (ret == 0)
553 free_trace_probe(tp);
9da79ab8 554 mutex_unlock(&probe_lock);
02ca1521 555 return ret;
a7c312be
MH
556 }
557
558 if (argc < 2) {
559 pr_info("Probe point is not specified.\n");
560 return -EINVAL;
561 }
413d37d1 562 if (isdigit(argv[1][0])) {
e63cc239
MH
563 if (is_return) {
564 pr_info("Return probe point must be a symbol.\n");
413d37d1 565 return -EINVAL;
e63cc239 566 }
413d37d1 567 /* an address specified */
bcd83ea6 568 ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
e63cc239
MH
569 if (ret) {
570 pr_info("Failed to parse address.\n");
413d37d1 571 return ret;
e63cc239 572 }
413d37d1
MH
573 } else {
574 /* a symbol specified */
575 symbol = argv[1];
576 /* TODO: support .init module functions */
8ab83f56 577 ret = traceprobe_split_symbol_offset(symbol, &offset);
e63cc239
MH
578 if (ret) {
579 pr_info("Failed to parse symbol.\n");
413d37d1 580 return ret;
e63cc239
MH
581 }
582 if (offset && is_return) {
583 pr_info("Return probe must be used without offset.\n");
413d37d1 584 return -EINVAL;
e63cc239 585 }
413d37d1 586 }
a82378d8 587 argc -= 2; argv += 2;
413d37d1
MH
588
589 /* setup a probe */
4263565d
MH
590 if (!event) {
591 /* Make a new event name */
4263565d 592 if (symbol)
6f3cf440 593 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
4263565d
MH
594 is_return ? 'r' : 'p', symbol, offset);
595 else
6f3cf440 596 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
4263565d 597 is_return ? 'r' : 'p', addr);
4a846b44
MH
598 event = buf;
599 }
f52487e9
MH
600 tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
601 is_return);
e63cc239
MH
602 if (IS_ERR(tp)) {
603 pr_info("Failed to allocate trace_probe.(%d)\n",
604 (int)PTR_ERR(tp));
413d37d1 605 return PTR_ERR(tp);
e63cc239 606 }
413d37d1 607
413d37d1 608 /* parse arguments */
a82378d8
MH
609 ret = 0;
610 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
61a52736
MH
611 /* Increment count for freeing args in error case */
612 tp->nr_args++;
613
eca0d916
MH
614 /* Parse argument name */
615 arg = strchr(argv[i], '=');
aba91595 616 if (arg) {
eca0d916 617 *arg++ = '\0';
aba91595
MH
618 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
619 } else {
eca0d916 620 arg = argv[i];
aba91595
MH
621 /* If argument name is omitted, set "argN" */
622 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
623 tp->args[i].name = kstrdup(buf, GFP_KERNEL);
624 }
a703d946 625
ba8665d7 626 if (!tp->args[i].name) {
aba91595 627 pr_info("Failed to allocate argument[%d] name.\n", i);
ba8665d7 628 ret = -ENOMEM;
413d37d1
MH
629 goto error;
630 }
da34634f
MH
631
632 if (!is_good_name(tp->args[i].name)) {
633 pr_info("Invalid argument[%d] name: %s\n",
634 i, tp->args[i].name);
635 ret = -EINVAL;
636 goto error;
637 }
93ccae7a 638
8ab83f56
SD
639 if (traceprobe_conflict_field_name(tp->args[i].name,
640 tp->args, i)) {
aba91595 641 pr_info("Argument[%d] name '%s' conflicts with "
93ccae7a
MH
642 "another field.\n", i, argv[i]);
643 ret = -EINVAL;
644 goto error;
645 }
ba8665d7
MH
646
647 /* Parse fetch argument */
8ab83f56 648 ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
f3f096cf 649 is_return, true);
e63cc239 650 if (ret) {
aba91595 651 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
413d37d1 652 goto error;
e63cc239 653 }
413d37d1 654 }
413d37d1
MH
655
656 ret = register_trace_probe(tp);
657 if (ret)
658 goto error;
659 return 0;
660
661error:
662 free_trace_probe(tp);
663 return ret;
664}
665
02ca1521 666static int release_all_trace_probes(void)
413d37d1
MH
667{
668 struct trace_probe *tp;
02ca1521 669 int ret = 0;
413d37d1
MH
670
671 mutex_lock(&probe_lock);
02ca1521
MH
672 /* Ensure no probe is in use. */
673 list_for_each_entry(tp, &probe_list, list)
674 if (trace_probe_is_enabled(tp)) {
675 ret = -EBUSY;
676 goto end;
677 }
413d37d1
MH
678 /* TODO: Use batch unregistration */
679 while (!list_empty(&probe_list)) {
680 tp = list_entry(probe_list.next, struct trace_probe, list);
681 unregister_trace_probe(tp);
682 free_trace_probe(tp);
683 }
02ca1521
MH
684
685end:
413d37d1 686 mutex_unlock(&probe_lock);
02ca1521
MH
687
688 return ret;
413d37d1
MH
689}
690
413d37d1
MH
691/* Probes listing interfaces */
692static void *probes_seq_start(struct seq_file *m, loff_t *pos)
693{
694 mutex_lock(&probe_lock);
695 return seq_list_start(&probe_list, *pos);
696}
697
698static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
699{
700 return seq_list_next(v, &probe_list, pos);
701}
702
703static void probes_seq_stop(struct seq_file *m, void *v)
704{
705 mutex_unlock(&probe_lock);
706}
707
708static int probes_seq_show(struct seq_file *m, void *v)
709{
710 struct trace_probe *tp = v;
93ccae7a 711 int i;
413d37d1 712
7143f168 713 seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
8f082018 714 seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
413d37d1 715
52a11f35
LJ
716 if (!tp->symbol)
717 seq_printf(m, " 0x%p", tp->rp.kp.addr);
718 else if (tp->rp.kp.offset)
7143f168
MH
719 seq_printf(m, " %s+%u", trace_probe_symbol(tp),
720 tp->rp.kp.offset);
413d37d1 721 else
7143f168 722 seq_printf(m, " %s", trace_probe_symbol(tp));
413d37d1 723
93ccae7a
MH
724 for (i = 0; i < tp->nr_args; i++)
725 seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
413d37d1 726 seq_printf(m, "\n");
93ccae7a 727
413d37d1
MH
728 return 0;
729}
730
731static const struct seq_operations probes_seq_op = {
732 .start = probes_seq_start,
733 .next = probes_seq_next,
734 .stop = probes_seq_stop,
735 .show = probes_seq_show
736};
737
738static int probes_open(struct inode *inode, struct file *file)
739{
02ca1521
MH
740 int ret;
741
742 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
743 ret = release_all_trace_probes();
744 if (ret < 0)
745 return ret;
746 }
413d37d1
MH
747
748 return seq_open(file, &probes_seq_op);
749}
750
413d37d1
MH
751static ssize_t probes_write(struct file *file, const char __user *buffer,
752 size_t count, loff_t *ppos)
753{
8ab83f56
SD
754 return traceprobe_probes_write(file, buffer, count, ppos,
755 create_trace_probe);
413d37d1
MH
756}
757
758static const struct file_operations kprobe_events_ops = {
759 .owner = THIS_MODULE,
760 .open = probes_open,
761 .read = seq_read,
762 .llseek = seq_lseek,
763 .release = seq_release,
764 .write = probes_write,
765};
766
cd7e7bd5
MH
767/* Probes profiling interfaces */
768static int probes_profile_seq_show(struct seq_file *m, void *v)
769{
770 struct trace_probe *tp = v;
771
772 seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
4a846b44 773 tp->rp.kp.nmissed);
cd7e7bd5
MH
774
775 return 0;
776}
777
778static const struct seq_operations profile_seq_op = {
779 .start = probes_seq_start,
780 .next = probes_seq_next,
781 .stop = probes_seq_stop,
782 .show = probes_profile_seq_show
783};
784
785static int profile_open(struct inode *inode, struct file *file)
786{
787 return seq_open(file, &profile_seq_op);
788}
789
790static const struct file_operations kprobe_profile_ops = {
791 .owner = THIS_MODULE,
792 .open = profile_open,
793 .read = seq_read,
794 .llseek = seq_lseek,
795 .release = seq_release,
796};
797
e09c8614
MH
798/* Sum up total data length for dynamic arraies (strings) */
799static __kprobes int __get_data_size(struct trace_probe *tp,
800 struct pt_regs *regs)
801{
802 int i, ret = 0;
803 u32 len;
804
805 for (i = 0; i < tp->nr_args; i++)
806 if (unlikely(tp->args[i].fetch_size.fn)) {
807 call_fetch(&tp->args[i].fetch_size, regs, &len);
808 ret += len;
809 }
810
811 return ret;
812}
813
814/* Store the value of each argument */
815static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
816 struct pt_regs *regs,
817 u8 *data, int maxlen)
818{
819 int i;
820 u32 end = tp->size;
821 u32 *dl; /* Data (relative) location */
822
823 for (i = 0; i < tp->nr_args; i++) {
824 if (unlikely(tp->args[i].fetch_size.fn)) {
825 /*
826 * First, we set the relative location and
827 * maximum data length to *dl
828 */
829 dl = (u32 *)(data + tp->args[i].offset);
830 *dl = make_data_rloc(maxlen, end - tp->args[i].offset);
831 /* Then try to fetch string or dynamic array data */
832 call_fetch(&tp->args[i].fetch, regs, dl);
833 /* Reduce maximum length */
834 end += get_rloc_len(*dl);
835 maxlen -= get_rloc_len(*dl);
836 /* Trick here, convert data_rloc to data_loc */
837 *dl = convert_rloc_to_loc(*dl,
838 ent_size + tp->args[i].offset);
839 } else
840 /* Just fetching data normally */
841 call_fetch(&tp->args[i].fetch, regs,
842 data + tp->args[i].offset);
843 }
844}
845
413d37d1 846/* Kprobe handler */
2b106aab 847static __kprobes void
41a7dd42
MH
848__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
849 struct ftrace_event_file *ftrace_file)
413d37d1 850{
93ccae7a 851 struct kprobe_trace_entry_head *entry;
413d37d1 852 struct ring_buffer_event *event;
8f8ffe24 853 struct ring_buffer *buffer;
e09c8614 854 int size, dsize, pc;
413d37d1 855 unsigned long irq_flags;
4263565d 856 struct ftrace_event_call *call = &tp->call;
413d37d1 857
41a7dd42
MH
858 WARN_ON(call != ftrace_file->event_call);
859
b8820084
MH
860 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
861 return;
862
413d37d1
MH
863 local_save_flags(irq_flags);
864 pc = preempt_count();
865
e09c8614
MH
866 dsize = __get_data_size(tp, regs);
867 size = sizeof(*entry) + tp->size + dsize;
413d37d1 868
41a7dd42
MH
869 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
870 call->event.type,
871 size, irq_flags, pc);
413d37d1 872 if (!event)
1e12a4a7 873 return;
413d37d1
MH
874
875 entry = ring_buffer_event_data(event);
2b106aab 876 entry->ip = (unsigned long)tp->rp.kp.addr;
e09c8614 877 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
413d37d1 878
8f8ffe24 879 if (!filter_current_check_discard(buffer, call, entry, event))
0d5c6e1c
SR
880 trace_buffer_unlock_commit_regs(buffer, event,
881 irq_flags, pc, regs);
413d37d1
MH
882}
883
41a7dd42
MH
884static __kprobes void
885kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
886{
c02c7e65
MH
887 /*
888 * Note: preempt is already disabled around the kprobe handler.
889 * However, we still need an smp_read_barrier_depends() corresponding
890 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
891 */
892 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
893
894 if (unlikely(!file))
895 return;
41a7dd42 896
41a7dd42
MH
897 while (*file) {
898 __kprobe_trace_func(tp, regs, *file);
899 file++;
900 }
901}
902
413d37d1 903/* Kretprobe handler */
2b106aab 904static __kprobes void
41a7dd42
MH
905__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
906 struct pt_regs *regs,
907 struct ftrace_event_file *ftrace_file)
413d37d1 908{
93ccae7a 909 struct kretprobe_trace_entry_head *entry;
413d37d1 910 struct ring_buffer_event *event;
8f8ffe24 911 struct ring_buffer *buffer;
e09c8614 912 int size, pc, dsize;
413d37d1 913 unsigned long irq_flags;
4263565d 914 struct ftrace_event_call *call = &tp->call;
413d37d1 915
41a7dd42
MH
916 WARN_ON(call != ftrace_file->event_call);
917
b8820084
MH
918 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
919 return;
920
413d37d1
MH
921 local_save_flags(irq_flags);
922 pc = preempt_count();
923
e09c8614
MH
924 dsize = __get_data_size(tp, regs);
925 size = sizeof(*entry) + tp->size + dsize;
413d37d1 926
41a7dd42
MH
927 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
928 call->event.type,
929 size, irq_flags, pc);
413d37d1 930 if (!event)
1e12a4a7 931 return;
413d37d1
MH
932
933 entry = ring_buffer_event_data(event);
4a846b44 934 entry->func = (unsigned long)tp->rp.kp.addr;
413d37d1 935 entry->ret_ip = (unsigned long)ri->ret_addr;
e09c8614 936 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
413d37d1 937
8f8ffe24 938 if (!filter_current_check_discard(buffer, call, entry, event))
0d5c6e1c
SR
939 trace_buffer_unlock_commit_regs(buffer, event,
940 irq_flags, pc, regs);
413d37d1
MH
941}
942
41a7dd42
MH
943static __kprobes void
944kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
945 struct pt_regs *regs)
946{
c02c7e65
MH
947 /*
948 * Note: preempt is already disabled around the kprobe handler.
949 * However, we still need an smp_read_barrier_depends() corresponding
950 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
951 */
952 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
953
954 if (unlikely(!file))
955 return;
41a7dd42 956
41a7dd42
MH
957 while (*file) {
958 __kretprobe_trace_func(tp, ri, regs, *file);
959 file++;
960 }
961}
962
413d37d1 963/* Event entry printers */
b62fdd97 964static enum print_line_t
a9a57763
SR
965print_kprobe_event(struct trace_iterator *iter, int flags,
966 struct trace_event *event)
413d37d1 967{
93ccae7a 968 struct kprobe_trace_entry_head *field;
413d37d1 969 struct trace_seq *s = &iter->seq;
eca0d916 970 struct trace_probe *tp;
93ccae7a 971 u8 *data;
413d37d1
MH
972 int i;
973
93ccae7a 974 field = (struct kprobe_trace_entry_head *)iter->ent;
80decc70 975 tp = container_of(event, struct trace_probe, call.event);
413d37d1 976
6e9f23d1
MH
977 if (!trace_seq_printf(s, "%s: (", tp->call.name))
978 goto partial;
979
413d37d1
MH
980 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
981 goto partial;
982
6e9f23d1 983 if (!trace_seq_puts(s, ")"))
413d37d1
MH
984 goto partial;
985
93ccae7a
MH
986 data = (u8 *)&field[1];
987 for (i = 0; i < tp->nr_args; i++)
988 if (!tp->args[i].type->print(s, tp->args[i].name,
e09c8614 989 data + tp->args[i].offset, field))
413d37d1
MH
990 goto partial;
991
992 if (!trace_seq_puts(s, "\n"))
993 goto partial;
994
995 return TRACE_TYPE_HANDLED;
996partial:
997 return TRACE_TYPE_PARTIAL_LINE;
998}
999
b62fdd97 1000static enum print_line_t
a9a57763
SR
1001print_kretprobe_event(struct trace_iterator *iter, int flags,
1002 struct trace_event *event)
413d37d1 1003{
93ccae7a 1004 struct kretprobe_trace_entry_head *field;
413d37d1 1005 struct trace_seq *s = &iter->seq;
eca0d916 1006 struct trace_probe *tp;
93ccae7a 1007 u8 *data;
413d37d1
MH
1008 int i;
1009
93ccae7a 1010 field = (struct kretprobe_trace_entry_head *)iter->ent;
80decc70 1011 tp = container_of(event, struct trace_probe, call.event);
413d37d1 1012
6e9f23d1
MH
1013 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1014 goto partial;
1015
413d37d1
MH
1016 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1017 goto partial;
1018
1019 if (!trace_seq_puts(s, " <- "))
1020 goto partial;
1021
1022 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1023 goto partial;
1024
6e9f23d1 1025 if (!trace_seq_puts(s, ")"))
413d37d1
MH
1026 goto partial;
1027
93ccae7a
MH
1028 data = (u8 *)&field[1];
1029 for (i = 0; i < tp->nr_args; i++)
1030 if (!tp->args[i].type->print(s, tp->args[i].name,
e09c8614 1031 data + tp->args[i].offset, field))
413d37d1
MH
1032 goto partial;
1033
1034 if (!trace_seq_puts(s, "\n"))
1035 goto partial;
1036
1037 return TRACE_TYPE_HANDLED;
1038partial:
1039 return TRACE_TYPE_PARTIAL_LINE;
1040}
1041
413d37d1
MH
1042
/*
 * Define the ftrace fields of a kprobe event: the fixed IP field plus
 * one field per parsed probe argument. Returns 0 or a negative errno.
 */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	/* NOTE: DEFINE_FIELD (trace_probe.h) returns from here on error. */
	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		/* Arguments live after the header, at their parse offsets. */
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
1063
/*
 * Define the ftrace fields of a kretprobe event: the fixed func/ret_ip
 * fields plus one field per parsed probe argument.
 */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	/* NOTE: DEFINE_FIELD (trace_probe.h) returns from here on error. */
	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		/* Arguments live after the header, at their parse offsets. */
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
1085
a342a028
LJ
1086static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
1087{
1088 int i;
1089 int pos = 0;
1090
1091 const char *fmt, *arg;
1092
7143f168 1093 if (!trace_probe_is_return(tp)) {
a342a028
LJ
1094 fmt = "(%lx)";
1095 arg = "REC->" FIELD_STRING_IP;
1096 } else {
1097 fmt = "(%lx <- %lx)";
1098 arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
1099 }
1100
1101 /* When len=0, we just calculate the needed length */
1102#define LEN_OR_ZERO (len ? len - pos : 0)
1103
1104 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
1105
1106 for (i = 0; i < tp->nr_args; i++) {
93ccae7a
MH
1107 pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
1108 tp->args[i].name, tp->args[i].type->fmt);
a342a028
LJ
1109 }
1110
1111 pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
1112
1113 for (i = 0; i < tp->nr_args; i++) {
e09c8614
MH
1114 if (strcmp(tp->args[i].type->name, "string") == 0)
1115 pos += snprintf(buf + pos, LEN_OR_ZERO,
1116 ", __get_str(%s)",
1117 tp->args[i].name);
1118 else
1119 pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
1120 tp->args[i].name);
a342a028
LJ
1121 }
1122
1123#undef LEN_OR_ZERO
1124
1125 /* return the length of print_fmt */
1126 return pos;
1127}
1128
1129static int set_print_fmt(struct trace_probe *tp)
1130{
1131 int len;
1132 char *print_fmt;
1133
1134 /* First: called with 0 length to calculate the needed length */
1135 len = __set_print_fmt(tp, NULL, 0);
1136 print_fmt = kmalloc(len + 1, GFP_KERNEL);
1137 if (!print_fmt)
1138 return -ENOMEM;
1139
1140 /* Second: actually write the @print_fmt */
1141 __set_print_fmt(tp, print_fmt, len + 1);
1142 tp->call.print_fmt = print_fmt;
1143
1144 return 0;
1145}
1146
07b139c8 1147#ifdef CONFIG_PERF_EVENTS
e08d1c65
MH
1148
/* Kprobe profile handler: record one hit into the perf ring buffer. */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	/*
	 * Record layout: fixed header, static argument area (tp->size),
	 * then dynamic data (e.g. fetched strings) of size dsize.
	 */
	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/*
	 * perf prefixes a u32 size word; round the total up to u64
	 * alignment, then subtract the u32 back out.
	 */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		     "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	/* Zero the dynamic area so stale buffer bytes never reach userspace. */
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
					entry->ip, 1, regs, head, NULL);
}
1179
1180/* Kretprobe profile handler */
2b106aab
MH
1181static __kprobes void
1182kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1183 struct pt_regs *regs)
e08d1c65 1184{
e08d1c65 1185 struct ftrace_event_call *call = &tp->call;
93ccae7a 1186 struct kretprobe_trace_entry_head *entry;
1c024eca 1187 struct hlist_head *head;
e09c8614 1188 int size, __size, dsize;
4ed7c92d 1189 int rctx;
e08d1c65 1190
e09c8614
MH
1191 dsize = __get_data_size(tp, regs);
1192 __size = sizeof(*entry) + tp->size + dsize;
74ebb63e
MH
1193 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1194 size -= sizeof(u32);
97d5a220 1195 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
a1a138d0 1196 "profile buffer not large enough"))
1e12a4a7 1197 return;
444a2a3b 1198
ff5f149b 1199 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
430ad5a6 1200 if (!entry)
1e12a4a7 1201 return;
e08d1c65 1202
a1a138d0
MH
1203 entry->func = (unsigned long)tp->rp.kp.addr;
1204 entry->ret_ip = (unsigned long)ri->ret_addr;
e09c8614 1205 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
444a2a3b 1206
3771f077 1207 head = this_cpu_ptr(call->perf_events);
e6dab5ff
AV
1208 perf_trace_buf_submit(entry, size, rctx,
1209 entry->ret_ip, 1, regs, head, NULL);
e08d1c65 1210}
07b139c8 1211#endif /* CONFIG_PERF_EVENTS */
50d78056 1212
/*
 * class->reg callback: routes ftrace and perf (de)registration requests
 * to enable_trace_probe()/disable_trace_probe(). For the perf cases the
 * file argument is NULL, which distinguishes perf from ftrace users.
 */
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_probe(tp, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_probe(tp, NULL);
	/* open/close/add/del need no per-probe work here. */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
50d78056
MH
1240
/*
 * Kprobe pre-handler: bump the hit counter and fan out to the ftrace
 * and/or perf handlers depending on which users enabled the probe.
 */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);
#endif
	return 0; /* We don't tweak the kernel, so just return 0 */
}
1256
/*
 * Kretprobe handler: bump the hit counter and fan out to the ftrace
 * and/or perf return handlers depending on which users are enabled.
 */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);
#endif
	return 0; /* We don't tweak the kernel, so just return 0 */
}
e08d1c65 1272
a9a57763
SR
/* Per-flavor output callbacks installed into call->event.funcs. */
static struct trace_event_functions kretprobe_funcs = {
	.trace = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace = print_kprobe_event
};
1280
413d37d1
MH
/*
 * Register the probe's ftrace event: pick the per-flavor callbacks,
 * build the print format, register the trace_event and add the event
 * call. Returns 0 on success or a negative errno, cleaning up the
 * print_fmt allocation on every failure path.
 */
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_probe_is_return(tp)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
	/* register_ftrace_event() returns the assigned type id; 0 = failure. */
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}
1313
/* Tear down what register_probe_event() set up. */
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}
1320
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	/* Watch module load/unload so probes on module code stay valid. */
	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	/* Probe definition interface (read/write). */
	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface (read-only hit/miss counters). */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	/* Missing debugfs entries are non-fatal; tracing still works. */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);
1352
1353
1354#ifdef CONFIG_FTRACE_STARTUP_TEST
1355
/*
 * Target function probed by the startup self-test below.
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}
1365
41a7dd42
MH
/*
 * Find the ftrace_event_file wrapping @tp's event call in trace array
 * @tr, or NULL if the event is not part of that array.
 */
static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}
1377
/*
 * Startup self-test: create an entry probe and a return probe on
 * kprobe_trace_selftest_target(), enable both in the top trace array,
 * call the target, then disable and delete the probes. Each failure
 * warns and increments @warn; the result is reported at the end.
 * Always returns 0 so boot continues even if the test fails.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;
	struct ftrace_event_file *file;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	if (warn)
		goto end;

	/* Fire both probes by actually calling the target. */
	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing it */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	/* Clean up anything the test left behind, even on partial failure. */
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}
1484
1485late_initcall(kprobe_trace_self_tests_init);
1486
1487#endif