kernel/trace/trace_uprobe.c
/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
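
/*
 * Layout note: an entry carries one vaddr slot (the probed instruction
 * pointer) for an ordinary uprobe, or two slots (the called function and
 * the return address) for a uretprobe.  The fetched arguments are stored
 * right after this fixed part, at the address DATAOF_TRACE_ENTRY() yields.
 */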

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	unsigned int			flags;	/* For TP_FLAG_* */
	ssize_t				size;	/* trace entry size */
	unsigned int			nr_args;
	struct probe_arg		args[];
};

#define SIZEOF_TRACE_UPROBE(n)			\
	(offsetof(struct trace_uprobe, args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static void unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->call.class = &tu->class;
	tu->call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->call.name)
		goto error;

	tu->class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->nr_args; i++)
		traceprobe_free_probe_arg(&tu->args[i]);

	iput(tu->inode);
	kfree(tu->call.class->system);
	kfree(tu->call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(tu->call.name, event) == 0 &&
		    strcmp(tu->call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: call with uprobe_lock held */
static void unregister_trace_uprobe(struct trace_uprobe *tu)
{
	list_del(&tu->list);
	unregister_uprobe_event(tu);
	free_trace_uprobe(tu);
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tp;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tp = find_probe_event(tu->call.name, tu->call.class->system);
	if (old_tp)
		/* delete old event */
		unregister_trace_uprobe(old_tp);

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
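/*
 * Usage sketch (illustrative only -- the binary path, offset and fetch
 * argument below are made up): writes to the uprobe_events file created
 * by init_uprobe_trace() below are parsed by this function, e.g.
 *
 *	echo 'p:myevent /bin/bash:0x4245c0 %ax' > uprobe_events
 *	echo '-:myevent' >> uprobe_events
 */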
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return 0;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg)
		goto fail_address_parse;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(path.dentry->d_inode);
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, false);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tu->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tu->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tu->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tu->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tu->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	if (inode)
		iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_uprobe *tu;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		unregister_trace_uprobe(tu);
	}
	mutex_unlock(&uprobe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	int i;

	seq_printf(m, "p:%s/%s", tu->call.class->system, tu->call.name);
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->nr_args; i++)
		seq_printf(m, " %s=%s", tu->args[i].name, tu->args[i].comm);

	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename, tu->call.name, tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void uprobe_trace_print(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, i;
	struct ftrace_event_call *call = &tu->call;

	size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size + tu->size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	if (!is_ret_probe(tu))
		uprobe_trace_print(tu, 0, regs);
	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs)
{
	uprobe_trace_print(tu, func, regs);
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, call.event);

	if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, entry->vaddr[0]))
		goto partial;

	data = DATAOF_TRACE_ENTRY(entry, false);
	for (i = 0; i < tu->nr_args; i++) {
		if (!tu->args[i].type->print(s, tu->args[i].name,
					     data + tu->args[i].offset, entry))
			goto partial;
	}

	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu)
{
	return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
{
	int ret = 0;

	if (is_trace_uprobe_enabled(tu))
		return -EINTR;

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	tu->flags |= flag;
	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		tu->flags &= ~flag;

	return ret;
}

static void probe_event_disable(struct trace_uprobe *tu, int flag)
{
	if (!is_trace_uprobe_enabled(tu))
		return;

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->flags &= ~flag;
}

static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->nr_args; i++) {
		ret = trace_define_field(event_call, tu->args[i].type->fmttype,
					 tu->args[i].name,
					 size + tu->args[i].offset,
					 tu->args[i].type->size,
					 tu->args[i].type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len)
{
	const char *fmt, *arg;
	int i;
	int pos = 0;

	if (is_ret_probe(tu)) {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	} else {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	}

	/* When len=0, we just calculate the needed length */

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tu->args[i].name, tu->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
				tu->args[i].name);
	}

	return pos;	/* return the length of print_fmt */
}
#undef LEN_OR_ZERO
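
/*
 * Illustration only (the exact field and type strings come from
 * trace_probe.h): for a non-return probe with a single argument "arg1"
 * of type u32, the format built above would look roughly like
 *
 *	"(%lx) arg1=%x", REC->__probe_ip, REC->arg1
 *
 * assuming FIELD_STRING_IP expands to "__probe_ip" and the u32 fetch
 * type prints with "%x".
 */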

static int set_print_fmt(struct trace_uprobe *tu)
{
	char *print_fmt;
	int len;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tu, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tu, print_fmt, len + 1);
	tu->call.print_fmt = print_fmt;

	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.tp_target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);

	return 0;
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.tp_target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void uprobe_perf_print(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tu->call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	unsigned long ip;
	void *data;
	int size, rctx, i;

	size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	ip = instruction_pointer(regs);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = ip;
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = ip;
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, ip, 1, regs, head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		uprobe_perf_print(tu, 0, regs);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs)
{
	uprobe_perf_print(tu, func, regs);
}
#endif /* CONFIG_PERF_EVENTS */

static
int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
{
	struct trace_uprobe *tu = event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, TP_FLAG_TRACE, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_PROFILE);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	if (tu->flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs);

#ifdef CONFIG_PERF_EVENTS
	if (tu->flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs);
#endif
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;

	tu = container_of(con, struct trace_uprobe, consumer);

	if (tu->flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs);

#ifdef CONFIG_PERF_EVENTS
	if (tu->flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs);
#endif
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct ftrace_event_call *call = &tu->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(tu) < 0)
		return -ENOMEM;

	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}

	return ret;
}

static void unregister_uprobe_event(struct trace_uprobe *tu)
{
	/* tu->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tu->call);
	kfree(tu->call.print_fmt);
	tu->call.print_fmt = NULL;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);