]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/trace/trace_uprobe.c
tracing: probeevent: Unify fetch type tables
[mirror_ubuntu-jammy-kernel.git] / kernel / trace / trace_uprobe.c
CommitLineData
bcea3f96 1// SPDX-License-Identifier: GPL-2.0
f3f096cf
SD
2/*
3 * uprobes-based tracing events
4 *
f3f096cf
SD
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
/*
 * Tag all pr_*() messages from this file.  The original said
 * "trace_kprobe: " — a copy/paste from trace_kprobe.c — which misattributed
 * every diagnostic from the uprobe tracer.  Use the correct prefix.
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
f3f096cf
SD
9
10#include <linux/module.h>
11#include <linux/uaccess.h>
12#include <linux/uprobes.h>
13#include <linux/namei.h>
b2e902f0 14#include <linux/string.h>
b2d09103 15#include <linux/rculist.h>
f3f096cf
SD
16
17#include "trace_probe.h"
53305928 18#include "trace_probe_tmpl.h"
f3f096cf
SD
19
20#define UPROBE_EVENT_SYSTEM "uprobes"
21
457d1772
ON
/*
 * Fixed header of every uprobe trace event record.  A plain probe stores
 * one address (the IP); a return probe stores two (function entry and
 * return IP) — see __uprobe_trace_func().
 */
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

/* Header size depends on whether one or two vaddr slots are recorded. */
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

/* Fetched argument data is laid out immediately after the header. */
#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
33
736288ba
ON
/* Per-probe bookkeeping of the perf events interested in this uprobe. */
struct trace_uprobe_filter {
	rwlock_t		rwlock;		/* protects the two fields below */
	int			nr_systemwide;	/* events with no target task */
	struct list_head	perf_events;	/* per-task events, via hw.tp_list */
};
39
f3f096cf
SD
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;		/* node in uprobe_list */
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;		/* pins the probed file */
	struct inode			*inode;		/* set while registered */
	char				*filename;
	unsigned long			offset;		/* probe offset in file */
	unsigned long			ref_ctr_offset;	/* 0 when unused */
	unsigned long			nhit;		/* shown in profile file */
	struct trace_probe		tp;		/* keep last: trailing args[] */
};

/* Allocation size for a trace_uprobe carrying @n probe arguments. */
#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

/* uprobe_lock serializes all add/remove/list operations on uprobe_list */
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
65
b7e0bf34
NK
/*
 * Read back out of current->utask->vaddr by translate_user_vaddr() so
 * FETCH_OP_FOFFS can turn a file offset into a runtime address.
 * NOTE(review): presumably stashed there by the dispatchers below, which
 * are not visible in this chunk — confirm against the full file.
 */
struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;	/* breakpoint address that fired */
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
f3f096cf 74
3fd996a2
NK
#ifdef CONFIG_STACK_GROWSUP
/*
 * Address of the @n-th long-sized slot relative to stack pointer @addr.
 * On an upward-growing stack earlier entries lie below the pointer.
 */
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
/* Downward-growing stack: the @n-th slot lies above the stack pointer. */
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif
86
87static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
88{
89 unsigned long ret;
90 unsigned long addr = user_stack_pointer(regs);
91
92 addr = adjust_stack_addr(addr, n);
93
94 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
95 return 0;
96
97 return ret;
98}
99
100/*
101 * Uprobes-specific fetch functions
102 */
53305928
MH
103static nokprobe_inline int
104probe_user_read(void *dest, void *src, size_t size)
105{
106 void __user *vaddr = (void __force __user *)src;
107
108 return copy_from_user(dest, vaddr, size);
5baaa59e 109}
5baaa59e
NK
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline void
fetch_store_string(unsigned long addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;	/* packed {max len, data offset} cookie */
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);
	/* ret == maxlen means the string was truncated: force termination */
	if (ret == maxlen)
		dst[--ret] = '\0';

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		/* Repack the cookie with the actual copied length */
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}
137
53305928
MH
138/* Return the length of string -- including null terminal byte */
139static nokprobe_inline void
140fetch_store_strlen(unsigned long addr, void *dest)
5baaa59e
NK
141{
142 int len;
143 void __user *vaddr = (void __force __user *) addr;
144
145 len = strnlen_user(vaddr, MAX_STRING_SIZE);
146
147 if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
148 *(u32 *)dest = 0;
149 else
150 *(u32 *)dest = len;
151}
3fd996a2 152
53305928 153static unsigned long translate_user_vaddr(unsigned long file_offset)
b7e0bf34
NK
154{
155 unsigned long base_addr;
156 struct uprobe_dispatch_data *udd;
157
158 udd = (void *) current->utask->vaddr;
159
160 base_addr = udd->bp_addr - udd->tu->offset;
53305928 161 return base_addr + file_offset;
b7e0bf34 162}
b7e0bf34 163
53305928
MH
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   bool pre)
{
	unsigned long val;
	int ret;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_FOFFS:
		/* File offset relative to the probed binary's load base */
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 2nd stage: dereference memory if needed */
	while (code->op == FETCH_OP_DEREF) {
		ret = probe_user_read(&val, (void *)val + code->offset,
				      sizeof(val));
		if (ret)
			return ret;
		code++;
	}

	/* 3rd stage: store value to buffer */
	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		/* NOTE(review): a failed read is ignored here; dest may keep stale bytes */
		probe_user_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		/* @pre pass stores the length; second pass stores the bytes */
		if (pre)
			fetch_store_strlen(val + code->offset, dest);
		else
			fetch_store_string(val + code->offset, dest);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

	return code->op == FETCH_OP_END ? 0 : -EILSEQ;
}
NOKPROBE_SYMBOL(process_fetch_insn)
234
736288ba
ON
235static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
236{
237 rwlock_init(&filter->rwlock);
238 filter->nr_systemwide = 0;
239 INIT_LIST_HEAD(&filter->perf_events);
240}
241
242static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
243{
244 return !filter->nr_systemwide && list_empty(&filter->perf_events);
245}
246
c1ae5c75
ON
247static inline bool is_ret_probe(struct trace_uprobe *tu)
248{
249 return tu->consumer.ret_handler != NULL;
250}
251
f3f096cf
SD
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	/* Both names become tracefs entries: must be valid C-like names */
	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	/* One allocation covers the struct plus the trailing tp.args[nargs] */
	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	/* Installing a ret_handler is what makes is_ret_probe() true */
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	/* kfree(NULL) is fine when the first kstrdup failed */
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
293
294static void free_trace_uprobe(struct trace_uprobe *tu)
295{
296 int i;
297
14577c39
NK
298 for (i = 0; i < tu->tp.nr_args; i++)
299 traceprobe_free_probe_arg(&tu->tp.args[i]);
f3f096cf 300
0c92c7a3 301 path_put(&tu->path);
14577c39
NK
302 kfree(tu->tp.call.class->system);
303 kfree(tu->tp.call.name);
f3f096cf
SD
304 kfree(tu->filename);
305 kfree(tu);
306}
307
308static struct trace_uprobe *find_probe_event(const char *event, const char *group)
309{
310 struct trace_uprobe *tu;
311
312 list_for_each_entry(tu, &uprobe_list, list)
687fcc4a 313 if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
14577c39 314 strcmp(tu->tp.call.class->system, group) == 0)
f3f096cf
SD
315 return tu;
316
317 return NULL;
318}
319
320/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
c6c2401d 321static int unregister_trace_uprobe(struct trace_uprobe *tu)
f3f096cf 322{
c6c2401d
SRRH
323 int ret;
324
325 ret = unregister_uprobe_event(tu);
326 if (ret)
327 return ret;
328
f3f096cf 329 list_del(&tu->list);
f3f096cf 330 free_trace_uprobe(tu);
c6c2401d 331 return 0;
f3f096cf
SD
332}
333
ccea8727
RB
/*
 * Uprobe with multiple reference counter is not allowed. i.e.
 * If inode and offset matches, reference counter offset *must*
 * match as well. Though, there is one exception: If user is
 * replacing old trace_uprobe with new one(same group/event),
 * then we allow same uprobe with new reference counter as far
 * as the new one does not conflict with any other existing
 * ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	/* The probe (if any) that @new replaces by group/event name */
	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	list_for_each_entry(tmp, &uprobe_list, list) {
		/* Skip @old itself — it is about to be unregistered anyway */
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}
362
f3f096cf
SD
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		/* Conflicting reference counter offset found */
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}
398
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	char *arg, *event, *group, *filename, *rctr, *rctr_end;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	/* Optional ":[GRP/]EVENT" suffix on the command letter */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;	/* NOTE(review): shadows the outer 'ret' */

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg)
		return -EINVAL;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	/* Only regular files can carry uprobes */
	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		/* rctr > NULL also catches a missing ')' */
		if (rctr > rctr_end || *(rctr_end + 1) != 0) {
			ret = -EINVAL;
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		/* Synthesize "p_<basename>_0x<offset>" as the event name */
		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;	/* ownership of the path ref moves into tu */
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	/* free_trace_uprobe() also drops tu->path via path_put() */
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	path_put(&path);

	pr_info("Failed to parse address or file.\n");

	return ret;
}
623
c6c2401d 624static int cleanup_all_probes(void)
f3f096cf
SD
625{
626 struct trace_uprobe *tu;
c6c2401d 627 int ret = 0;
f3f096cf
SD
628
629 mutex_lock(&uprobe_lock);
630 while (!list_empty(&uprobe_list)) {
631 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
c6c2401d
SRRH
632 ret = unregister_trace_uprobe(tu);
633 if (ret)
634 break;
f3f096cf
SD
635 }
636 mutex_unlock(&uprobe_lock);
c6c2401d 637 return ret;
f3f096cf
SD
638}
639
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	/* Held across the whole walk; released in probes_seq_stop() */
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}
646
/* Advance to the next probe in uprobe_list. */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}
651
/* Drop the lock taken in probes_seq_start(). */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}
656
/* Print one probe definition in the same syntax used to create it. */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	/* The reference counter offset round-trips in parentheses */
	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
676
/* seq_file operations behind the probe-listing tracefs file. */
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};
683
684static int probes_open(struct inode *inode, struct file *file)
685{
c6c2401d
SRRH
686 int ret;
687
688 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
689 ret = cleanup_all_probes();
690 if (ret)
691 return ret;
692 }
f3f096cf
SD
693
694 return seq_open(file, &probes_seq_op);
695}
696
/* Each written line is handed to create_trace_uprobe() as one command. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}
702
/* File operations for the probe definition interface. */
static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
711
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	/* One line per probe: filename, event name, hit count */
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
721
/* Reuses the listing iterators; only the show callback differs. */
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};
728
/* Open handler for the hit-count profile file. */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
733
/* File operations for the read-only profile interface. */
static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
741
dcad1a20
NK
/*
 * Per-cpu scratch buffer the handlers copy fetched argument data from
 * (see the memcpy in __uprobe_trace_func()).
 */
struct uprobe_cpu_buffer {
	struct mutex mutex;	/* holder may migrate; mutex keeps sole access */
	void *buf;		/* one page, set up in uprobe_buffer_init() */
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;	/* 0 means buffers are not allocated */
748
/* Allocate one page of scratch buffer per possible CPU. */
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	/* Unwind only the CPUs whose page was successfully allocated */
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
780
781static int uprobe_buffer_enable(void)
782{
783 int ret = 0;
784
785 BUG_ON(!mutex_is_locked(&event_mutex));
786
787 if (uprobe_buffer_refcnt++ == 0) {
788 ret = uprobe_buffer_init();
789 if (ret < 0)
790 uprobe_buffer_refcnt--;
791 }
792
793 return ret;
794}
795
/* Drop a reference; free the per-cpu buffers when it reaches zero. */
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
811
/* Grab this CPU's scratch buffer; pair with uprobe_buffer_put(). */
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}
828
/* Release the buffer acquired by uprobe_buffer_get(). */
static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
833
/*
 * Write one event record (header + pre-fetched argument data from @ucb)
 * into the ring buffer of @trace_file.  @func is only meaningful for
 * return probes.
 */
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	/* The payload must fit in the one-page per-cpu scratch buffer */
	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		/* Return probe: record both function entry and return IP */
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
f42d24a1 875
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	/* Entry hits of return probes are recorded by the ret handler only */
	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}
892
/* uretprobe handler: emit one record per trace file attached to @tu. */
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
904
f3f096cf
SD
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		/* vaddr[1] is the return IP, vaddr[0] the probed function */
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
937
31ba3348
ON
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

/*
 * Arm the probe either for ftrace (@file != NULL) or for perf
 * (@file == NULL).  The two modes are mutually exclusive: enabling one
 * while the other is active fails with -EINTR.
 */
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	/* Already armed via another trace file: nothing more to register */
	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
1005
/* Disarm the probe for @file, or for perf when @file is NULL. */
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		/* Other files still consume this probe: keep it armed */
		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
1036
/* Describe the fixed fields (IP, or FUNC/RETIP) followed by probe args. */
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	/* Argument fields start right after the fixed-size header */
	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
1054
f3f096cf 1055#ifdef CONFIG_PERF_EVENTS
31ba3348
ON
/*
 * Is @mm of interest to any perf event attached to @filter?
 * Caller must hold filter->rwlock (read or write).
 */
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	/* A system-wide event matches every mm */
	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}
1071
b2fe8ba6
ON
/* Does the mm of @event's target task match this probe's filter? */
static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
1077
/* Detach a perf event; pull the breakpoints out if nobody needs them. */
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		/* done: some other consumer still covers this task's mm */
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
1099
/* Attach a perf event; install the breakpoints if this event needs them. */
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		/* Roll back the bookkeeping done above on failure */
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}
1133
31ba3348
ON
1134static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1135 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1136{
1137 struct trace_uprobe *tu;
1138 int ret;
1139
1140 tu = container_of(uc, struct trace_uprobe, consumer);
1141 read_lock(&tu->filter.rwlock);
1142 ret = __uprobe_perf_filter(&tu->filter, mm);
1143 read_unlock(&tu->filter.rwlock);
1144
1145 return ret;
1146}
1147
/*
 * Write one sample for this probe hit into the perf buffers of all perf
 * events attached on this CPU.  For return probes (is_ret_probe()), @func
 * is the address of the probed function; for entry probes it is unused.
 */
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	/* Give an attached BPF program first say; it may filter out this hit. */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	/*
	 * Header + fixed args + dynamic data, rounded so that the record
	 * plus perf's u32 size prefix stays u64-aligned.
	 */
	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		/* Return probes record both the entry and the return address. */
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	/* Zero the alignment padding so no uninitialized bytes are exported. */
	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
1200
1201/* uprobe profile handler */
dd9fa555
NK
1202static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1203 struct uprobe_cpu_buffer *ucb, int dsize)
a51cc604
ON
1204{
1205 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1206 return UPROBE_HANDLER_REMOVE;
1207
393a736c 1208 if (!is_ret_probe(tu))
dd9fa555 1209 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
f42d24a1 1210 return 0;
f3f096cf 1211}
c1ae5c75
ON
1212
/* uretprobe profile handler: emit the sample at function return. */
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	/* @func carries the probed function's entry address. */
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
41bdc4b4
YS
1219
1220int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1221 const char **filename, u64 *probe_offset,
1222 bool perf_type_tracepoint)
1223{
1224 const char *pevent = trace_event_name(event->tp_event);
1225 const char *group = event->tp_event->class->system;
1226 struct trace_uprobe *tu;
1227
1228 if (perf_type_tracepoint)
1229 tu = find_probe_event(pevent, group);
1230 else
1231 tu = event->tp_event->data;
1232 if (!tu)
1233 return -EINVAL;
1234
1235 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1236 : BPF_FD_TYPE_UPROBE;
1237 *filename = tu->filename;
1238 *probe_offset = tu->offset;
1239 return 0;
1240}
f3f096cf
SD
1241#endif /* CONFIG_PERF_EVENTS */
1242
/*
 * trace_event_call->class->reg callback: multiplexes enable/disable
 * requests from ftrace (TRACE_REG_*) and perf (TRACE_REG_PERF_*).
 * @data is a trace_event_file for ftrace requests, a perf_event for
 * the PERF_OPEN/PERF_CLOSE requests.
 */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		/* perf supplies its own per-mm filter via uprobe_perf_filter */
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}
1278
/*
 * uprobe_consumer->handler: called when the probed instruction is hit.
 * Stashes the probe/address pair in current->utask->vaddr (consumed by
 * the argument-fetch code via struct uprobe_dispatch_data), collects the
 * probe arguments into a per-cpu buffer, then feeds them to whichever
 * consumers (ftrace trace buffer and/or perf) are enabled.
 */
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;


	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;	/* hit counter shown in uprobe_profile */

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	/* Dynamic-data size plus the fixed entry header size. */
	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
1315
c1ae5c75
ON
/*
 * uprobe_consumer->ret_handler: called when the probed function returns.
 * Mirrors uprobe_dispatcher(), but @func (the probed function's entry
 * address) is recorded alongside the return site.
 */
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	/* Stash dispatch info where the argument-fetch code can find it. */
	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
1350
f3f096cf
SD
/* Output callbacks for uprobe events; only text formatting is customized. */
static struct trace_event_functions uprobe_funcs = {
	.trace = print_uprobe_event
};
1354
33ea4b24
SL
1355static inline void init_trace_event_call(struct trace_uprobe *tu,
1356 struct trace_event_call *call)
f3f096cf 1357{
f3f096cf
SD
1358 INIT_LIST_HEAD(&call->class->fields);
1359 call->event.funcs = &uprobe_funcs;
1360 call->class->define_fields = uprobe_event_define_fields;
1361
33ea4b24
SL
1362 call->flags = TRACE_EVENT_FL_UPROBE;
1363 call->class->reg = trace_uprobe_register;
1364 call->data = tu;
1365}
1366
1367static int register_uprobe_event(struct trace_uprobe *tu)
1368{
1369 struct trace_event_call *call = &tu->tp.call;
1370 int ret = 0;
1371
1372 init_trace_event_call(tu, call);
1373
5bf652aa 1374 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
f3f096cf
SD
1375 return -ENOMEM;
1376
9023c930 1377 ret = register_trace_event(&call->event);
f3f096cf
SD
1378 if (!ret) {
1379 kfree(call->print_fmt);
1380 return -ENODEV;
1381 }
ede392a7 1382
f3f096cf
SD
1383 ret = trace_add_event_call(call);
1384
1385 if (ret) {
de7b2973 1386 pr_info("Failed to register uprobe event: %s\n",
687fcc4a 1387 trace_event_name(call));
f3f096cf 1388 kfree(call->print_fmt);
9023c930 1389 unregister_trace_event(&call->event);
f3f096cf
SD
1390 }
1391
1392 return ret;
1393}
1394
c6c2401d 1395static int unregister_uprobe_event(struct trace_uprobe *tu)
f3f096cf 1396{
c6c2401d
SRRH
1397 int ret;
1398
f3f096cf 1399 /* tu->event is unregistered in trace_remove_event_call() */
14577c39 1400 ret = trace_remove_event_call(&tu->tp.call);
c6c2401d
SRRH
1401 if (ret)
1402 return ret;
14577c39
NK
1403 kfree(tu->tp.call.print_fmt);
1404 tu->tp.call.print_fmt = NULL;
c6c2401d 1405 return 0;
f3f096cf
SD
1406}
1407
33ea4b24
SL
1408#ifdef CONFIG_PERF_EVENTS
1409struct trace_event_call *
a6ca88b2
SL
1410create_local_trace_uprobe(char *name, unsigned long offs,
1411 unsigned long ref_ctr_offset, bool is_return)
33ea4b24
SL
1412{
1413 struct trace_uprobe *tu;
33ea4b24
SL
1414 struct path path;
1415 int ret;
1416
1417 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1418 if (ret)
1419 return ERR_PTR(ret);
1420
0c92c7a3
SL
1421 if (!d_is_reg(path.dentry)) {
1422 path_put(&path);
33ea4b24
SL
1423 return ERR_PTR(-EINVAL);
1424 }
1425
1426 /*
1427 * local trace_kprobes are not added to probe_list, so they are never
1428 * searched in find_trace_kprobe(). Therefore, there is no concern of
1429 * duplicated name "DUMMY_EVENT" here.
1430 */
1431 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1432 is_return);
1433
1434 if (IS_ERR(tu)) {
1435 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1436 (int)PTR_ERR(tu));
0c92c7a3 1437 path_put(&path);
33ea4b24
SL
1438 return ERR_CAST(tu);
1439 }
1440
1441 tu->offset = offs;
0c92c7a3 1442 tu->path = path;
a6ca88b2 1443 tu->ref_ctr_offset = ref_ctr_offset;
33ea4b24
SL
1444 tu->filename = kstrdup(name, GFP_KERNEL);
1445 init_trace_event_call(tu, &tu->tp.call);
1446
1447 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1448 ret = -ENOMEM;
1449 goto error;
1450 }
1451
1452 return &tu->tp.call;
1453error:
1454 free_trace_uprobe(tu);
1455 return ERR_PTR(ret);
1456}
1457
1458void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1459{
1460 struct trace_uprobe *tu;
1461
1462 tu = container_of(event_call, struct trace_uprobe, tp.call);
1463
1464 kfree(tu->tp.call.print_fmt);
1465 tu->tp.call.print_fmt = NULL;
1466
1467 free_trace_uprobe(tu);
1468}
1469#endif /* CONFIG_PERF_EVENTS */
1470
f3f096cf
SD
1471/* Make a trace interface for controling probe points */
1472static __init int init_uprobe_trace(void)
1473{
1474 struct dentry *d_tracer;
1475
1476 d_tracer = tracing_init_dentry();
14a5ae40 1477 if (IS_ERR(d_tracer))
f3f096cf
SD
1478 return 0;
1479
1480 trace_create_file("uprobe_events", 0644, d_tracer,
1481 NULL, &uprobe_events_ops);
1482 /* Profile interface */
1483 trace_create_file("uprobe_profile", 0444, d_tracer,
1484 NULL, &uprobe_profile_ops);
1485 return 0;
1486}
1487
1488fs_initcall(init_uprobe_trace);