]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/trace/trace_uprobe.c
tracing: probeevent: Cleanup print argument functions
[mirror_ubuntu-jammy-kernel.git] / kernel / trace / trace_uprobe.c
CommitLineData
bcea3f96 1// SPDX-License-Identifier: GPL-2.0
f3f096cf
SD
2/*
3 * uprobes-based tracing events
4 *
f3f096cf
SD
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
/*
 * Prefix for all pr_*() messages from this file.  This is the uprobe
 * tracer, so the prefix must say "trace_uprobe" — the original line said
 * "trace_kprobe", a copy/paste typo from trace_kprobe.c that mislabels
 * every diagnostic emitted here.
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt
f3f096cf
SD
9
10#include <linux/module.h>
11#include <linux/uaccess.h>
12#include <linux/uprobes.h>
13#include <linux/namei.h>
b2e902f0 14#include <linux/string.h>
b2d09103 15#include <linux/rculist.h>
f3f096cf
SD
16
17#include "trace_probe.h"
18
19#define UPROBE_EVENT_SYSTEM "uprobes"
20
457d1772
ON
/*
 * Fixed header of a uprobe trace record.  vaddr[] holds one address (the
 * instruction pointer) for an entry probe, or two (function address and
 * return IP) for a return probe; fetched argument data follows.
 */
struct uprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long vaddr[];
};

/* Size of the fixed part of a record: one vaddr slot, or two for a uretprobe. */
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

/* Start of the variable-length argument data that follows the fixed part. */
#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

/*
 * Per-probe perf filtering state: the list of attached per-task perf
 * events plus a count of system-wide (unfiltered) users.  Guarded by
 * rwlock; see uprobe_perf_open()/close()/filter() below.
 */
struct trace_uprobe_filter {
	rwlock_t rwlock;
	int nr_systemwide;
	struct list_head perf_events;
};
38
f3f096cf
SD
39/*
40 * uprobe event core functions
41 */
f3f096cf
SD
/*
 * One registered uprobe event: where to probe (path/inode/offset), the
 * uprobe consumer callbacks, the perf filter, and the generic trace_probe
 * core (event call, class, fetch args).
 */
struct trace_uprobe {
	struct list_head list;			/* linked on uprobe_list */
	struct trace_uprobe_filter filter;
	struct uprobe_consumer consumer;
	struct path path;
	struct inode *inode;			/* set while the probe is enabled */
	char *filename;
	unsigned long offset;			/* probe offset within the file */
	unsigned long ref_ctr_offset;		/* optional semaphore offset */
	unsigned long nhit;			/* hit count, shown in uprobe_profile */
	struct trace_probe tp;			/* must be last: args[] is flexible */
};

/* Allocation size for a trace_uprobe carrying n fetch arguments. */
#define SIZEOF_TRACE_UPROBE(n)			\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

/* uprobe_lock protects uprobe_list, the registry of all uprobe events. */
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

/*
 * Handed to the fetch functions through current->utask->vaddr at dispatch
 * time (see translate_user_vaddr()), so they can convert file offsets to
 * virtual addresses in the probed task.
 */
struct uprobe_dispatch_data {
	struct trace_uprobe *tu;
	unsigned long bp_addr;		/* address where the breakpoint hit */
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
f3f096cf 73
3fd996a2
NK
/*
 * Return the address of the n-th word on the user stack relative to @addr.
 * The direction depends on which way the stack grows on this architecture:
 * with CONFIG_STACK_GROWSUP entries sit below the stack pointer, otherwise
 * above it.
 */
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	unsigned long delta = (unsigned long)n * sizeof(long);

#ifdef CONFIG_STACK_GROWSUP
	return addr - delta;
#else
	return addr + delta;
#endif
}
85
/*
 * Read the n-th word from the probed task's user stack.  Returns 0 when
 * the word cannot be read — note a fault is indistinguishable from a
 * genuine 0 on the stack.
 */
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
98
99/*
100 * Uprobes-specific fetch functions
101 */
/* Fetch the n-th user-stack word into *dest, cast to 'type'. */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
				((unsigned long)offset));		\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

/* Fetch a 'type'-sized value from user memory at 'addr'; store 0 on fault. */
#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret == maxlen)
		dst[--ret] = '\0';	/* truncated: force NUL termination */

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		/* record the actual copied length in the data location word */
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

/* Store the length (incl. NUL) of the user string at 'addr'; 0 on failure. */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

/*
 * Convert a file offset to the virtual address where that file byte is
 * mapped in the probed task.  Relies on uprobe_dispatch_data having been
 * stashed in current->utask->vaddr by the dispatcher: the mapping base is
 * recovered as (breakpoint address - probe offset).
 */
static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

/* Same as the 'memory' fetchers, but 'offset' is a file offset, not a vaddr. */
#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)
191
/*
 * Fetch type information table: maps the type names usable in a probe
 * definition (u8..u64, s8..s64, x8..x64, string, string_size) to their
 * size, signedness, format and fetch functions.
 */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8, u8, 0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8, u8, 1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	/* Hexadecimal aliases of the unsigned types */
	ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
215
736288ba
ON
/* Reset a filter to "no perf events attached, no system-wide users". */
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

/* True iff no perf event — per-task or system-wide — uses this filter. */
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

/* Return probes are those created with a ret_handler; entry probes have none. */
static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
232
f3f096cf
SD
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 * Returns ERR_PTR(-EINVAL) for a missing/invalid event or group name,
 * ERR_PTR(-ENOMEM) on allocation failure.  The caller owns the result
 * and frees it with free_trace_uprobe().
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	/* One allocation covers the struct plus nargs probe_arg slots. */
	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
274
/*
 * Release everything owned by a trace_uprobe: each parsed probe argument,
 * the path reference, the event/system name strings, the filename, and
 * the structure itself.
 */
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}
288
/*
 * Look up a probe by event and group name.  Callers hold uprobe_lock
 * (see register_trace_uprobe() and the delete path in create_trace_uprobe()).
 * Returns NULL when no such event exists.
 */
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
300
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	/* Fails (e.g. -EBUSY) if the event is still in use; keep tu alive then. */
	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}
314
ccea8727
RB
/*
 * Uprobe with multiple reference counter is not allowed. i.e.
 * If inode and offset matches, reference counter offset *must*
 * match as well. Though, there is one exception: If user is
 * replacing old trace_uprobe with new one(same group/event),
 * then we allow same uprobe with new reference counter as far
 * as the new one does not conflict with any other existing
 * ones.
 *
 * Returns the probe being replaced (same group/event) or NULL, or
 * ERR_PTR(-EINVAL) when a ref_ctr_offset conflict is found.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	list_for_each_entry(tmp, &uprobe_list, list) {
		/* skip the probe being replaced when checking for conflicts */
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}
343
f3f096cf
SD
/*
 * Register a trace_uprobe and probe_event.  An existing event with the
 * same group/event name is replaced (unregistered first).  Takes
 * uprobe_lock; on success tu is linked on uprobe_list and owned by the
 * registry.
 */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}
379
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[(REF_CTR_OFFSET)] [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 *
 * Parses one command line written to the uprobe_events file and either
 * deletes the named event or builds, fills and registers a new
 * trace_uprobe.  Returns 0 on success or a negative errno.
 */
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	char *arg, *event, *group, *filename, *rctr, *rctr_end;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	/* Optional "[GRP/]EVENT" after the command character. */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';	/* split "GRP/EVENT" in place */

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg)
		return -EINVAL;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	/* Only regular files can be probed. */
	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		/* ')' must exist, follow '(', and end the string */
		if (rctr > rctr_end || *(rctr_end + 1) != 0) {
			ret = -EINVAL;
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		/* Synthesize an event name: p_<basename>_0x<offset>. */
		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;	/* tu now owns the path reference */
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	/* frees the args parsed so far, the path, names, filename and tu */
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	path_put(&path);

	pr_info("Failed to parse address or file.\n");

	return ret;
}
605
/*
 * Remove every registered uprobe event.  Stops at the first probe that
 * cannot be unregistered (e.g. still in use) and returns its error.
 */
static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}
621
/* Probes listing interfaces */

/* seq_file walk over uprobe_list; uprobe_lock is held for the whole walk. */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

/* Print one probe in the same syntax accepted by create_trace_uprobe(). */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start = probes_seq_start,
	.next = probes_seq_next,
	.stop = probes_seq_stop,
	.show = probes_seq_show
};

/* Opening for write with O_TRUNC clears all existing probes first. */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* Each written line is handed to create_trace_uprobe(). */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
693
/* Probes profiling interfaces */

/* One line per probe: filename, event name and hit count (tu->nhit). */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

/* Reuses the listing iterator (start/next/stop) with the profile show. */
static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
723
dcad1a20
NK
/*
 * Per-cpu scratch buffer (one page) used to assemble fetched argument
 * data before it is copied into the trace/perf buffers.  The mutex
 * serializes users that may have migrated off the buffer's CPU.
 */
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;		/* one page, from alloc_pages_node() */
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;	/* enabled-probe count; see enable/disable */

/* Allocate the per-cpu buffers; frees everything again on partial failure. */
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	/* free only the pages allocated before the failing CPU */
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

/* Refcounted enable; the first user allocates the buffers. */
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

/* Refcounted disable; the last user frees the buffers. */
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

/* Lock and return this CPU's buffer; pair with uprobe_buffer_put(). */
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
815
/*
 * Emit one trace record for a probe hit into trace_file's ring buffer:
 * the fixed header (IP, plus func for return probes) followed by the
 * pre-fetched argument bytes from the per-cpu buffer.
 */
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	/* the per-cpu buffer is one page; refuse anything larger */
	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
f42d24a1 857
/* uprobe handler */
/*
 * Entry-probe tracing: write a record to every trace instance attached to
 * this event.  For a return probe the entry hit is ignored here; the
 * record is written from uretprobe_trace_func() instead.
 */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

/* Return-probe tracing: as above, but passes the probed function address. */
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
886
f3f096cf
SD
/* Event entry printers */
/*
 * Format one recorded entry for the trace output: "name: (retip <- func)"
 * for return probes, "name: (ip)" otherwise, followed by the fetched
 * arguments.
 */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
919
31ba3348
ON
/* mm-filter callback installed on the uprobe consumer when enabling. */
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

/*
 * Enable the probe for a trace instance (file != NULL) or for perf
 * (file == NULL).  Trace and perf use are mutually exclusive (-EINTR when
 * the other mode already owns the probe).  The first enabler registers
 * the uprobe and turns on the per-cpu buffers; extra trace files just add
 * a link.
 */
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	/* undo the flag/link changes made above */
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
987
/*
 * Disable the probe for one trace instance (file != NULL) or for perf
 * (file == NULL).  Only when the last trace file is removed (or perf
 * disables) is the uprobe actually unregistered and the buffers released.
 */
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		/* other trace files still use the probe: keep it registered */
		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
1018
/*
 * Describe this event's record layout to the trace core: the fixed
 * FUNC/RETIP (return probe) or IP (entry probe) fields, then one field
 * per fetched argument at its offset past the fixed header.
 */
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}
1047
f3f096cf 1048#ifdef CONFIG_PERF_EVENTS
31ba3348
ON
/*
 * True if any attached perf event targets @mm, or if any system-wide
 * event is attached (no per-task filtering then).
 */
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

/* Does some other attached event already cover this event's target mm? */
static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

/*
 * Detach a perf event from the probe's filter.  If nothing else still
 * covers the event's target, re-apply the uprobe with 'false' to remove
 * breakpoints that are no longer wanted.
 */
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

/*
 * Attach a perf event to the probe's filter; apply the uprobe unless the
 * target is already covered.  On apply failure the event is detached again.
 */
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

/* uprobe_consumer->filter callback: should a breakpoint go into @mm? */
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
1140
/*
 * Emit one perf sample for @tu.  For return probes @func is the entry
 * address (vaddr[0]) and regs holds the return site; for entry probes
 * only the current instruction pointer is recorded.  The pre-fetched
 * probe arguments come from @ucb (see uprobe_dispatcher()).
 */
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	/* Let an attached BPF program veto the sample. */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	/* Round up to u64 alignment, leaving room for the u32 perf adds. */
	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	/* Zero any alignment padding beyond the copied argument data. */
	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
1193
1194/* uprobe profile handler */
dd9fa555
NK
1195static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1196 struct uprobe_cpu_buffer *ucb, int dsize)
a51cc604
ON
1197{
1198 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1199 return UPROBE_HANDLER_REMOVE;
1200
393a736c 1201 if (!is_ret_probe(tu))
dd9fa555 1202 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
f42d24a1 1203 return 0;
f3f096cf 1204}
c1ae5c75
ON
1205
/* uretprobe profile handler: record both entry address and return site. */
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
41bdc4b4
YS
1212
1213int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1214 const char **filename, u64 *probe_offset,
1215 bool perf_type_tracepoint)
1216{
1217 const char *pevent = trace_event_name(event->tp_event);
1218 const char *group = event->tp_event->class->system;
1219 struct trace_uprobe *tu;
1220
1221 if (perf_type_tracepoint)
1222 tu = find_probe_event(pevent, group);
1223 else
1224 tu = event->tp_event->data;
1225 if (!tu)
1226 return -EINVAL;
1227
1228 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1229 : BPF_FD_TYPE_UPROBE;
1230 *filename = tu->filename;
1231 *probe_offset = tu->offset;
1232 return 0;
1233}
f3f096cf
SD
1234#endif /* CONFIG_PERF_EVENTS */
1235
/*
 * trace_event_call::class::reg callback: multiplexes ftrace and perf
 * enable/disable/open/close requests onto the trace_uprobe.  @data is a
 * trace_event_file for the REGISTER/UNREGISTER pair and a perf_event
 * for the PERF_OPEN/PERF_CLOSE pair.
 */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		/* perf installs with the mm filter instead of a file. */
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}
1271
/*
 * uprobe_consumer::handler: runs when the probed instruction is hit.
 * Fetches the probe arguments once into a per-cpu buffer and fans out
 * to the ftrace and/or perf handlers depending on tp.flags.
 */
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;


	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	/*
	 * Stash the dispatch data in the utask — presumably consumed by
	 * the argument-fetch code while we store args below (TODO: confirm
	 * against the fetch implementation in trace_probe).
	 */
	current->utask->vaddr = (unsigned long) &udd;

	/* Buffers are allocated on first enable; a hit without them is a bug. */
	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
1308
c1ae5c75
ON
/*
 * uprobe_consumer::ret_handler: runs when a probed function returns.
 * @func is the original entry address.  Mirrors uprobe_dispatcher()
 * but calls the uretprobe trace/perf handlers.
 */
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	/* Make the dispatch data reachable while args are fetched below. */
	current->utask->vaddr = (unsigned long) &udd;

	/* Buffers are allocated on first enable; a hit without them is a bug. */
	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
1343
f3f096cf
SD
/* Output callbacks: format a recorded uprobe entry for the trace output. */
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
1347
33ea4b24
SL
1348static inline void init_trace_event_call(struct trace_uprobe *tu,
1349 struct trace_event_call *call)
f3f096cf 1350{
f3f096cf
SD
1351 INIT_LIST_HEAD(&call->class->fields);
1352 call->event.funcs = &uprobe_funcs;
1353 call->class->define_fields = uprobe_event_define_fields;
1354
33ea4b24
SL
1355 call->flags = TRACE_EVENT_FL_UPROBE;
1356 call->class->reg = trace_uprobe_register;
1357 call->data = tu;
1358}
1359
/*
 * Register @tu as a full trace event (format file, event id, events/
 * directory entry).  Returns 0 on success or a negative errno; on
 * failure all partially-created state is torn down again.
 */
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	/* Build the "print fmt" string shown in the event's format file. */
	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	/* register_trace_event() returns the event type id, 0 on failure. */
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}
1387
c6c2401d 1388static int unregister_uprobe_event(struct trace_uprobe *tu)
f3f096cf 1389{
c6c2401d
SRRH
1390 int ret;
1391
f3f096cf 1392 /* tu->event is unregistered in trace_remove_event_call() */
14577c39 1393 ret = trace_remove_event_call(&tu->tp.call);
c6c2401d
SRRH
1394 if (ret)
1395 return ret;
14577c39
NK
1396 kfree(tu->tp.call.print_fmt);
1397 tu->tp.call.print_fmt = NULL;
c6c2401d 1398 return 0;
f3f096cf
SD
1399}
1400
33ea4b24
SL
1401#ifdef CONFIG_PERF_EVENTS
1402struct trace_event_call *
a6ca88b2
SL
1403create_local_trace_uprobe(char *name, unsigned long offs,
1404 unsigned long ref_ctr_offset, bool is_return)
33ea4b24
SL
1405{
1406 struct trace_uprobe *tu;
33ea4b24
SL
1407 struct path path;
1408 int ret;
1409
1410 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1411 if (ret)
1412 return ERR_PTR(ret);
1413
0c92c7a3
SL
1414 if (!d_is_reg(path.dentry)) {
1415 path_put(&path);
33ea4b24
SL
1416 return ERR_PTR(-EINVAL);
1417 }
1418
1419 /*
1420 * local trace_kprobes are not added to probe_list, so they are never
1421 * searched in find_trace_kprobe(). Therefore, there is no concern of
1422 * duplicated name "DUMMY_EVENT" here.
1423 */
1424 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1425 is_return);
1426
1427 if (IS_ERR(tu)) {
1428 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1429 (int)PTR_ERR(tu));
0c92c7a3 1430 path_put(&path);
33ea4b24
SL
1431 return ERR_CAST(tu);
1432 }
1433
1434 tu->offset = offs;
0c92c7a3 1435 tu->path = path;
a6ca88b2 1436 tu->ref_ctr_offset = ref_ctr_offset;
33ea4b24
SL
1437 tu->filename = kstrdup(name, GFP_KERNEL);
1438 init_trace_event_call(tu, &tu->tp.call);
1439
1440 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1441 ret = -ENOMEM;
1442 goto error;
1443 }
1444
1445 return &tu->tp.call;
1446error:
1447 free_trace_uprobe(tu);
1448 return ERR_PTR(ret);
1449}
1450
1451void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1452{
1453 struct trace_uprobe *tu;
1454
1455 tu = container_of(event_call, struct trace_uprobe, tp.call);
1456
1457 kfree(tu->tp.call.print_fmt);
1458 tu->tp.call.print_fmt = NULL;
1459
1460 free_trace_uprobe(tu);
1461}
1462#endif /* CONFIG_PERF_EVENTS */
1463
f3f096cf
SD
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	/* No tracefs: nothing to expose, but not a fatal error. */
	if (IS_ERR(d_tracer))
		return 0;

	/* Probe definition interface (echo 'p:...' > uprobe_events). */
	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}
1480
1481fs_initcall(init_uprobe_trace);