1 /*
2 * uprobes-based tracing events
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Copyright (C) IBM Corporation, 2010-2012
18 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
19 */
20 #define pr_fmt(fmt) "trace_uprobe: " fmt
21
22 #include <linux/module.h>
23 #include <linux/uaccess.h>
24 #include <linux/uprobes.h>
25 #include <linux/namei.h>
26 #include <linux/string.h>
27 #include <linux/rculist.h>
28
29 #include "trace_probe.h"
30
31 #define UPROBE_EVENT_SYSTEM "uprobes"
32
33 struct uprobe_trace_entry_head {
34 struct trace_entry ent;
35 unsigned long vaddr[];
36 };
37
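/*
 * Each entry records one saved address (the probe address) for an
 * ordinary uprobe, or two (function entry and return address) for a
 * uretprobe; the fetched argument data is laid out right after them.
 */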
38 #define SIZEOF_TRACE_ENTRY(is_return) \
39 (sizeof(struct uprobe_trace_entry_head) + \
40 sizeof(unsigned long) * (is_return ? 2 : 1))
41
42 #define DATAOF_TRACE_ENTRY(entry, is_return) \
43 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
44
45 struct trace_uprobe_filter {
46 rwlock_t rwlock;
47 int nr_systemwide;
48 struct list_head perf_events;
49 };
50
51 /*
52 * uprobe event core functions
53 */
54 struct trace_uprobe {
55 struct list_head list;
56 struct trace_uprobe_filter filter;
57 struct uprobe_consumer consumer;
58 struct path path;
59 struct inode *inode;
60 char *filename;
61 unsigned long offset;
62 unsigned long nhit;
63 struct trace_probe tp;
64 };
65
66 #define SIZEOF_TRACE_UPROBE(n) \
67 (offsetof(struct trace_uprobe, tp.args) + \
68 (sizeof(struct probe_arg) * (n)))
69
70 static int register_uprobe_event(struct trace_uprobe *tu);
71 static int unregister_uprobe_event(struct trace_uprobe *tu);
72
73 static DEFINE_MUTEX(uprobe_lock);
74 static LIST_HEAD(uprobe_list);
75
76 struct uprobe_dispatch_data {
77 struct trace_uprobe *tu;
78 unsigned long bp_addr;
79 };
80
81 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
82 static int uretprobe_dispatcher(struct uprobe_consumer *con,
83 unsigned long func, struct pt_regs *regs);
84
85 #ifdef CONFIG_STACK_GROWSUP
86 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
87 {
88 return addr - (n * sizeof(long));
89 }
90 #else
91 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
92 {
93 return addr + (n * sizeof(long));
94 }
95 #endif
96
97 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
98 {
99 unsigned long ret;
100 unsigned long addr = user_stack_pointer(regs);
101
102 addr = adjust_stack_addr(addr, n);
103
104 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
105 return 0;
106
107 return ret;
108 }
109
110 /*
111 * Uprobes-specific fetch functions
112 */
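/*
 * Unlike the kprobe variants, these read from user memory via
 * copy_from_user()/strncpy_from_user(); on a fault the destination is
 * zeroed (or left as an empty string) instead of reporting an error.
 */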
113 #define DEFINE_FETCH_stack(type) \
114 static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
115 void *offset, void *dest) \
116 { \
117 *(type *)dest = (type)get_user_stack_nth(regs, \
118 ((unsigned long)offset)); \
119 }
120 DEFINE_BASIC_FETCH_FUNCS(stack)
121 /* No string on the stack entry */
122 #define fetch_stack_string NULL
123 #define fetch_stack_string_size NULL
124
125 #define DEFINE_FETCH_memory(type) \
126 static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
127 void *addr, void *dest) \
128 { \
129 type retval; \
130 void __user *vaddr = (void __force __user *) addr; \
131 \
132 if (copy_from_user(&retval, vaddr, sizeof(type))) \
133 *(type *)dest = 0; \
134 else \
135 *(type *) dest = retval; \
136 }
137 DEFINE_BASIC_FETCH_FUNCS(memory)
138 /*
139 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
140 * length and relative data location.
141 */
142 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
143 void *addr, void *dest)
144 {
145 long ret;
146 u32 rloc = *(u32 *)dest;
147 int maxlen = get_rloc_len(rloc);
148 u8 *dst = get_rloc_data(dest);
149 void __user *src = (void __force __user *) addr;
150
151 if (!maxlen)
152 return;
153
154 ret = strncpy_from_user(dst, src, maxlen);
155 if (ret == maxlen)
156 dst[--ret] = '\0';
157
158 if (ret < 0) { /* Failed to fetch string */
159 ((u8 *)get_rloc_data(dest))[0] = '\0';
160 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
161 } else {
162 *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
163 }
164 }
165
166 static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
167 void *addr, void *dest)
168 {
169 int len;
170 void __user *vaddr = (void __force __user *) addr;
171
172 len = strnlen_user(vaddr, MAX_STRING_SIZE);
173
174 if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
175 *(u32 *)dest = 0;
176 else
177 *(u32 *)dest = len;
178 }
179
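/*
 * "@+file_offset" arguments can only be resolved at probe-hit time:
 * the dispatcher stashes a struct uprobe_dispatch_data in
 * current->utask->vaddr, from which the mapping base is recovered as
 * bp_addr - tu->offset.
 */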
180 static unsigned long translate_user_vaddr(void *file_offset)
181 {
182 unsigned long base_addr;
183 struct uprobe_dispatch_data *udd;
184
185 udd = (void *) current->utask->vaddr;
186
187 base_addr = udd->bp_addr - udd->tu->offset;
188 return base_addr + (unsigned long)file_offset;
189 }
190
191 #define DEFINE_FETCH_file_offset(type) \
192 static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
193 void *offset, void *dest)\
194 { \
195 void *vaddr = (void *)translate_user_vaddr(offset); \
196 \
197 FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
198 }
199 DEFINE_BASIC_FETCH_FUNCS(file_offset)
200 DEFINE_FETCH_file_offset(string)
201 DEFINE_FETCH_file_offset(string_size)
202
203 /* Fetch type information table */
204 static const struct fetch_type uprobes_fetch_type_table[] = {
205 /* Special types */
206 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
207 sizeof(u32), 1, "__data_loc char[]"),
208 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
209 string_size, sizeof(u32), 0, "u32"),
210 /* Basic types */
211 ASSIGN_FETCH_TYPE(u8, u8, 0),
212 ASSIGN_FETCH_TYPE(u16, u16, 0),
213 ASSIGN_FETCH_TYPE(u32, u32, 0),
214 ASSIGN_FETCH_TYPE(u64, u64, 0),
215 ASSIGN_FETCH_TYPE(s8, u8, 1),
216 ASSIGN_FETCH_TYPE(s16, u16, 1),
217 ASSIGN_FETCH_TYPE(s32, u32, 1),
218 ASSIGN_FETCH_TYPE(s64, u64, 1),
219 ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0),
220 ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
221 ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
222 ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
223
224 ASSIGN_FETCH_TYPE_END
225 };
226
227 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
228 {
229 rwlock_init(&filter->rwlock);
230 filter->nr_systemwide = 0;
231 INIT_LIST_HEAD(&filter->perf_events);
232 }
233
234 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
235 {
236 return !filter->nr_systemwide && list_empty(&filter->perf_events);
237 }
238
239 static inline bool is_ret_probe(struct trace_uprobe *tu)
240 {
241 return tu->consumer.ret_handler != NULL;
242 }
243
244 /*
245 * Allocate new trace_uprobe and initialize it (including uprobes).
246 */
247 static struct trace_uprobe *
248 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
249 {
250 struct trace_uprobe *tu;
251
252 if (!event || !is_good_name(event))
253 return ERR_PTR(-EINVAL);
254
255 if (!group || !is_good_name(group))
256 return ERR_PTR(-EINVAL);
257
258 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
259 if (!tu)
260 return ERR_PTR(-ENOMEM);
261
262 tu->tp.call.class = &tu->tp.class;
263 tu->tp.call.name = kstrdup(event, GFP_KERNEL);
264 if (!tu->tp.call.name)
265 goto error;
266
267 tu->tp.class.system = kstrdup(group, GFP_KERNEL);
268 if (!tu->tp.class.system)
269 goto error;
270
271 INIT_LIST_HEAD(&tu->list);
272 INIT_LIST_HEAD(&tu->tp.files);
273 tu->consumer.handler = uprobe_dispatcher;
274 if (is_ret)
275 tu->consumer.ret_handler = uretprobe_dispatcher;
276 init_trace_uprobe_filter(&tu->filter);
277 return tu;
278
279 error:
280 kfree(tu->tp.call.name);
281 kfree(tu);
282
283 return ERR_PTR(-ENOMEM);
284 }
285
286 static void free_trace_uprobe(struct trace_uprobe *tu)
287 {
288 int i;
289
290 for (i = 0; i < tu->tp.nr_args; i++)
291 traceprobe_free_probe_arg(&tu->tp.args[i]);
292
293 path_put(&tu->path);
294 kfree(tu->tp.call.class->system);
295 kfree(tu->tp.call.name);
296 kfree(tu->filename);
297 kfree(tu);
298 }
299
300 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
301 {
302 struct trace_uprobe *tu;
303
304 list_for_each_entry(tu, &uprobe_list, list)
305 if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
306 strcmp(tu->tp.call.class->system, group) == 0)
307 return tu;
308
309 return NULL;
310 }
311
312 /* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
313 static int unregister_trace_uprobe(struct trace_uprobe *tu)
314 {
315 int ret;
316
317 ret = unregister_uprobe_event(tu);
318 if (ret)
319 return ret;
320
321 list_del(&tu->list);
322 free_trace_uprobe(tu);
323 return 0;
324 }
325
326 /* Register a trace_uprobe and probe_event */
327 static int register_trace_uprobe(struct trace_uprobe *tu)
328 {
329 struct trace_uprobe *old_tu;
330 int ret;
331
332 mutex_lock(&uprobe_lock);
333
334 /* register as an event */
335 old_tu = find_probe_event(trace_event_name(&tu->tp.call),
336 tu->tp.call.class->system);
337 if (old_tu) {
338 /* delete old event */
339 ret = unregister_trace_uprobe(old_tu);
340 if (ret)
341 goto end;
342 }
343
344 ret = register_uprobe_event(tu);
345 if (ret) {
346 pr_warn("Failed to register probe event(%d)\n", ret);
347 goto end;
348 }
349
350 list_add_tail(&tu->list, &uprobe_list);
351
352 end:
353 mutex_unlock(&uprobe_lock);
354
355 return ret;
356 }
357
358 /*
359 * Argument syntax:
360 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
361 *
362 * - Remove uprobe: -:[GRP/]EVENT
363 */
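/*
 * For example (the path and offset below are purely illustrative):
 *   echo 'p:myprobe /bin/bash:0x4245c0' > uprobe_events
 *   echo '-:myprobe' >> uprobe_events
 */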
364 static int create_trace_uprobe(int argc, char **argv)
365 {
366 struct trace_uprobe *tu;
367 char *arg, *event, *group, *filename;
368 char buf[MAX_EVENT_NAME_LEN];
369 struct path path;
370 unsigned long offset;
371 bool is_delete, is_return;
372 int i, ret;
373
374 ret = 0;
375 is_delete = false;
376 is_return = false;
377 event = NULL;
378 group = NULL;
379
380 /* argc must be >= 1 */
381 if (argv[0][0] == '-')
382 is_delete = true;
383 else if (argv[0][0] == 'r')
384 is_return = true;
385 else if (argv[0][0] != 'p') {
386 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
387 return -EINVAL;
388 }
389
390 if (argv[0][1] == ':') {
391 event = &argv[0][2];
392 arg = strchr(event, '/');
393
394 if (arg) {
395 group = event;
396 event = arg + 1;
397 event[-1] = '\0';
398
399 if (strlen(group) == 0) {
400 pr_info("Group name is not specified\n");
401 return -EINVAL;
402 }
403 }
404 if (strlen(event) == 0) {
405 pr_info("Event name is not specified\n");
406 return -EINVAL;
407 }
408 }
409 if (!group)
410 group = UPROBE_EVENT_SYSTEM;
411
412 if (is_delete) {
413 int ret;
414
415 if (!event) {
416 pr_info("Delete command needs an event name.\n");
417 return -EINVAL;
418 }
419 mutex_lock(&uprobe_lock);
420 tu = find_probe_event(event, group);
421
422 if (!tu) {
423 mutex_unlock(&uprobe_lock);
424 pr_info("Event %s/%s doesn't exist.\n", group, event);
425 return -ENOENT;
426 }
427 /* delete an event */
428 ret = unregister_trace_uprobe(tu);
429 mutex_unlock(&uprobe_lock);
430 return ret;
431 }
432
433 if (argc < 2) {
434 pr_info("Probe point is not specified.\n");
435 return -EINVAL;
436 }
437 /* Find the last occurrence, in case the path contains ':' too. */
438 arg = strrchr(argv[1], ':');
439 if (!arg)
440 return -EINVAL;
441
442 *arg++ = '\0';
443 filename = argv[1];
444 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
445 if (ret)
446 return ret;
447
448 if (!d_is_reg(path.dentry)) {
449 ret = -EINVAL;
450 goto fail_address_parse;
451 }
452
453 ret = kstrtoul(arg, 0, &offset);
454 if (ret)
455 goto fail_address_parse;
456
457 argc -= 2;
458 argv += 2;
459
460 /* setup a probe */
461 if (!event) {
462 char *tail;
463 char *ptr;
464
465 tail = kstrdup(kbasename(filename), GFP_KERNEL);
466 if (!tail) {
467 ret = -ENOMEM;
468 goto fail_address_parse;
469 }
470
471 ptr = strpbrk(tail, ".-_");
472 if (ptr)
473 *ptr = '\0';
474
475 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
476 event = buf;
477 kfree(tail);
478 }
479
480 tu = alloc_trace_uprobe(group, event, argc, is_return);
481 if (IS_ERR(tu)) {
482 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
483 ret = PTR_ERR(tu);
484 goto fail_address_parse;
485 }
486 tu->offset = offset;
487 tu->path = path;
488 tu->filename = kstrdup(filename, GFP_KERNEL);
489
490 if (!tu->filename) {
491 pr_info("Failed to allocate filename.\n");
492 ret = -ENOMEM;
493 goto error;
494 }
495
496 /* parse arguments */
497 ret = 0;
498 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
499 struct probe_arg *parg = &tu->tp.args[i];
500
501 /* Increment count for freeing args in error case */
502 tu->tp.nr_args++;
503
504 /* Parse argument name */
505 arg = strchr(argv[i], '=');
506 if (arg) {
507 *arg++ = '\0';
508 parg->name = kstrdup(argv[i], GFP_KERNEL);
509 } else {
510 arg = argv[i];
511 /* If argument name is omitted, set "argN" */
512 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
513 parg->name = kstrdup(buf, GFP_KERNEL);
514 }
515
516 if (!parg->name) {
517 pr_info("Failed to allocate argument[%d] name.\n", i);
518 ret = -ENOMEM;
519 goto error;
520 }
521
522 if (!is_good_name(parg->name)) {
523 pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
524 ret = -EINVAL;
525 goto error;
526 }
527
528 if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
529 pr_info("Argument[%d] name '%s' conflicts with "
530 "another field.\n", i, argv[i]);
531 ret = -EINVAL;
532 goto error;
533 }
534
535 /* Parse fetch argument */
536 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
537 is_return, false,
538 uprobes_fetch_type_table);
539 if (ret) {
540 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
541 goto error;
542 }
543 }
544
545 ret = register_trace_uprobe(tu);
546 if (ret)
547 goto error;
548 return 0;
549
550 error:
551 free_trace_uprobe(tu);
552 return ret;
553
554 fail_address_parse:
555 path_put(&path);
556
557 pr_info("Failed to parse address or file.\n");
558
559 return ret;
560 }
561
562 static int cleanup_all_probes(void)
563 {
564 struct trace_uprobe *tu;
565 int ret = 0;
566
567 mutex_lock(&uprobe_lock);
568 while (!list_empty(&uprobe_list)) {
569 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
570 ret = unregister_trace_uprobe(tu);
571 if (ret)
572 break;
573 }
574 mutex_unlock(&uprobe_lock);
575 return ret;
576 }
577
578 /* Probes listing interfaces */
579 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
580 {
581 mutex_lock(&uprobe_lock);
582 return seq_list_start(&uprobe_list, *pos);
583 }
584
585 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
586 {
587 return seq_list_next(v, &uprobe_list, pos);
588 }
589
590 static void probes_seq_stop(struct seq_file *m, void *v)
591 {
592 mutex_unlock(&uprobe_lock);
593 }
594
595 static int probes_seq_show(struct seq_file *m, void *v)
596 {
597 struct trace_uprobe *tu = v;
598 char c = is_ret_probe(tu) ? 'r' : 'p';
599 int i;
600
601 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
602 trace_event_name(&tu->tp.call));
603 seq_printf(m, " %s:", tu->filename);
604
605 /* Don't print "0x (null)" when offset is 0 */
606 if (tu->offset) {
607 seq_printf(m, "0x%0*lx", (int)(sizeof(void *) * 2), tu->offset);
608 } else {
609 switch (sizeof(void *)) {
610 case 4:
611 seq_printf(m, "0x00000000");
612 break;
613 case 8:
614 default:
615 seq_printf(m, "0x0000000000000000");
616 break;
617 }
618 }
619
620 for (i = 0; i < tu->tp.nr_args; i++)
621 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
622
623 seq_putc(m, '\n');
624 return 0;
625 }
626
627 static const struct seq_operations probes_seq_op = {
628 .start = probes_seq_start,
629 .next = probes_seq_next,
630 .stop = probes_seq_stop,
631 .show = probes_seq_show
632 };
633
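/* Opening uprobe_events for writing with O_TRUNC removes all existing probes. */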
634 static int probes_open(struct inode *inode, struct file *file)
635 {
636 int ret;
637
638 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
639 ret = cleanup_all_probes();
640 if (ret)
641 return ret;
642 }
643
644 return seq_open(file, &probes_seq_op);
645 }
646
647 static ssize_t probes_write(struct file *file, const char __user *buffer,
648 size_t count, loff_t *ppos)
649 {
650 return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
651 }
652
653 static const struct file_operations uprobe_events_ops = {
654 .owner = THIS_MODULE,
655 .open = probes_open,
656 .read = seq_read,
657 .llseek = seq_lseek,
658 .release = seq_release,
659 .write = probes_write,
660 };
661
662 /* Probes profiling interfaces */
663 static int probes_profile_seq_show(struct seq_file *m, void *v)
664 {
665 struct trace_uprobe *tu = v;
666
667 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
668 trace_event_name(&tu->tp.call), tu->nhit);
669 return 0;
670 }
671
672 static const struct seq_operations profile_seq_op = {
673 .start = probes_seq_start,
674 .next = probes_seq_next,
675 .stop = probes_seq_stop,
676 .show = probes_profile_seq_show
677 };
678
679 static int profile_open(struct inode *inode, struct file *file)
680 {
681 return seq_open(file, &profile_seq_op);
682 }
683
684 static const struct file_operations uprobe_profile_ops = {
685 .owner = THIS_MODULE,
686 .open = profile_open,
687 .read = seq_read,
688 .llseek = seq_lseek,
689 .release = seq_release,
690 };
691
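/*
 * Per-cpu scratch buffer (one page) used to collect the fetched
 * argument data before it is copied into the ftrace ring buffer or
 * the perf buffer.
 */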
692 struct uprobe_cpu_buffer {
693 struct mutex mutex;
694 void *buf;
695 };
696 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
697 static int uprobe_buffer_refcnt;
698
699 static int uprobe_buffer_init(void)
700 {
701 int cpu, err_cpu;
702
703 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
704 if (uprobe_cpu_buffer == NULL)
705 return -ENOMEM;
706
707 for_each_possible_cpu(cpu) {
708 struct page *p = alloc_pages_node(cpu_to_node(cpu),
709 GFP_KERNEL, 0);
710 if (p == NULL) {
711 err_cpu = cpu;
712 goto err;
713 }
714 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
715 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
716 }
717
718 return 0;
719
720 err:
721 for_each_possible_cpu(cpu) {
722 if (cpu == err_cpu)
723 break;
724 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
725 }
726
727 free_percpu(uprobe_cpu_buffer);
728 return -ENOMEM;
729 }
730
731 static int uprobe_buffer_enable(void)
732 {
733 int ret = 0;
734
735 BUG_ON(!mutex_is_locked(&event_mutex));
736
737 if (uprobe_buffer_refcnt++ == 0) {
738 ret = uprobe_buffer_init();
739 if (ret < 0)
740 uprobe_buffer_refcnt--;
741 }
742
743 return ret;
744 }
745
746 static void uprobe_buffer_disable(void)
747 {
748 int cpu;
749
750 BUG_ON(!mutex_is_locked(&event_mutex));
751
752 if (--uprobe_buffer_refcnt == 0) {
753 for_each_possible_cpu(cpu)
754 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
755 cpu)->buf);
756
757 free_percpu(uprobe_cpu_buffer);
758 uprobe_cpu_buffer = NULL;
759 }
760 }
761
762 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
763 {
764 struct uprobe_cpu_buffer *ucb;
765 int cpu;
766
767 cpu = raw_smp_processor_id();
768 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
769
770 /*
771 * Use per-cpu buffers for fastest access, but we might migrate
772 * so the mutex makes sure we have sole access to it.
773 */
774 mutex_lock(&ucb->mutex);
775
776 return ucb;
777 }
778
779 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
780 {
781 mutex_unlock(&ucb->mutex);
782 }
783
784 static void __uprobe_trace_func(struct trace_uprobe *tu,
785 unsigned long func, struct pt_regs *regs,
786 struct uprobe_cpu_buffer *ucb, int dsize,
787 struct trace_event_file *trace_file)
788 {
789 struct uprobe_trace_entry_head *entry;
790 struct ring_buffer_event *event;
791 struct ring_buffer *buffer;
792 void *data;
793 int size, esize;
794 struct trace_event_call *call = &tu->tp.call;
795
796 WARN_ON(call != trace_file->event_call);
797
798 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
799 return;
800
801 if (trace_trigger_soft_disabled(trace_file))
802 return;
803
804 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
805 size = esize + tu->tp.size + dsize;
806 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
807 call->event.type, size, 0, 0);
808 if (!event)
809 return;
810
811 entry = ring_buffer_event_data(event);
812 if (is_ret_probe(tu)) {
813 entry->vaddr[0] = func;
814 entry->vaddr[1] = instruction_pointer(regs);
815 data = DATAOF_TRACE_ENTRY(entry, true);
816 } else {
817 entry->vaddr[0] = instruction_pointer(regs);
818 data = DATAOF_TRACE_ENTRY(entry, false);
819 }
820
821 memcpy(data, ucb->buf, tu->tp.size + dsize);
822
823 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
824 }
825
826 /* uprobe handler */
827 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
828 struct uprobe_cpu_buffer *ucb, int dsize)
829 {
830 struct event_file_link *link;
831
832 if (is_ret_probe(tu))
833 return 0;
834
835 rcu_read_lock();
836 list_for_each_entry_rcu(link, &tu->tp.files, list)
837 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
838 rcu_read_unlock();
839
840 return 0;
841 }
842
843 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
844 struct pt_regs *regs,
845 struct uprobe_cpu_buffer *ucb, int dsize)
846 {
847 struct event_file_link *link;
848
849 rcu_read_lock();
850 list_for_each_entry_rcu(link, &tu->tp.files, list)
851 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
852 rcu_read_unlock();
853 }
854
855 /* Event entry printers */
856 static enum print_line_t
857 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
858 {
859 struct uprobe_trace_entry_head *entry;
860 struct trace_seq *s = &iter->seq;
861 struct trace_uprobe *tu;
862 u8 *data;
863 int i;
864
865 entry = (struct uprobe_trace_entry_head *)iter->ent;
866 tu = container_of(event, struct trace_uprobe, tp.call.event);
867
868 if (is_ret_probe(tu)) {
869 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
870 trace_event_name(&tu->tp.call),
871 entry->vaddr[1], entry->vaddr[0]);
872 data = DATAOF_TRACE_ENTRY(entry, true);
873 } else {
874 trace_seq_printf(s, "%s: (0x%lx)",
875 trace_event_name(&tu->tp.call),
876 entry->vaddr[0]);
877 data = DATAOF_TRACE_ENTRY(entry, false);
878 }
879
880 for (i = 0; i < tu->tp.nr_args; i++) {
881 struct probe_arg *parg = &tu->tp.args[i];
882
883 if (!parg->type->print(s, parg->name, data + parg->offset, entry))
884 goto out;
885 }
886
887 trace_seq_putc(s, '\n');
888
889 out:
890 return trace_handle_return(s);
891 }
892
893 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
894 enum uprobe_filter_ctx ctx,
895 struct mm_struct *mm);
896
897 static int
898 probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
899 filter_func_t filter)
900 {
901 bool enabled = trace_probe_is_enabled(&tu->tp);
902 struct event_file_link *link = NULL;
903 int ret;
904
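/* An event is driven either by ftrace (TP_FLAG_TRACE) or by perf (TP_FLAG_PROFILE), never both at once. */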
905 if (file) {
906 if (tu->tp.flags & TP_FLAG_PROFILE)
907 return -EINTR;
908
909 link = kmalloc(sizeof(*link), GFP_KERNEL);
910 if (!link)
911 return -ENOMEM;
912
913 link->file = file;
914 list_add_tail_rcu(&link->list, &tu->tp.files);
915
916 tu->tp.flags |= TP_FLAG_TRACE;
917 } else {
918 if (tu->tp.flags & TP_FLAG_TRACE)
919 return -EINTR;
920
921 tu->tp.flags |= TP_FLAG_PROFILE;
922 }
923
924 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
925
926 if (enabled)
927 return 0;
928
929 ret = uprobe_buffer_enable();
930 if (ret)
931 goto err_flags;
932
933 tu->consumer.filter = filter;
934 tu->inode = d_real_inode(tu->path.dentry);
935 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
936 if (ret)
937 goto err_buffer;
938
939 return 0;
940
941 err_buffer:
942 uprobe_buffer_disable();
943
944 err_flags:
945 if (file) {
946 list_del(&link->list);
947 kfree(link);
948 tu->tp.flags &= ~TP_FLAG_TRACE;
949 } else {
950 tu->tp.flags &= ~TP_FLAG_PROFILE;
951 }
952 return ret;
953 }
954
955 static void
956 probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
957 {
958 if (!trace_probe_is_enabled(&tu->tp))
959 return;
960
961 if (file) {
962 struct event_file_link *link;
963
964 link = find_event_file_link(&tu->tp, file);
965 if (!link)
966 return;
967
968 list_del_rcu(&link->list);
969 /* synchronize with u{,ret}probe_trace_func */
970 synchronize_sched();
971 kfree(link);
972
973 if (!list_empty(&tu->tp.files))
974 return;
975 }
976
977 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
978
979 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
980 tu->inode = NULL;
981 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
982
983 uprobe_buffer_disable();
984 }
985
986 static int uprobe_event_define_fields(struct trace_event_call *event_call)
987 {
988 int ret, i, size;
989 struct uprobe_trace_entry_head field;
990 struct trace_uprobe *tu = event_call->data;
991
992 if (is_ret_probe(tu)) {
993 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
994 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
995 size = SIZEOF_TRACE_ENTRY(true);
996 } else {
997 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
998 size = SIZEOF_TRACE_ENTRY(false);
999 }
1000 /* Set argument names as fields */
1001 for (i = 0; i < tu->tp.nr_args; i++) {
1002 struct probe_arg *parg = &tu->tp.args[i];
1003
1004 ret = trace_define_field(event_call, parg->type->fmttype,
1005 parg->name, size + parg->offset,
1006 parg->type->size, parg->type->is_signed,
1007 FILTER_OTHER);
1008
1009 if (ret)
1010 return ret;
1011 }
1012 return 0;
1013 }
1014
1015 #ifdef CONFIG_PERF_EVENTS
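/*
 * Return true if the probe should stay armed for @mm: either a
 * system-wide perf event is attached, or one of the attached per-task
 * events targets a task using this mm.
 */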
1016 static bool
1017 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1018 {
1019 struct perf_event *event;
1020
1021 if (filter->nr_systemwide)
1022 return true;
1023
1024 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1025 if (event->hw.target->mm == mm)
1026 return true;
1027 }
1028
1029 return false;
1030 }
1031
1032 static inline bool
1033 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1034 {
1035 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1036 }
1037
1038 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1039 {
1040 bool done;
1041
1042 write_lock(&tu->filter.rwlock);
1043 if (event->hw.target) {
1044 list_del(&event->hw.tp_list);
1045 done = tu->filter.nr_systemwide ||
1046 (event->hw.target->flags & PF_EXITING) ||
1047 uprobe_filter_event(tu, event);
1048 } else {
1049 tu->filter.nr_systemwide--;
1050 done = tu->filter.nr_systemwide;
1051 }
1052 write_unlock(&tu->filter.rwlock);
1053
1054 if (!done)
1055 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1056
1057 return 0;
1058 }
1059
1060 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1061 {
1062 bool done;
1063 int err;
1064
1065 write_lock(&tu->filter.rwlock);
1066 if (event->hw.target) {
1067 /*
1068 * event->parent != NULL means copy_process(), we can avoid
1069 * uprobe_apply(). current->mm must be probed and we can rely
1070 * on dup_mmap() which preserves the already installed bp's.
1071 *
1072 * attr.enable_on_exec means that exec/mmap will install the
1073 * breakpoints we need.
1074 */
1075 done = tu->filter.nr_systemwide ||
1076 event->parent || event->attr.enable_on_exec ||
1077 uprobe_filter_event(tu, event);
1078 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1079 } else {
1080 done = tu->filter.nr_systemwide;
1081 tu->filter.nr_systemwide++;
1082 }
1083 write_unlock(&tu->filter.rwlock);
1084
1085 err = 0;
1086 if (!done) {
1087 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1088 if (err)
1089 uprobe_perf_close(tu, event);
1090 }
1091 return err;
1092 }
1093
1094 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1095 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1096 {
1097 struct trace_uprobe *tu;
1098 int ret;
1099
1100 tu = container_of(uc, struct trace_uprobe, consumer);
1101 read_lock(&tu->filter.rwlock);
1102 ret = __uprobe_perf_filter(&tu->filter, mm);
1103 read_unlock(&tu->filter.rwlock);
1104
1105 return ret;
1106 }
1107
1108 static void __uprobe_perf_func(struct trace_uprobe *tu,
1109 unsigned long func, struct pt_regs *regs,
1110 struct uprobe_cpu_buffer *ucb, int dsize)
1111 {
1112 struct trace_event_call *call = &tu->tp.call;
1113 struct uprobe_trace_entry_head *entry;
1114 struct hlist_head *head;
1115 void *data;
1116 int size, esize;
1117 int rctx;
1118
1119 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1120 return;
1121
1122 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1123
1124 size = esize + tu->tp.size + dsize;
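/* Size the record so that it plus perf's u32 size header stays u64-aligned. */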
1125 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1126 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1127 return;
1128
1129 preempt_disable();
1130 head = this_cpu_ptr(call->perf_events);
1131 if (hlist_empty(head))
1132 goto out;
1133
1134 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1135 if (!entry)
1136 goto out;
1137
1138 if (is_ret_probe(tu)) {
1139 entry->vaddr[0] = func;
1140 entry->vaddr[1] = instruction_pointer(regs);
1141 data = DATAOF_TRACE_ENTRY(entry, true);
1142 } else {
1143 entry->vaddr[0] = instruction_pointer(regs);
1144 data = DATAOF_TRACE_ENTRY(entry, false);
1145 }
1146
1147 memcpy(data, ucb->buf, tu->tp.size + dsize);
1148
1149 if (size - esize > tu->tp.size + dsize) {
1150 int len = tu->tp.size + dsize;
1151
1152 memset(data + len, 0, size - esize - len);
1153 }
1154
1155 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1156 head, NULL);
1157 out:
1158 preempt_enable();
1159 }
1160
1161 /* uprobe profile handler */
1162 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1163 struct uprobe_cpu_buffer *ucb, int dsize)
1164 {
1165 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1166 return UPROBE_HANDLER_REMOVE;
1167
1168 if (!is_ret_probe(tu))
1169 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1170 return 0;
1171 }
1172
1173 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1174 struct pt_regs *regs,
1175 struct uprobe_cpu_buffer *ucb, int dsize)
1176 {
1177 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1178 }
1179 #endif /* CONFIG_PERF_EVENTS */
1180
1181 static int
1182 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1183 void *data)
1184 {
1185 struct trace_uprobe *tu = event->data;
1186 struct trace_event_file *file = data;
1187
1188 switch (type) {
1189 case TRACE_REG_REGISTER:
1190 return probe_event_enable(tu, file, NULL);
1191
1192 case TRACE_REG_UNREGISTER:
1193 probe_event_disable(tu, file);
1194 return 0;
1195
1196 #ifdef CONFIG_PERF_EVENTS
1197 case TRACE_REG_PERF_REGISTER:
1198 return probe_event_enable(tu, NULL, uprobe_perf_filter);
1199
1200 case TRACE_REG_PERF_UNREGISTER:
1201 probe_event_disable(tu, NULL);
1202 return 0;
1203
1204 case TRACE_REG_PERF_OPEN:
1205 return uprobe_perf_open(tu, data);
1206
1207 case TRACE_REG_PERF_CLOSE:
1208 return uprobe_perf_close(tu, data);
1209
1210 #endif
1211 default:
1212 return 0;
1213 }
1214 return 0;
1215 }
1216
1217 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1218 {
1219 struct trace_uprobe *tu;
1220 struct uprobe_dispatch_data udd;
1221 struct uprobe_cpu_buffer *ucb;
1222 int dsize, esize;
1223 int ret = 0;
1224
1225
1226 tu = container_of(con, struct trace_uprobe, consumer);
1227 tu->nhit++;
1228
1229 udd.tu = tu;
1230 udd.bp_addr = instruction_pointer(regs);
1231
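/* Stash the dispatch data where the file_offset fetch functions can find it. */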
1232 current->utask->vaddr = (unsigned long) &udd;
1233
1234 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1235 return 0;
1236
1237 dsize = __get_data_size(&tu->tp, regs);
1238 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1239
1240 ucb = uprobe_buffer_get();
1241 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1242
1243 if (tu->tp.flags & TP_FLAG_TRACE)
1244 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1245
1246 #ifdef CONFIG_PERF_EVENTS
1247 if (tu->tp.flags & TP_FLAG_PROFILE)
1248 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1249 #endif
1250 uprobe_buffer_put(ucb);
1251 return ret;
1252 }
1253
1254 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1255 unsigned long func, struct pt_regs *regs)
1256 {
1257 struct trace_uprobe *tu;
1258 struct uprobe_dispatch_data udd;
1259 struct uprobe_cpu_buffer *ucb;
1260 int dsize, esize;
1261
1262 tu = container_of(con, struct trace_uprobe, consumer);
1263
1264 udd.tu = tu;
1265 udd.bp_addr = func;
1266
1267 current->utask->vaddr = (unsigned long) &udd;
1268
1269 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1270 return 0;
1271
1272 dsize = __get_data_size(&tu->tp, regs);
1273 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1274
1275 ucb = uprobe_buffer_get();
1276 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1277
1278 if (tu->tp.flags & TP_FLAG_TRACE)
1279 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1280
1281 #ifdef CONFIG_PERF_EVENTS
1282 if (tu->tp.flags & TP_FLAG_PROFILE)
1283 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1284 #endif
1285 uprobe_buffer_put(ucb);
1286 return 0;
1287 }
1288
1289 static struct trace_event_functions uprobe_funcs = {
1290 .trace = print_uprobe_event
1291 };
1292
1293 static int register_uprobe_event(struct trace_uprobe *tu)
1294 {
1295 struct trace_event_call *call = &tu->tp.call;
1296 int ret;
1297
1298 /* Initialize trace_event_call */
1299 INIT_LIST_HEAD(&call->class->fields);
1300 call->event.funcs = &uprobe_funcs;
1301 call->class->define_fields = uprobe_event_define_fields;
1302
1303 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1304 return -ENOMEM;
1305
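/* register_trace_event() returns the assigned event type id, or 0 on failure. */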
1306 ret = register_trace_event(&call->event);
1307 if (!ret) {
1308 kfree(call->print_fmt);
1309 return -ENODEV;
1310 }
1311
1312 call->flags = TRACE_EVENT_FL_UPROBE;
1313 call->class->reg = trace_uprobe_register;
1314 call->data = tu;
1315 ret = trace_add_event_call(call);
1316
1317 if (ret) {
1318 pr_info("Failed to register uprobe event: %s\n",
1319 trace_event_name(call));
1320 kfree(call->print_fmt);
1321 unregister_trace_event(&call->event);
1322 }
1323
1324 return ret;
1325 }
1326
1327 static int unregister_uprobe_event(struct trace_uprobe *tu)
1328 {
1329 int ret;
1330
1331 /* tu->event is unregistered in trace_remove_event_call() */
1332 ret = trace_remove_event_call(&tu->tp.call);
1333 if (ret)
1334 return ret;
1335 kfree(tu->tp.call.print_fmt);
1336 tu->tp.call.print_fmt = NULL;
1337 return 0;
1338 }
1339
1340 /* Make a trace interface for controlling probe points */
1341 static __init int init_uprobe_trace(void)
1342 {
1343 struct dentry *d_tracer;
1344
1345 d_tracer = tracing_init_dentry();
1346 if (IS_ERR(d_tracer))
1347 return 0;
1348
1349 trace_create_file("uprobe_events", 0644, d_tracer,
1350 NULL, &uprobe_events_ops);
1351 /* Profile interface */
1352 trace_create_file("uprobe_profile", 0444, d_tracer,
1353 NULL, &uprobe_profile_ops);
1354 return 0;
1355 }
1356
1357 fs_initcall(init_uprobe_trace);