kernel/trace/trace_uprobe.c
/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
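
/*
 * The fetched argument data is laid out right after the vaddr[] slots:
 * an entry probe records one address (the probed instruction pointer),
 * while a return probe records two (the function entry address in
 * vaddr[0] and the return address in vaddr[1]), which is what the
 * is_return arithmetic above accounts for.
 */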

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
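
/*
 * tp.args sits at the tail of struct trace_uprobe (via the embedded
 * struct trace_probe), so the allocation is sized for however many
 * fetch arguments the probe defines.
 */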

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					 void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset));	\
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}
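
/*
 * Both dispatchers stash a uprobe_dispatch_data pointer in
 * current->utask->vaddr before arguments are fetched, so the load base
 * of the probed binary can be recovered as bp_addr - tu->offset and a
 * file offset translated into the matching virtual address in the
 * traced process.
 */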

#define DEFINE_FETCH_file_offset(type)					\
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
					       void *offset, void *dest)\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
static const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event: call with uprobe_lock held */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
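
/*
 * For example, through the tracefs uprobe_events file (the binary,
 * offset and fetch arguments below are illustrative only; see
 * Documentation/trace/uprobetracer.txt):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ip %ax' > uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:myprobe' >> uprobe_events
 */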
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
379 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
431 pr_info("probe point must be have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(d_inode(path.dentry));
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false,
						 uprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call));
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
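
/*
 * One page is allocated per possible CPU on the first enable and freed
 * again on the last disable; the refcount is serialized by event_mutex.
 * Handlers take the current CPU's buffer under its mutex, since the
 * task may migrate between uprobe_buffer_get() and uprobe_buffer_put().
 */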

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto out;
	}

	trace_seq_putc(s, '\n');

out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}
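
/*
 * A probed mm keeps its breakpoint only while the filter passes: either
 * some attached perf event targets a task using that mm, or at least
 * one system-wide event is attached. uprobe_perf_open() and
 * uprobe_perf_close() below call uprobe_apply() to install or remove
 * breakpoints when that answer changes for an event.
 */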

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct bpf_prog *prog = call->prog;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;


	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);