/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type));			\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, tgid);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);
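
/*
 * For reference, these five common fields are what trace_write_header()
 * below prints at the top of every event's "format" file.  A sketch of
 * the output (the exact offsets depend on the layout of struct
 * trace_entry on the running kernel, so the numbers are illustrative
 * only):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_tgid;	offset:8;	size:4;
 */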

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call->data);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call->data);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
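
/*
 * Example (a sketch of how these strings arrive from user space through
 * the debugfs "set_event" file; paths assume debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 'irq:irq_handler_entry' > /sys/kernel/debug/tracing/set_event
 *	# echo 'sched:*' >> /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' (stripped off in ftrace_event_write() below) clears the
 * matching events instead of setting them.
 */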

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
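
/*
 * Example in-kernel usage (a sketch; "sched" and "sched_switch" are
 * only illustrative and must name an actually-registered event):
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	if (ret)
 *		pr_warning("could not enable sched_switch\n");
 */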

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
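
/*
 * Reading a system's "enable" file therefore yields a single character:
 * '0' if every event in the system is disabled, '1' if every event is
 * enabled, 'X' for a mixture, and '?' if the system has no events at
 * all.  For example:
 *
 *	# cat /sys/kernel/debug/tracing/events/sched/enable
 *	X
 */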

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
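
/*
 * Example filter usage from user space (a sketch; "common_pid" is one of
 * the common fields defined by trace_define_common_fields() above, and
 * the predicate syntax itself is implemented by apply_event_filter() in
 * trace_events_filter.c):
 *
 *	# echo 'common_pid == 1' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *	# echo 0 > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/filter
 *
 * Writing "0" removes the filter again.
 */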

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
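
/*
 * The resulting debugfs layout is, roughly (a sketch, using an
 * illustrative "sched" subsystem with a "sched_switch" event; the
 * top-level "enable" and header files are created in
 * event_trace_init() below):
 *
 *	tracing/events/enable
 *	tracing/events/header_page
 *	tracing/events/header_event
 *	tracing/events/sched/enable
 *	tracing/events/sched/filter
 *	tracing/events/sched/sched_switch/enable
 *	tracing/events/sched/sched_switch/id
 *	tracing/events/sched/sched_switch/filter
 *	tracing/events/sched/sched_switch/format
 */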

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
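
/*
 * Boot-time usage example: events listed on the kernel command line are
 * enabled before user space comes up, using the same syntax as the
 * "set_event" file (event names here are only illustrative):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */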

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
e6187007
SR
1286
1287#ifdef CONFIG_FTRACE_STARTUP_TEST
1288
1289static DEFINE_SPINLOCK(test_spinlock);
1290static DEFINE_SPINLOCK(test_spinlock_irq);
1291static DEFINE_MUTEX(test_mutex);
1292
1293static __init void test_work(struct work_struct *dummy)
1294{
1295 spin_lock(&test_spinlock);
1296 spin_lock_irq(&test_spinlock_irq);
1297 udelay(1);
1298 spin_unlock_irq(&test_spinlock_irq);
1299 spin_unlock(&test_spinlock);
1300
1301 mutex_lock(&test_mutex);
1302 msleep(1);
1303 mutex_unlock(&test_mutex);
1304}
1305
1306static __init int event_test_thread(void *unused)
1307{
1308 void *test_malloc;
1309
1310 test_malloc = kmalloc(1234, GFP_KERNEL);
1311 if (!test_malloc)
1312 pr_info("failed to kmalloc\n");
1313
1314 schedule_on_each_cpu(test_work);
1315
1316 kfree(test_malloc);
1317
1318 set_current_state(TASK_INTERRUPTIBLE);
1319 while (!kthread_should_stop())
1320 schedule();
1321
1322 return 0;
1323}
1324
1325/*
1326 * Do various things that may trigger events.
1327 */
1328static __init void event_test_stuff(void)
1329{
1330 struct task_struct *test_thread;
1331
1332 test_thread = kthread_run(event_test_thread, NULL, "test-events");
1333 msleep(1);
1334 kthread_stop(test_thread);
1335}
1336
1337/*
1338 * For every trace event defined, we will test each trace point separately,
1339 * and then by groups, and finally all trace points.
1340 */
9ea21c1e 1341static __init void event_trace_self_tests(void)
e6187007
SR
1342{
1343 struct ftrace_event_call *call;
1344 struct event_subsystem *system;
e6187007
SR
1345 int ret;
1346
1347 pr_info("Running tests on trace events:\n");
1348
1349 list_for_each_entry(call, &ftrace_events, list) {
1350
1351 /* Only test those that have a regfunc */
1352 if (!call->regfunc)
1353 continue;
1354
1355 pr_info("Testing event %s: ", call->name);
1356
1357 /*
1358 * If an event is already enabled, someone is using
1359 * it and the self test should not be on.
1360 */
1361 if (call->enabled) {
1362 pr_warning("Enabled event during self test!\n");
1363 WARN_ON_ONCE(1);
1364 continue;
1365 }
1366
0e907c99 1367 ftrace_event_enable_disable(call, 1);
e6187007 1368 event_test_stuff();
0e907c99 1369 ftrace_event_enable_disable(call, 0);
e6187007
SR
1370
1371 pr_cont("OK\n");
1372 }
1373
1374 /* Now test at the sub system level */
1375
1376 pr_info("Running tests on trace event systems:\n");
1377
1378 list_for_each_entry(system, &event_subsystems, list) {
1379
1380 /* the ftrace system is special, skip it */
1381 if (strcmp(system->name, "ftrace") == 0)
1382 continue;
1383
1384 pr_info("Testing event system %s: ", system->name);
1385
8f31bfe5 1386 ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
e6187007
SR
1387 if (WARN_ON_ONCE(ret)) {
1388 pr_warning("error enabling system %s\n",
1389 system->name);
1390 continue;
1391 }
1392
1393 event_test_stuff();
1394
8f31bfe5 1395 ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
e6187007
SR
1396 if (WARN_ON_ONCE(ret))
1397 pr_warning("error disabling system %s\n",
1398 system->name);
1399
1400 pr_cont("OK\n");
1401 }
1402
1403 /* Test with all events enabled */
1404
1405 pr_info("Running tests on all trace events:\n");
1406 pr_info("Testing all events: ");
1407
8f31bfe5 1408 ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
e6187007 1409 if (WARN_ON_ONCE(ret)) {
e6187007 1410 pr_warning("error enabling all events\n");
9ea21c1e 1411 return;
e6187007
SR
1412 }
1413
1414 event_test_stuff();
1415
1416 /* reset sysname */
8f31bfe5 1417 ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
e6187007
SR
1418 if (WARN_ON_ONCE(ret)) {
1419 pr_warning("error disabling all events\n");
9ea21c1e 1420 return;
e6187007
SR
1421 }
1422
1423 pr_cont("OK\n");
9ea21c1e
SR
1424}
1425
1426#ifdef CONFIG_FUNCTION_TRACER
1427
1428static DEFINE_PER_CPU(atomic_t, test_event_disable);
1429
1430static void
1431function_test_events_call(unsigned long ip, unsigned long parent_ip)
1432{
1433 struct ring_buffer_event *event;
1434 struct ftrace_entry *entry;
1435 unsigned long flags;
1436 long disabled;
1437 int resched;
1438 int cpu;
1439 int pc;
1440
1441 pc = preempt_count();
1442 resched = ftrace_preempt_disable();
1443 cpu = raw_smp_processor_id();
1444 disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
1445
1446 if (disabled != 1)
1447 goto out;
1448
1449 local_save_flags(flags);
1450
1451 event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
1452 flags, pc);
1453 if (!event)
1454 goto out;
1455 entry = ring_buffer_event_data(event);
1456 entry->ip = ip;
1457 entry->parent_ip = parent_ip;
1458
cb4764a6 1459 trace_nowake_buffer_unlock_commit(event, flags, pc);
9ea21c1e
SR
1460
1461 out:
1462 atomic_dec(&per_cpu(test_event_disable, cpu));
1463 ftrace_preempt_enable(resched);
1464}
1465
1466static struct ftrace_ops trace_ops __initdata =
1467{
1468 .func = function_test_events_call,
1469};
1470
1471static __init void event_trace_self_test_with_function(void)
1472{
1473 register_ftrace_function(&trace_ops);
1474 pr_info("Running tests again, along with the function tracer\n");
1475 event_trace_self_tests();
1476 unregister_ftrace_function(&trace_ops);
1477}
1478#else
1479static __init void event_trace_self_test_with_function(void)
1480{
1481}
1482#endif
1483
1484static __init int event_trace_self_tests_init(void)
1485{
020e5f85
LZ
1486 if (!tracing_selftest_disabled) {
1487 event_trace_self_tests();
1488 event_trace_self_test_with_function();
1489 }
e6187007
SR
1490
1491 return 0;
1492}
1493
28d20e2d 1494late_initcall(event_trace_self_tests_init);
e6187007
SR
1495
1496#endif