kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
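
/*
 * Illustrative usage of the double-loop macros above (this exact
 * pattern appears later in event_remove() and friends): walk every
 * event file of every trace instance and stop once the matching
 * call is found.
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->event_call != call)
 *			continue;
 *		ftrace_event_enable_disable(file, 0);
 *		break;
 *	} while_for_each_event_file();
 */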

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		goto err;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	kmem_cache_free(field_cachep, field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;

	switch (enable) {
	case 0:
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			file->flags &= ~FTRACE_EVENT_FL_ENABLED;
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		break;
	case 1:
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			file->flags |= FTRACE_EVENT_FL_ENABLED;

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
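	/*
	 * Illustrative values: "sched:sched_switch" names one event,
	 * "sched:" or "sched" selects the whole sched subsystem, and
	 * "*:sched_switch" matches that event in any subsystem.
	 */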

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all events are cleared, or if we have a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = name;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct ftrace_event_call *call)
{
	int ret = 0;

	if (WARN_ON(!call->name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n",
				call->name);
	}

	return ret;
}

static int
__register_event(struct ftrace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct ftrace_event_call *call,
		      struct trace_array *tr,
		      const struct file_operations *id,
		      const struct file_operations *enable,
		      const struct file_operations *filter,
		      const struct file_operations *format)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return -ENOMEM;

	file->event_call = call;
	file->tr = tr;
	list_add(&file->list, &tr->events);

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct ftrace_event_call *call,
				   struct ftrace_module_file_ops *file_ops);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call, NULL);

	mutex_unlock(&event_mutex);
	return ret;
}

/*
 * Must be called under locking both of event_mutex and trace_event_mutex.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_preds(call);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	/*
	 * As event_calls are added in groups by module,
	 * when we find one file_ops, we don't need to search for
	 * each call in that module, as the rest should be the
	 * same. Only search for a new one if the last one did
	 * not match.
	 */
	if (file_ops && mod == file_ops->mod)
		return file_ops;

	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			return file_ops;
	}
	return NULL;
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call **call, **start, **end;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	file_ops = trace_create_file_ops(mod);
	if (!file_ops)
		return;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call, file_ops);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}
	up_write(&trace_event_mutex);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}

static int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return __trace_add_new_event(call, tr,
				     &file_ops->id, &file_ops->enable,
				     &file_ops->filter, &file_ops->format);
}

#else
static inline struct ftrace_module_file_ops *
find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
{
	return NULL;
}
static inline int trace_module_notify(struct notifier_block *self,
				      unsigned long val, void *data)
{
	return 0;
}
static inline int
__trace_add_new_mod_event(struct ftrace_event_call *call,
			  struct trace_array *tr,
			  struct ftrace_module_file_ops *file_ops)
{
	return -ENODEV;
}
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		if (call->mod) {
			/*
			 * Directories for events by modules need to
			 * keep module ref counts when opened (as we don't
			 * want the module to disappear when reading one
			 * of these files). The file_ops keep account of
			 * the module ref count.
			 */
			file_ops = find_ftrace_file_ops(file_ops, call->mod);
			if (!file_ops)
				continue; /* Warn? */
			ret = __trace_add_new_mod_event(call, tr, file_ops);
			if (ret < 0)
				pr_warning("Could not create directory for event %s\n",
					   call->name);
			continue;
		}
		ret = __trace_add_new_event(call, tr,
					    &ftrace_event_id_fops,
					    &ftrace_enable_fops,
					    &ftrace_event_filter_fops,
					    &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}

/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;


	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file,
				       &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   file->event_call->name);
	}
}

/*
 * For early boot up, the top trace array needs to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   call->name);
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list) {
		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kmem_cache_free(file_cachep, file);
	}
}

static void
__add_event_to_tracers(struct ftrace_event_call *call,
		       struct ftrace_module_file_ops *file_ops)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (file_ops)
			__trace_add_new_mod_event(call, tr, file_ops);
		else
			__trace_add_new_event(call, tr,
					      &ftrace_event_id_fops,
					      &ftrace_enable_fops,
					      &ftrace_event_filter_fops,
					      &ftrace_event_format_fops);
	}
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warning("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events) {
		pr_warning("Could not create debugfs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}

/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_mutex);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_mutex);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_mutex);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_mutex);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

int event_trace_del_tracer(struct trace_array *tr)
{
	/* Disable any running events */
	__ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);

	mutex_lock(&event_mutex);

	down_write(&trace_event_mutex);
	__trace_remove_event_dirs(tr);
	debugfs_remove_recursive(tr->event_dir);
	up_write(&trace_event_mutex);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}
1965
d1a29143
SR
1966static __init int event_trace_memsetup(void)
1967{
1968 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
1969 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
1970 return 0;
1971}
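Note on the allocator setup above: KMEM_CACHE() creates a slab cache sized and named after the given struct, and SLAB_PANIC makes cache-creation failure fatal at boot, so the later kmem_cache_alloc() callers on field_cachep and file_cachep never have to cope with a missing cache.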
1972
8781915a
EG
1973static __init int event_trace_enable(void)
1974{
ae63b31e 1975 struct trace_array *tr = top_trace_array();
8781915a
EG
1976 struct ftrace_event_call **iter, *call;
1977 char *buf = bootup_event_buf;
1978 char *token;
1979 int ret;
1980
1981 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1982
1983 call = *iter;
1984 ret = event_init(call);
1985 if (!ret)
1986 list_add(&call->list, &ftrace_events);
1987 }
1988
77248221
SR
1989 /*
1990 * We need the top trace array to have a working set of trace
1991 * points at early init, before the debug files and directories
1992 * are created. Create the file entries now, and attach them
1993 * to the actual file dentries later.
1994 */
1995 __trace_early_add_events(tr);
1996
8781915a
EG
1997 while (true) {
1998 token = strsep(&buf, ",");
1999
2000 if (!token)
2001 break;
2002 if (!*token)
2003 continue;
2004
ae63b31e 2005 ret = ftrace_set_clr_event(tr, token, 1);
8781915a
EG
2006 if (ret)
2007 pr_warn("Failed to enable trace event: %s\n", token);
2008 }
81698831
SR
2009
2010 trace_printk_start_comm();
2011
8781915a
EG
2012 return 0;
2013}
2014
b77e38aa
SR
2015static __init int event_trace_init(void)
2016{
ae63b31e 2017 struct trace_array *tr;
b77e38aa
SR
2018 struct dentry *d_tracer;
2019 struct dentry *entry;
6d723736 2020 int ret;
b77e38aa 2021
ae63b31e
SR
2022 tr = top_trace_array();
2023
b77e38aa
SR
2024 d_tracer = tracing_init_dentry();
2025 if (!d_tracer)
2026 return 0;
2027
2314c4ae 2028 entry = debugfs_create_file("available_events", 0444, d_tracer,
ae63b31e 2029 tr, &ftrace_avail_fops);
2314c4ae
SR
2030 if (!entry)
2031 pr_warning("Could not create debugfs "
2032 "'available_events' entry\n");
2033
8728fe50
LZ
2034 if (trace_define_common_fields())
2035 pr_warning("tracing: Failed to allocate common fields\n");
2036
77248221 2037 ret = early_event_add_tracer(d_tracer, tr);
ae63b31e
SR
2038 if (ret)
2039 return ret;
020e5f85 2040
6d723736 2041 ret = register_module_notifier(&trace_module_nb);
55379376 2042 if (ret)
6d723736
SR
2043 pr_warning("Failed to register trace events module notifier\n");
2044
b77e38aa
SR
2045 return 0;
2046}
d1a29143 2047early_initcall(event_trace_memsetup);
8781915a 2048core_initcall(event_trace_enable);
b77e38aa 2049fs_initcall(event_trace_init);
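The registrations above (plus the late_initcall() at the end of the self-test block below) rely on the standard initcall ordering:

	early_initcall	event_trace_memsetup()		slab caches for fields and files
	core_initcall	event_trace_enable()		register events, apply trace_event=
	fs_initcall	event_trace_init()		create the debugfs entries
	late_initcall	event_trace_self_tests_init()	CONFIG_FTRACE_STARTUP_TEST only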
e6187007
SR
2050
2051#ifdef CONFIG_FTRACE_STARTUP_TEST
2052
2053static DEFINE_SPINLOCK(test_spinlock);
2054static DEFINE_SPINLOCK(test_spinlock_irq);
2055static DEFINE_MUTEX(test_mutex);
2056
2057static __init void test_work(struct work_struct *dummy)
2058{
2059 spin_lock(&test_spinlock);
2060 spin_lock_irq(&test_spinlock_irq);
2061 udelay(1);
2062 spin_unlock_irq(&test_spinlock_irq);
2063 spin_unlock(&test_spinlock);
2064
2065 mutex_lock(&test_mutex);
2066 msleep(1);
2067 mutex_unlock(&test_mutex);
2068}
2069
2070static __init int event_test_thread(void *unused)
2071{
2072 void *test_malloc;
2073
2074 test_malloc = kmalloc(1234, GFP_KERNEL);
2075 if (!test_malloc)
2076 pr_info("failed to kmalloc\n");
2077
2078 schedule_on_each_cpu(test_work);
2079
2080 kfree(test_malloc);
2081
2082 set_current_state(TASK_INTERRUPTIBLE);
2083 while (!kthread_should_stop())
2084 schedule();
2085
2086 return 0;
2087}
2088
2089/*
2090 * Do various things that may trigger events.
2091 */
2092static __init void event_test_stuff(void)
2093{
2094 struct task_struct *test_thread;
2095
2096 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2097 msleep(1);
2098 kthread_stop(test_thread);
2099}
2100
2101/*
2102 * For every trace event defined, we will test each trace point separately,
2103 * and then by groups, and finally all trace points.
2104 */
9ea21c1e 2105static __init void event_trace_self_tests(void)
e6187007 2106{
ae63b31e
SR
2107 struct ftrace_subsystem_dir *dir;
2108 struct ftrace_event_file *file;
e6187007
SR
2109 struct ftrace_event_call *call;
2110 struct event_subsystem *system;
ae63b31e 2111 struct trace_array *tr;
e6187007
SR
2112 int ret;
2113
ae63b31e
SR
2114 tr = top_trace_array();
2115
e6187007
SR
2116 pr_info("Running tests on trace events:\n");
2117
ae63b31e
SR
2118 list_for_each_entry(file, &tr->events, list) {
2119
2120 call = file->event_call;
e6187007 2121
2239291a
SR
2122 /* Only test those that have a probe */
2123 if (!call->class || !call->class->probe)
e6187007
SR
2124 continue;
2125
1f5a6b45
SR
2126/*
2127 * Testing syscall events here is of little use, but we still
2128 * do it if configured; it is, however, time consuming.
2129 * What we really need is a user thread to perform the
2130 * syscalls as we test.
2131 */
2132#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
2133 if (call->class->system &&
2134 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
2135 continue;
2136#endif
2137
e6187007
SR
2138 pr_info("Testing event %s: ", call->name);
2139
2140 /*
2141 * If an event is already enabled, someone is using
2142 * it and the self test should not be on.
2143 */
ae63b31e 2144 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
e6187007
SR
2145 pr_warning("Enabled event during self test!\n");
2146 WARN_ON_ONCE(1);
2147 continue;
2148 }
2149
ae63b31e 2150 ftrace_event_enable_disable(file, 1);
e6187007 2151 event_test_stuff();
ae63b31e 2152 ftrace_event_enable_disable(file, 0);
e6187007
SR
2153
2154 pr_cont("OK\n");
2155 }
2156
2157 /* Now test at the sub system level */
2158
2159 pr_info("Running tests on trace event systems:\n");
2160
ae63b31e
SR
2161 list_for_each_entry(dir, &tr->systems, list) {
2162
2163 system = dir->subsystem;
e6187007
SR
2164
2165 /* the ftrace system is special, skip it */
2166 if (strcmp(system->name, "ftrace") == 0)
2167 continue;
2168
2169 pr_info("Testing event system %s: ", system->name);
2170
ae63b31e 2171 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
e6187007
SR
2172 if (WARN_ON_ONCE(ret)) {
2173 pr_warning("error enabling system %s\n",
2174 system->name);
2175 continue;
2176 }
2177
2178 event_test_stuff();
2179
ae63b31e 2180 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
76bab1b7 2181 if (WARN_ON_ONCE(ret)) {
e6187007
SR
2182 pr_warning("error disabling system %s\n",
2183 system->name);
76bab1b7
YL
2184 continue;
2185 }
e6187007
SR
2186
2187 pr_cont("OK\n");
2188 }
2189
2190 /* Test with all events enabled */
2191
2192 pr_info("Running tests on all trace events:\n");
2193 pr_info("Testing all events: ");
2194
ae63b31e 2195 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
e6187007 2196 if (WARN_ON_ONCE(ret)) {
e6187007 2197 pr_warning("error enabling all events\n");
9ea21c1e 2198 return;
e6187007
SR
2199 }
2200
2201 event_test_stuff();
2202
2203 /* reset all events */
ae63b31e 2204 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
e6187007
SR
2205 if (WARN_ON_ONCE(ret)) {
2206 pr_warning("error disabling all events\n");
9ea21c1e 2207 return;
e6187007
SR
2208 }
2209
2210 pr_cont("OK\n");
9ea21c1e
SR
2211}
2212
2213#ifdef CONFIG_FUNCTION_TRACER
2214
245b2e70 2215static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e
SR
2216
2217static void
2f5f6ad9 2218function_test_events_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 2219 struct ftrace_ops *op, struct pt_regs *pt_regs)
9ea21c1e
SR
2220{
2221 struct ring_buffer_event *event;
e77405ad 2222 struct ring_buffer *buffer;
9ea21c1e
SR
2223 struct ftrace_entry *entry;
2224 unsigned long flags;
2225 long disabled;
9ea21c1e
SR
2226 int cpu;
2227 int pc;
2228
2229 pc = preempt_count();
5168ae50 2230 preempt_disable_notrace();
9ea21c1e 2231 cpu = raw_smp_processor_id();
245b2e70 2232 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
2233
2234 if (disabled != 1)
2235 goto out;
2236
2237 local_save_flags(flags);
2238
e77405ad
SR
2239 event = trace_current_buffer_lock_reserve(&buffer,
2240 TRACE_FN, sizeof(*entry),
9ea21c1e
SR
2241 flags, pc);
2242 if (!event)
2243 goto out;
2244 entry = ring_buffer_event_data(event);
2245 entry->ip = ip;
2246 entry->parent_ip = parent_ip;
2247
0d5c6e1c 2248 trace_buffer_unlock_commit(buffer, event, flags, pc);
9ea21c1e
SR
2249
2250 out:
245b2e70 2251 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 2252 preempt_enable_notrace();
9ea21c1e
SR
2253}
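The per-CPU ftrace_test_event_disable counter above is a recursion guard: the callback fires for every traced function, including functions it calls itself, so only the outermost entry on a given CPU is allowed to reserve and commit an event. A stripped-down sketch of the pattern, with illustrative names not taken from this file:

	static DEFINE_PER_CPU(atomic_t, example_nesting);

	static void example_recursion_guarded(void)
	{
		long nested;
		int cpu;

		preempt_disable_notrace();
		cpu = raw_smp_processor_id();
		nested = atomic_inc_return(&per_cpu(example_nesting, cpu));

		if (nested == 1) {
			/* outermost call on this CPU: safe to do real work */
		}

		atomic_dec(&per_cpu(example_nesting, cpu));
		preempt_enable_notrace();
	}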
2254
2255static struct ftrace_ops trace_ops __initdata =
2256{
2257 .func = function_test_events_call,
4740974a 2258 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
9ea21c1e
SR
2259};
2260
2261static __init void event_trace_self_test_with_function(void)
2262{
17bb615a
SR
2263 int ret;
2264 ret = register_ftrace_function(&trace_ops);
2265 if (WARN_ON(ret < 0)) {
2266 pr_info("Failed to enable function tracer for event tests\n");
2267 return;
2268 }
9ea21c1e
SR
2269 pr_info("Running tests again, along with the function tracer\n");
2270 event_trace_self_tests();
2271 unregister_ftrace_function(&trace_ops);
2272}
2273#else
2274static __init void event_trace_self_test_with_function(void)
2275{
2276}
2277#endif
2278
2279static __init int event_trace_self_tests_init(void)
2280{
020e5f85
LZ
2281 if (!tracing_selftest_disabled) {
2282 event_trace_self_tests();
2283 event_trace_self_test_with_function();
2284 }
e6187007
SR
2285
2286 return 0;
2287}
2288
28d20e2d 2289late_initcall(event_trace_self_tests_init);
e6187007
SR
2290
2291#endif