/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

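/*
 * Record the name, type, offset, size and signedness of one field of a
 * trace event.  The field list hanging off the event is what the event
 * filter code walks to locate fields inside the binary trace record.
 */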
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

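/*
 * Disable every event that is currently enabled.  Called when the
 * set_event file is opened for writing without O_APPEND, which is
 * treated as truncating the current set of enabled events.
 */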
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}

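/*
 * Enable (1) or disable (0) a single event.  All callers hold
 * event_mutex, which keeps call->enabled consistent with the
 * register/unregister callbacks.
 */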
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc();
		}
		break;
	}
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}

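/*
 * For example, with debugfs mounted at /sys/kernel/debug, writes such
 * as these end up in ftrace_set_clr_event():
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*' > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' is stripped by ftrace_event_write() below and turns
 * the write into a disable.
 */
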
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

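/*
 * Copy one whitespace-delimited word from a write to the set_event
 * file and hand it to ftrace_set_clr_event().  A leading '!' selects
 * disable instead of enable.
 */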
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

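/*
 * seq_file iterators: t_next() walks every named event that can be
 * enabled, for the available_events file; s_next() further down walks
 * only the currently enabled events, for reads of set_event.
 */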
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

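/*
 * Per event "enable" file: reads report "0" or "1"; writing 0 or 1
 * disables or enables the event once the ring buffer is known to be
 * allocated.
 */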
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

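/*
 * "enable" file for a whole subsystem (or, with a NULL system, for all
 * events): reads report '0' if every matching event is disabled, '1'
 * if every one is enabled, and 'X' for a mixture.
 */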
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

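/*
 * FIELD() expands to the type name, "common_" field name, offset and
 * size of one struct trace_entry member.  If the declared type ever
 * disagrees with the real field size, the reference to the undefined
 * __bad_type_size() makes the kernel fail to link.
 */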
#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

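/*
 * A read of an event's "format" file therefore starts with the common
 * header, roughly (sizes and offsets are architecture dependent):
 *
 *	name: <event-name>
 *	ID: <id>
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;
 *		...
 */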
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

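/*
 * Per event and per subsystem "filter" files: reads print the current
 * filter expression; writes hand the whole buffer to
 * apply_event_filter()/apply_subsystem_event_filter() to be parsed
 * and applied.
 */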
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

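/*
 * Back the "header_page" and "header_event" files: the file's
 * private_data is a ring buffer print callback that fills in the
 * trace_seq.
 */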
static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

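/*
 * Find, or create on first use, the events/<subsystem> directory along
 * with its "filter" and "enable" control files.  On failure the caller
 * gets d_events back, so the event is placed at the top level instead.
 */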
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

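/*
 * Built-in events are laid out in an array between __start_ftrace_events
 * and __stop_ftrace_events in the kernel image; a module's events live
 * in its mod->trace_events array.  Both are walked with for_each_event().
 */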
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head	list;
	struct module		*mod;
	struct file_operations	id;
	struct file_operations	enable;
	struct file_operations	format;
	struct file_operations	filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			if (call->enabled) {
				call->enabled = 0;
				tracing_stop_cmdline_record();
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

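/*
 * Create the top level debugfs control files (available_events,
 * set_event, the global "enable", and the ring buffer header files),
 * add a directory for every built-in event, and register the module
 * notifier that adds and removes module events on load and unload.
 */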
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

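/*
 * Exercise locking, irq and scheduling paths so that whatever trace
 * points hook them get a chance to fire while the events under test
 * are enabled.
 */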
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		tracing_start_cmdline_record();
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		tracing_stop_cmdline_record();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

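/*
 * When the function tracer is built in, run the same self tests again
 * with a function-trace callback registered, to catch bad interactions
 * between function tracing and trace events.
 */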
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif