git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - kernel/trace/trace_events.c
tracing: add subsystem level to trace events
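The listing below implements the debugfs interface for trace events: "available_events", "set_event", and a per-event "enable" file grouped by subsystem. As a rough illustration of that interface (not part of the kernel file itself), here is a minimal userspace sketch that enables one event by writing its name to set_event; the debugfs mount point and the event name "sched_switch" are assumptions, not taken from this file.

/*
 * Hypothetical userspace sketch, not part of trace_events.c.
 * Assumes debugfs is mounted at /sys/kernel/debug and that an event
 * named "sched_switch" is listed in available_events.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "sched_switch";	/* assumed event name */
	int fd = open("/sys/kernel/debug/tracing/set_event", O_WRONLY);

	if (fd < 0) {
		perror("open set_event");
		return 1;
	}
	/* Writing "!sched_switch" instead would disable the event. */
	if (write(fd, name, strlen(name)) < 0)
		perror("write set_event");
	close(fd);
	return 0;
}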
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_events.h"

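/*
 * Trace event descriptors are collected by the linker between the
 * __start_ftrace_events and __stop_ftrace_events section markers;
 * this walks all of them.
 */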
#define events_for_each(event)						\
	for (event = __start_ftrace_events;				\
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)

void event_trace_printk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	tracing_record_cmdline(current);
	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
	va_end(ap);
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		call++;
	}
}

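/*
 * Enable (set != 0) or disable (set == 0) the event named by buf.
 * Returns 0 on success, -EINVAL if no event with that name exists.
 */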
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call = __start_ftrace_events;

	events_for_each(call) {

		if (!call->name)
			continue;

		if (strcmp(buf, call->name) != 0)
			continue;

		if (set) {
			/* Already set? */
			if (call->enabled)
				return 0;
			call->enabled = 1;
			call->regfunc();
		} else {
			/* Already cleared? */
			if (!call->enabled)
				return 0;
			call->enabled = 0;
			call->unregfunc();
		}
		return 0;
	}
	return -EINVAL;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

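/*
 * Write handler for "set_event": parse a single event name from the
 * user buffer and enable it, or disable it if prefixed with '!'.
 */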
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

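/*
 * seq_file iterators: the t_* callbacks walk every entry in the event
 * section for "available_events"; the s_* callbacks return only the
 * enabled events for "set_event".
 */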
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next = call;

	(*pos)++;

	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	m->private = ++next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next;

	(*pos)++;

 retry:
	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	if (!call->enabled) {
		call++;
		goto retry;
	}

	next = call;
	m->private = ++next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start_ftrace_events;
	}
	return ret;
}

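/*
 * Read/write handlers for each event's "enable" file: report the
 * current state as "0"/"1" and register or unregister the event on
 * write.
 */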
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
		if (!call->enabled)
			break;

		call->enabled = 0;
		call->unregfunc();
		break;
	case 1:
		if (call->enabled)
			break;

		call->enabled = 1;
		call->regfunc();
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

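/*
 * Events that declare a TRACE_SYSTEM are grouped in a per-subsystem
 * directory under events/.  Created subsystems are kept on the
 * event_subsystems list so each directory is only created once.
 */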
struct event_subsystem {
	struct list_head list;
	const char *name;
	struct dentry *entry;
};

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = name;
	list_add(&system->list, &event_subsystems);

	return system->entry;
}

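/*
 * Create the debugfs directory for one event (inside its subsystem
 * directory, if it has one) along with its "enable" control file.
 */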
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, "TRACE_SYSTEM") != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	entry = debugfs_create_file("enable", 0644, call->dir, call,
				    &ftrace_enable_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/enable' entry\n", call->name);

	return 0;
}

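/*
 * Boot-time setup: create "available_events", "set_event" and the
 * per-event directories under debugfs.
 */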
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	events_for_each(call) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		event_create_dir(call, d_events);
	}

	return 0;
}
fs_initcall(event_trace_init);