/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

/*
 * We need to change this state when a selftest is running.
 * A selftest looks into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions
 * into the ring buffer, such as those from ftrace_printk, could
 * occur at the same time and give false positive or negative
 * results.
 */
static bool __read_mostly tracing_selftest_running;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1, and is set back to 0 only if tracer
 * initialization succeeds; nothing else ever clears it.
 */
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

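/*
 * ftrace_disable_cpu()/ftrace_enable_cpu() bracket code that touches
 * the ring buffer directly (reads, resets, swaps).  The per-CPU
 * counter keeps the function tracer from recursing into a buffer
 * that is being manipulated.
 */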
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_t __read_mostly		tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu_mask(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called, which outputs the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but can be enabled either by specifying
 * "ftrace_dump_on_oops" on the kernel command line or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(char *buf);

static int __init set_ftrace(char *str)
{
	tracing_set_tracer(str);
	return 1;
}
__setup("ftrace", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

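/* convert nanoseconds to microseconds, rounding to the nearest microsecond */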
long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

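/* grab the ring buffer's current time stamp for @cpu, normalized for output */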
cycle_t ftrace_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(cpu);
	ring_buffer_normalize_time_stamp(cpu, &ts);
	return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/* function tracing enabled */
int				ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely kept low (16384 entries) so that a
 * dump on oops does not take forever to print. It is configurable
 * at both boot time and run time anyway.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify the allocating of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries cannot be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"ftrace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	NULL
};

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = task_uid(tsk);
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

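/*
 * Copy up to @cnt bytes of buffered trace_seq output to user space,
 * advancing the read position by the amount actually copied.
 */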
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret)
		return -EFAULT;

	/* advance by what was copied (cnt), not by what was available (len) */
	s->readpos += cnt;
	return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i)
			tracing_reset(tr, i);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);
	lock_kernel();

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

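/*
 * The comm (command name) of recently seen tasks is cached in a small
 * ring of SAVED_CMDLINES slots, so that trace output can map a pid
 * back to a task name without needing a reference to the task itself.
 */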
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

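/*
 * Fill in the fields common to every trace entry: pid, tgid, preempt
 * count, and the irq/softirq/need-resched flag bits derived from
 * @flags and @pc.
 */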
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->tgid		= (tsk) ? tsk->tgid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_FN;
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ent_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_GRAPH_ENT;
	entry->graph_ent = *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}

static void __trace_graph_return(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_GRAPH_RET;
	entry->ret = *trace;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
			       struct trace_array_cpu *data,
			       unsigned long flags,
			       int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_array_cpu *data,
				   unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_USER_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_userstack(struct trace_array *tr,
		       struct trace_array_cpu *data,
		       unsigned long flags)
{
	ftrace_trace_userstack(tr, data, flags, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, pc);
	entry->ent.type = TRACE_SPECIAL;
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
	ftrace_trace_userstack(tr, data, irq_flags, pc);

	trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_CTX;
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
	ftrace_trace_userstack(tr, data, flags, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_WAKE;
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);
	ftrace_trace_userstack(tr, data, flags, pc);

	trace_wake_up();
}

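/*
 * ftrace_special - log up to three arbitrary values into the trace
 * buffer; useful as an ad-hoc marker when debugging kernel code.
 */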
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

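/*
 * Unlike the preempt-only variant above, which merely disables
 * preemption, this callback disables interrupts around the event
 * so it is safe regardless of the context it is called from.
 */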
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	if (!ftrace_graph_addr(trace->func))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_entry(tr, data, trace, flags, pc);
	}
	/* Only do the atomic if it is not already set */
	if (!test_tsk_trace_graph(current))
		set_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return 1;
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, data, trace, flags, pc);
	}
	if (!trace->depth)
		clear_tsk_trace_graph(current);
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
};

static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

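/*
 * peek_next_entry() looks at the next event in one CPU's buffer
 * without consuming it; __find_next_entry() scans every CPU and
 * returns the entry with the smallest timestamp, so the merged
 * output stays globally time-ordered.
 */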
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter, iter->cpu);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

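/*
 * s_start()/s_next()/s_stop() are the seq_file callbacks that drive
 * reads of the trace file.  Since the ring buffer iterators can only
 * move forward, seeking to *pos is done by resetting every per-CPU
 * iterator and stepping ahead entry by entry.
 */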
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace) {
		mutex_unlock(&trace_types_lock);
		return NULL;
	}

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		for_each_tracing_cpu(cpu) {
			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		}

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	mutex_unlock(&trace_types_lock);
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}


static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%3d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
			  (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq) {
		trace_seq_putc(s, 'H');
	} else {
		if (hardirq) {
			trace_seq_putc(s, 'h');
		} else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

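/* map a task state bitmask to its single-letter code from TASK_STATE_TO_CHAR_STR */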
static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

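/* print a one-time annotation the first time output is drawn from a CPU's buffer */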
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpu_isset(iter->cpu, iter->started))
		return;

	cpu_set(iter->cpu, iter->started);
	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}

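/*
 * Format one entry in the latency trace format: the generic
 * pid/CPU/flags columns, relative and absolute timestamps, then a
 * per-type rendering of the entry payload.
 */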
static enum print_line_t
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry;
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	u64 next_ts;
	char *comm;
	int S, T;
	int i;

	test_cpu_buff_start(iter);

	next_entry = find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);
	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(iter->ts),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, field->parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		T = task_state_char(field->next_state);
		S = task_state_char(field->prev_state);
		comm = trace_find_cmdline(field->next_pid);
		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
				 field->prev_pid,
				 field->prev_prio,
				 S, entry->type == TRACE_CTX ? "==>" : "  +",
				 field->next_cpu,
				 field->next_pid,
				 field->next_prio,
				 T, comm);
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %ld %ld %ld\n",
				 field->arg1,
				 field->arg2,
				 field->arg3);
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i)
				trace_seq_puts(s, " <= ");
			seq_print_ip_sym(s, field->caller[i], sym_flags);
		}
		trace_seq_puts(s, "\n");
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		break;
	}
	case TRACE_BRANCH: {
		struct trace_branch *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "[%s] %s:%s:%d\n",
				 field->correct ? " ok  " : " MISS ",
				 field->func,
				 field->file,
				 field->line);
		break;
	}
	case TRACE_USER_STACK: {
		struct userstack_entry *field;

		trace_assign_type(field, entry);

		seq_print_userip_objs(field, s, sym_flags);
		trace_seq_putc(s, '\n');
02b67518 TE |
1633 | break; |
1634 | } | |
89b2f978 | 1635 | default: |
214023c3 | 1636 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
bc0c38d1 | 1637 | } |
2c4f035f | 1638 | return TRACE_TYPE_HANDLED; |
bc0c38d1 SR |
1639 | } |
1640 | ||
2c4f035f | 1641 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) |
bc0c38d1 | 1642 | { |
214023c3 | 1643 | struct trace_seq *s = &iter->seq; |
bc0c38d1 | 1644 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
4e3c3333 | 1645 | struct trace_entry *entry; |
bc0c38d1 SR |
1646 | unsigned long usec_rem; |
1647 | unsigned long long t; | |
1648 | unsigned long secs; | |
1649 | char *comm; | |
b3806b43 | 1650 | int ret; |
bac524d3 | 1651 | int S, T; |
86387f7e | 1652 | int i; |
bc0c38d1 | 1653 | |
4e3c3333 | 1654 | entry = iter->ent; |
dd0e545f | 1655 | |
a309720c SR |
1656 | test_cpu_buff_start(iter); |
1657 | ||
777e208d | 1658 | comm = trace_find_cmdline(iter->ent->pid); |
bc0c38d1 | 1659 | |
3928a8a2 | 1660 | t = ns2usecs(iter->ts); |
bc0c38d1 SR |
1661 | usec_rem = do_div(t, 1000000ULL); |
1662 | secs = (unsigned long)t; | |
1663 | ||
777e208d | 1664 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); |
f29c73fe | 1665 | if (!ret) |
2c4f035f | 1666 | return TRACE_TYPE_PARTIAL_LINE; |
a6168353 | 1667 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); |
f29c73fe | 1668 | if (!ret) |
2c4f035f | 1669 | return TRACE_TYPE_PARTIAL_LINE; |
f29c73fe IM |
1670 | ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); |
1671 | if (!ret) | |
2c4f035f | 1672 | return TRACE_TYPE_PARTIAL_LINE; |
bc0c38d1 SR |
1673 | |
1674 | switch (entry->type) { | |
777e208d | 1675 | case TRACE_FN: { |
7104f300 SR |
1676 | struct ftrace_entry *field; |
1677 | ||
1678 | trace_assign_type(field, entry); | |
777e208d SR |
1679 | |
1680 | ret = seq_print_ip_sym(s, field->ip, sym_flags); | |
b3806b43 | 1681 | if (!ret) |
2c4f035f | 1682 | return TRACE_TYPE_PARTIAL_LINE; |
bc0c38d1 | 1683 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && |
777e208d | 1684 | field->parent_ip) { |
b3806b43 SR |
1685 | ret = trace_seq_printf(s, " <-"); |
1686 | if (!ret) | |
2c4f035f | 1687 | return TRACE_TYPE_PARTIAL_LINE; |
b3aa5577 SR |
1688 | ret = seq_print_ip_sym(s, |
1689 | field->parent_ip, | |
1690 | sym_flags); | |
b3806b43 | 1691 | if (!ret) |
2c4f035f | 1692 | return TRACE_TYPE_PARTIAL_LINE; |
bc0c38d1 | 1693 | } |
b3806b43 SR |
1694 | ret = trace_seq_printf(s, "\n"); |
1695 | if (!ret) | |
2c4f035f | 1696 | return TRACE_TYPE_PARTIAL_LINE; |
bc0c38d1 | 1697 | break; |
777e208d | 1698 | } |
bc0c38d1 | 1699 | case TRACE_CTX: |
777e208d | 1700 | case TRACE_WAKE: { |
7104f300 SR |
1701 | struct ctx_switch_entry *field; |
1702 | ||
1703 | trace_assign_type(field, entry); | |
777e208d | 1704 | |
3d9101e9 TG |
1705 | T = task_state_char(field->next_state); |
1706 | S = task_state_char(field->prev_state); | |
80b5e940 | 1707 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", |
777e208d SR |
1708 | field->prev_pid, |
1709 | field->prev_prio, | |
b3806b43 | 1710 | S, |
57422797 | 1711 | entry->type == TRACE_CTX ? "==>" : " +", |
777e208d SR |
1712 | field->next_cpu, |
1713 | field->next_pid, | |
1714 | field->next_prio, | |
bac524d3 | 1715 | T); |
b3806b43 | 1716 | if (!ret) |
2c4f035f | 1717 | return TRACE_TYPE_PARTIAL_LINE; |
bc0c38d1 | 1718 | break; |
777e208d SR |
1719 | } |
1720 | case TRACE_SPECIAL: { | |
7104f300 SR |
1721 | struct special_entry *field; |
1722 | ||
1723 | trace_assign_type(field, entry); | |
777e208d | 1724 | |
88a4216c | 1725 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", |
777e208d SR |
1726 | field->arg1, |
1727 | field->arg2, | |
1728 | field->arg3); | |
f0a920d5 | 1729 | if (!ret) |
2c4f035f | 1730 | return TRACE_TYPE_PARTIAL_LINE; |
f0a920d5 | 1731 | break; |
777e208d SR |
1732 | } |
1733 | case TRACE_STACK: { | |
7104f300 SR |
1734 | struct stack_entry *field; |
1735 | ||
1736 | trace_assign_type(field, entry); | |
777e208d | 1737 | |
86387f7e IM |
1738 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
1739 | if (i) { | |
1740 | ret = trace_seq_puts(s, " <= "); | |
1741 | if (!ret) | |
2c4f035f | 1742 | return TRACE_TYPE_PARTIAL_LINE; |
86387f7e | 1743 | } |
777e208d | 1744 | ret = seq_print_ip_sym(s, field->caller[i], |
86387f7e IM |
1745 | sym_flags); |
1746 | if (!ret) | |
2c4f035f | 1747 | return TRACE_TYPE_PARTIAL_LINE; |
86387f7e IM |
1748 | } |
1749 | ret = trace_seq_puts(s, "\n"); | |
1750 | if (!ret) | |
2c4f035f | 1751 | return TRACE_TYPE_PARTIAL_LINE; |
86387f7e | 1752 | break; |
777e208d SR |
1753 | } |
1754 | case TRACE_PRINT: { | |
7104f300 SR |
1755 | struct print_entry *field; |
1756 | ||
1757 | trace_assign_type(field, entry); | |
777e208d SR |
1758 | |
1759 | seq_print_ip_sym(s, field->ip, sym_flags); | |
1760 | trace_seq_printf(s, ": %s", field->buf); | |
dd0e545f | 1761 | break; |
bc0c38d1 | 1762 | } |
287b6e68 FW |
1763 | case TRACE_GRAPH_RET:
1764 | case TRACE_GRAPH_ENT:
fb52607a | 1765 | return print_graph_function(iter);
9f029e83 SR |
1769 | case TRACE_BRANCH: { |
1770 | struct trace_branch *field; | |
52f232cb SR |
1771 | |
1772 | trace_assign_type(field, entry); | |
1773 | ||
1774 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | |
68d119f0 | 1775 | field->correct ? " ok " : " MISS ", |
52f232cb SR |
1776 | field->func, |
1777 | field->file, | |
1778 | field->line); | |
1779 | break; | |
1780 | } | |
02b67518 TE |
1781 | case TRACE_USER_STACK: { |
1782 | struct userstack_entry *field; | |
1783 | ||
1784 | trace_assign_type(field, entry); | |
1785 | ||
1786 | ret = seq_print_userip_objs(field, s, sym_flags); | |
1787 | if (!ret) | |
1788 | return TRACE_TYPE_PARTIAL_LINE; | |
1789 | ret = trace_seq_putc(s, '\n'); | |
1790 | if (!ret) | |
1791 | return TRACE_TYPE_PARTIAL_LINE; | |
1792 | break; | |
1793 | } | |
777e208d | 1794 | } |
2c4f035f | 1795 | return TRACE_TYPE_HANDLED; |
bc0c38d1 SR |
1796 | } |
1797 | ||
2c4f035f | 1798 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
f9896bf3 IM |
1799 | { |
1800 | struct trace_seq *s = &iter->seq; | |
1801 | struct trace_entry *entry; | |
1802 | int ret; | |
bac524d3 | 1803 | int S, T; |
f9896bf3 IM |
1804 | |
1805 | entry = iter->ent; | |
dd0e545f | 1806 | |
f9896bf3 | 1807 | ret = trace_seq_printf(s, "%d %d %llu ", |
777e208d | 1808 | entry->pid, iter->cpu, iter->ts); |
f9896bf3 | 1809 | if (!ret) |
2c4f035f | 1810 | return TRACE_TYPE_PARTIAL_LINE; |
f9896bf3 IM |
1811 | |
1812 | switch (entry->type) { | |
777e208d | 1813 | case TRACE_FN: { |
7104f300 SR |
1814 | struct ftrace_entry *field; |
1815 | ||
1816 | trace_assign_type(field, entry); | |
777e208d | 1817 | |
f9896bf3 | 1818 | ret = trace_seq_printf(s, "%lx %lx\n",
777e208d SR |
1819 | field->ip, |
1820 | field->parent_ip); | |
f9896bf3 | 1821 | if (!ret) |
2c4f035f | 1822 | return TRACE_TYPE_PARTIAL_LINE; |
f9896bf3 | 1823 | break; |
777e208d | 1824 | } |
f9896bf3 | 1825 | case TRACE_CTX: |
777e208d | 1826 | case TRACE_WAKE: { |
7104f300 SR |
1827 | struct ctx_switch_entry *field; |
1828 | ||
1829 | trace_assign_type(field, entry); | |
777e208d | 1830 | |
3d9101e9 TG |
1831 | T = task_state_char(field->next_state); |
1832 | S = entry->type == TRACE_WAKE ? '+' : | |
1833 | task_state_char(field->prev_state); | |
80b5e940 | 1834 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", |
777e208d SR |
1835 | field->prev_pid, |
1836 | field->prev_prio, | |
f9896bf3 | 1837 | S, |
777e208d SR |
1838 | field->next_cpu, |
1839 | field->next_pid, | |
1840 | field->next_prio, | |
bac524d3 | 1841 | T); |
f9896bf3 | 1842 | if (!ret) |
2c4f035f | 1843 | return TRACE_TYPE_PARTIAL_LINE; |
f9896bf3 | 1844 | break; |
777e208d | 1845 | } |
f0a920d5 | 1846 | case TRACE_SPECIAL: |
02b67518 | 1847 | case TRACE_USER_STACK: |
777e208d | 1848 | case TRACE_STACK: { |
7104f300 SR |
1849 | struct special_entry *field; |
1850 | ||
1851 | trace_assign_type(field, entry); | |
777e208d | 1852 | |
88a4216c | 1853 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", |
777e208d SR |
1854 | field->arg1, |
1855 | field->arg2, | |
1856 | field->arg3); | |
f0a920d5 | 1857 | if (!ret) |
2c4f035f | 1858 | return TRACE_TYPE_PARTIAL_LINE; |
f0a920d5 | 1859 | break; |
777e208d SR |
1860 | } |
1861 | case TRACE_PRINT: { | |
7104f300 SR |
1862 | struct print_entry *field; |
1863 | ||
1864 | trace_assign_type(field, entry); | |
777e208d SR |
1865 | |
1866 | trace_seq_printf(s, "# %lx %s", field->ip, field->buf); | |
dd0e545f | 1867 | break; |
f9896bf3 | 1868 | } |
777e208d | 1869 | } |
2c4f035f | 1870 | return TRACE_TYPE_HANDLED; |
f9896bf3 IM |
1871 | } |
1872 | ||
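/*
 * Helpers for the binary and hex output formats: emit one field
 * (raw or hex-dumped) and bail out of the enclosing function with 0
 * (TRACE_TYPE_PARTIAL_LINE) when the seq buffer fills up.
 */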
cb0f12aa IM |
1873 | #define SEQ_PUT_FIELD_RET(s, x) \ |
1874 | do { \ | |
1875 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | |
1876 | return 0; \ | |
1877 | } while (0) | |
1878 | ||
5e3ca0ec IM |
1879 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ |
1880 | do { \ | |
ad0a3b68 | 1881 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ |
5e3ca0ec IM |
1882 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ |
1883 | return 0; \ | |
1884 | } while (0) | |
1885 | ||
2c4f035f | 1886 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
5e3ca0ec IM |
1887 | { |
1888 | struct trace_seq *s = &iter->seq; | |
1889 | unsigned char newline = '\n'; | |
1890 | struct trace_entry *entry; | |
bac524d3 | 1891 | int S, T; |
5e3ca0ec IM |
1892 | |
1893 | entry = iter->ent; | |
dd0e545f | 1894 | |
777e208d | 1895 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); |
5e3ca0ec | 1896 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); |
3928a8a2 | 1897 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); |
5e3ca0ec IM |
1898 | |
1899 | switch (entry->type) { | |
777e208d | 1900 | case TRACE_FN: { |
7104f300 SR |
1901 | struct ftrace_entry *field; |
1902 | ||
1903 | trace_assign_type(field, entry); | |
777e208d SR |
1904 | |
1905 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | |
1906 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | |
5e3ca0ec | 1907 | break; |
777e208d | 1908 | } |
5e3ca0ec | 1909 | case TRACE_CTX: |
777e208d | 1910 | case TRACE_WAKE: { |
7104f300 SR |
1911 | struct ctx_switch_entry *field; |
1912 | ||
1913 | trace_assign_type(field, entry); | |
777e208d | 1914 | |
3d9101e9 TG |
1915 | T = task_state_char(field->next_state); |
1916 | S = entry->type == TRACE_WAKE ? '+' : | |
1917 | task_state_char(field->prev_state); | |
777e208d SR |
1918 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); |
1919 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | |
5e3ca0ec | 1920 | SEQ_PUT_HEX_FIELD_RET(s, S); |
777e208d SR |
1921 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); |
1922 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | |
1923 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | |
bac524d3 | 1924 | SEQ_PUT_HEX_FIELD_RET(s, T); |
5e3ca0ec | 1925 | break; |
777e208d | 1926 | } |
5e3ca0ec | 1927 | case TRACE_SPECIAL: |
02b67518 | 1928 | case TRACE_USER_STACK: |
777e208d | 1929 | case TRACE_STACK: { |
7104f300 SR |
1930 | struct special_entry *field; |
1931 | ||
1932 | trace_assign_type(field, entry); | |
777e208d SR |
1933 | |
1934 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | |
1935 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | |
1936 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | |
5e3ca0ec IM |
1937 | break; |
1938 | } | |
777e208d | 1939 | } |
5e3ca0ec IM |
1940 | SEQ_PUT_FIELD_RET(s, newline); |
1941 | ||
2c4f035f | 1942 | return TRACE_TYPE_HANDLED; |
5e3ca0ec IM |
1943 | } |
1944 | ||
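/*
 * With the printk-msgonly trace option set, ftrace_printk entries
 * are reduced to their message text, without the usual
 * pid/cpu/timestamp decoration.
 */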
66896a85 FW |
1945 | static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) |
1946 | { | |
1947 | struct trace_seq *s = &iter->seq; | |
1948 | struct trace_entry *entry = iter->ent; | |
1949 | struct print_entry *field; | |
1950 | int ret; | |
1951 | ||
1952 | trace_assign_type(field, entry); | |
1953 | ||
1954 | ret = trace_seq_printf(s, "%s", field->buf);
1955 | if (!ret) | |
1956 | return TRACE_TYPE_PARTIAL_LINE; | |
1957 | ||
66896a85 FW |
1958 | return TRACE_TYPE_HANDLED; |
1959 | } | |
1960 | ||
2c4f035f | 1961 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
cb0f12aa IM |
1962 | { |
1963 | struct trace_seq *s = &iter->seq; | |
1964 | struct trace_entry *entry; | |
1965 | ||
1966 | entry = iter->ent; | |
dd0e545f | 1967 | |
777e208d | 1968 | SEQ_PUT_FIELD_RET(s, entry->pid); |
072ba498 | 1969 | SEQ_PUT_FIELD_RET(s, entry->cpu); |
3928a8a2 | 1970 | SEQ_PUT_FIELD_RET(s, iter->ts); |
cb0f12aa IM |
1971 | |
1972 | switch (entry->type) { | |
777e208d | 1973 | case TRACE_FN: { |
7104f300 SR |
1974 | struct ftrace_entry *field; |
1975 | ||
1976 | trace_assign_type(field, entry); | |
777e208d SR |
1977 | |
1978 | SEQ_PUT_FIELD_RET(s, field->ip); | |
1979 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | |
cb0f12aa | 1980 | break; |
777e208d SR |
1981 | } |
1982 | case TRACE_CTX: { | |
7104f300 SR |
1983 | struct ctx_switch_entry *field; |
1984 | ||
1985 | trace_assign_type(field, entry); | |
777e208d SR |
1986 | |
1987 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | |
1988 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | |
1989 | SEQ_PUT_FIELD_RET(s, field->prev_state); | |
1990 | SEQ_PUT_FIELD_RET(s, field->next_pid); | |
1991 | SEQ_PUT_FIELD_RET(s, field->next_prio); | |
1992 | SEQ_PUT_FIELD_RET(s, field->next_state); | |
cb0f12aa | 1993 | break; |
777e208d | 1994 | } |
f0a920d5 | 1995 | case TRACE_SPECIAL: |
02b67518 | 1996 | case TRACE_USER_STACK: |
777e208d | 1997 | case TRACE_STACK: { |
7104f300 SR |
1998 | struct special_entry *field; |
1999 | ||
2000 | trace_assign_type(field, entry); | |
777e208d SR |
2001 | |
2002 | SEQ_PUT_FIELD_RET(s, field->arg1); | |
2003 | SEQ_PUT_FIELD_RET(s, field->arg2); | |
2004 | SEQ_PUT_FIELD_RET(s, field->arg3); | |
f0a920d5 | 2005 | break; |
cb0f12aa | 2006 | } |
777e208d | 2007 | } |
cb0f12aa IM |
2008 | return TRACE_TYPE_HANDLED;
2009 | } | |
2010 | ||
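/*
 * Return 1 only if every traced CPU has nothing left to read,
 * checking the seq-file iterators when they exist and the live
 * ring buffer otherwise.
 */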
bc0c38d1 SR |
2011 | static int trace_empty(struct trace_iterator *iter) |
2012 | { | |
bc0c38d1 SR |
2013 | int cpu; |
2014 | ||
ab46428c | 2015 | for_each_tracing_cpu(cpu) { |
d769041f SR |
2016 | if (iter->buffer_iter[cpu]) { |
2017 | if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) | |
2018 | return 0; | |
2019 | } else { | |
2020 | if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) | |
2021 | return 0; | |
2022 | } | |
bc0c38d1 | 2023 | } |
d769041f | 2024 | |
797d3712 | 2025 | return 1; |
bc0c38d1 SR |
2026 | } |
2027 | ||
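/*
 * Pick the output format for one entry: a tracer's own print_line
 * hook wins, then the printk-msgonly, bin, hex and raw options in
 * that order, then the latency format, and finally the default
 * layout.
 */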
2c4f035f | 2028 | static enum print_line_t print_trace_line(struct trace_iterator *iter) |
f9896bf3 | 2029 | { |
2c4f035f FW |
2030 | enum print_line_t ret; |
2031 | ||
2032 | if (iter->trace && iter->trace->print_line) { | |
2033 | ret = iter->trace->print_line(iter); | |
2034 | if (ret != TRACE_TYPE_UNHANDLED) | |
2035 | return ret; | |
2036 | } | |
72829bc3 | 2037 | |
66896a85 FW |
2038 | if (iter->ent->type == TRACE_PRINT && |
2039 | trace_flags & TRACE_ITER_PRINTK && | |
2040 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | |
2041 | return print_printk_msg_only(iter); | |
2042 | ||
cb0f12aa IM |
2043 | if (trace_flags & TRACE_ITER_BIN) |
2044 | return print_bin_fmt(iter); | |
2045 | ||
5e3ca0ec IM |
2046 | if (trace_flags & TRACE_ITER_HEX) |
2047 | return print_hex_fmt(iter); | |
2048 | ||
f9896bf3 IM |
2049 | if (trace_flags & TRACE_ITER_RAW) |
2050 | return print_raw_fmt(iter); | |
2051 | ||
2052 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) | |
2053 | return print_lat_fmt(iter, iter->idx, iter->cpu); | |
2054 | ||
2055 | return print_trace_fmt(iter); | |
2056 | } | |
2057 | ||
bc0c38d1 SR |
2058 | static int s_show(struct seq_file *m, void *v) |
2059 | { | |
2060 | struct trace_iterator *iter = v; | |
2061 | ||
2062 | if (iter->ent == NULL) { | |
2063 | if (iter->tr) { | |
2064 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | |
2065 | seq_puts(m, "#\n"); | |
2066 | } | |
8bba1bf5 MM |
2067 | if (iter->trace && iter->trace->print_header) |
2068 | iter->trace->print_header(m); | |
2069 | else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | |
bc0c38d1 SR |
2070 | /* print nothing if the buffers are empty */ |
2071 | if (trace_empty(iter)) | |
2072 | return 0; | |
2073 | print_trace_header(m, iter); | |
2074 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | |
2075 | print_lat_help_header(m); | |
2076 | } else { | |
2077 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | |
2078 | print_func_help_header(m); | |
2079 | } | |
2080 | } else { | |
f9896bf3 | 2081 | print_trace_line(iter); |
214023c3 | 2082 | trace_print_seq(m, &iter->seq); |
bc0c38d1 SR |
2083 | } |
2084 | ||
2085 | return 0; | |
2086 | } | |
2087 | ||
2088 | static struct seq_operations tracer_seq_ops = { | |
4bf39a94 IM |
2089 | .start = s_start, |
2090 | .next = s_next, | |
2091 | .stop = s_stop, | |
2092 | .show = s_show, | |
bc0c38d1 SR |
2093 | }; |
2094 | ||
e309b41d | 2095 | static struct trace_iterator * |
bc0c38d1 SR |
2096 | __tracing_open(struct inode *inode, struct file *file, int *ret) |
2097 | { | |
2098 | struct trace_iterator *iter; | |
3928a8a2 SR |
2099 | struct seq_file *m; |
2100 | int cpu; | |
bc0c38d1 | 2101 | |
60a11774 SR |
2102 | if (tracing_disabled) { |
2103 | *ret = -ENODEV; | |
2104 | return NULL; | |
2105 | } | |
2106 | ||
bc0c38d1 SR |
2107 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
2108 | if (!iter) { | |
2109 | *ret = -ENOMEM; | |
2110 | goto out; | |
2111 | } | |
2112 | ||
2113 | mutex_lock(&trace_types_lock); | |
2114 | if (current_trace && current_trace->print_max) | |
2115 | iter->tr = &max_tr; | |
2116 | else | |
2117 | iter->tr = inode->i_private; | |
2118 | iter->trace = current_trace; | |
2119 | iter->pos = -1; | |
2120 | ||
8bba1bf5 MM |
2121 | /* Notify the tracer early, before we stop tracing. */
2122 | if (iter->trace && iter->trace->open) | |
a93751ca | 2123 | iter->trace->open(iter); |
8bba1bf5 | 2124 | |
12ef7d44 SR |
2125 | /* Annotate start of buffers if we had overruns */ |
2126 | if (ring_buffer_overruns(iter->tr->buffer)) | |
2127 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | |
2128 | ||
3928a8a2 | 2130 | for_each_tracing_cpu(cpu) { |
d769041f | 2131 | |
3928a8a2 SR |
2132 | iter->buffer_iter[cpu] = |
2133 | ring_buffer_read_start(iter->tr->buffer, cpu); | |
d769041f | 2134 | |
3928a8a2 SR |
2135 | if (!iter->buffer_iter[cpu]) |
2136 | goto fail_buffer; | |
2137 | } | |
2138 | ||
bc0c38d1 SR |
2139 | /* TODO stop tracer */ |
2140 | *ret = seq_open(file, &tracer_seq_ops); | |
3928a8a2 SR |
2141 | if (*ret) |
2142 | goto fail_buffer; | |
bc0c38d1 | 2143 | |
3928a8a2 SR |
2144 | m = file->private_data; |
2145 | m->private = iter; | |
bc0c38d1 | 2146 | |
3928a8a2 | 2147 | /* stop the trace while dumping */ |
9036990d | 2148 | tracing_stop(); |
3928a8a2 | 2149 | |
bc0c38d1 SR |
2150 | mutex_unlock(&trace_types_lock); |
2151 | ||
2152 | out: | |
2153 | return iter; | |
3928a8a2 SR |
2154 | |
2155 | fail_buffer: | |
2156 | for_each_tracing_cpu(cpu) { | |
2157 | if (iter->buffer_iter[cpu]) | |
2158 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | |
2159 | } | |
2160 | mutex_unlock(&trace_types_lock); | |
0bb943c7 | 2161 | kfree(iter); |
3928a8a2 SR |
2162 | |
2163 | return ERR_PTR(-ENOMEM); | |
bc0c38d1 SR |
2164 | } |
2165 | ||
2166 | int tracing_open_generic(struct inode *inode, struct file *filp) | |
2167 | { | |
60a11774 SR |
2168 | if (tracing_disabled) |
2169 | return -ENODEV; | |
2170 | ||
bc0c38d1 SR |
2171 | filp->private_data = inode->i_private; |
2172 | return 0; | |
2173 | } | |
2174 | ||
2175 | int tracing_release(struct inode *inode, struct file *file) | |
2176 | { | |
2177 | struct seq_file *m = (struct seq_file *)file->private_data; | |
2178 | struct trace_iterator *iter = m->private; | |
3928a8a2 | 2179 | int cpu; |
bc0c38d1 SR |
2180 | |
2181 | mutex_lock(&trace_types_lock); | |
3928a8a2 SR |
2182 | for_each_tracing_cpu(cpu) { |
2183 | if (iter->buffer_iter[cpu]) | |
2184 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | |
2185 | } | |
2186 | ||
bc0c38d1 SR |
2187 | if (iter->trace && iter->trace->close) |
2188 | iter->trace->close(iter); | |
2189 | ||
2190 | /* reenable tracing if it was previously enabled */ | |
9036990d | 2191 | tracing_start(); |
bc0c38d1 SR |
2192 | mutex_unlock(&trace_types_lock); |
2193 | ||
2194 | seq_release(inode, file); | |
2195 | kfree(iter); | |
2196 | return 0; | |
2197 | } | |
2198 | ||
2199 | static int tracing_open(struct inode *inode, struct file *file) | |
2200 | { | |
2201 | int ret; | |
2202 | ||
2203 | __tracing_open(inode, file, &ret); | |
2204 | ||
2205 | return ret; | |
2206 | } | |
2207 | ||
2208 | static int tracing_lt_open(struct inode *inode, struct file *file) | |
2209 | { | |
2210 | struct trace_iterator *iter; | |
2211 | int ret; | |
2212 | ||
2213 | iter = __tracing_open(inode, file, &ret); | |
2214 | ||
2215 | if (!ret) | |
2216 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | |
2217 | ||
2218 | return ret; | |
2219 | } | |
2220 | ||
2221 | ||
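/* seq_file iteration over the list of registered tracers */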
e309b41d | 2222 | static void * |
bc0c38d1 SR |
2223 | t_next(struct seq_file *m, void *v, loff_t *pos) |
2224 | { | |
2225 | struct tracer *t = m->private; | |
2226 | ||
2227 | (*pos)++; | |
2228 | ||
2229 | if (t) | |
2230 | t = t->next; | |
2231 | ||
2232 | m->private = t; | |
2233 | ||
2234 | return t; | |
2235 | } | |
2236 | ||
2237 | static void *t_start(struct seq_file *m, loff_t *pos) | |
2238 | { | |
2239 | struct tracer *t = m->private; | |
2240 | loff_t l = 0; | |
2241 | ||
2242 | mutex_lock(&trace_types_lock); | |
2243 | for (; t && l < *pos; t = t_next(m, t, &l)) | |
2244 | ; | |
2245 | ||
2246 | return t; | |
2247 | } | |
2248 | ||
2249 | static void t_stop(struct seq_file *m, void *p) | |
2250 | { | |
2251 | mutex_unlock(&trace_types_lock); | |
2252 | } | |
2253 | ||
2254 | static int t_show(struct seq_file *m, void *v) | |
2255 | { | |
2256 | struct tracer *t = v; | |
2257 | ||
2258 | if (!t) | |
2259 | return 0; | |
2260 | ||
2261 | seq_printf(m, "%s", t->name); | |
2262 | if (t->next) | |
2263 | seq_putc(m, ' '); | |
2264 | else | |
2265 | seq_putc(m, '\n'); | |
2266 | ||
2267 | return 0; | |
2268 | } | |
2269 | ||
2270 | static struct seq_operations show_traces_seq_ops = { | |
4bf39a94 IM |
2271 | .start = t_start, |
2272 | .next = t_next, | |
2273 | .stop = t_stop, | |
2274 | .show = t_show, | |
bc0c38d1 SR |
2275 | }; |
2276 | ||
2277 | static int show_traces_open(struct inode *inode, struct file *file) | |
2278 | { | |
2279 | int ret; | |
2280 | ||
60a11774 SR |
2281 | if (tracing_disabled) |
2282 | return -ENODEV; | |
2283 | ||
bc0c38d1 SR |
2284 | ret = seq_open(file, &show_traces_seq_ops); |
2285 | if (!ret) { | |
2286 | struct seq_file *m = file->private_data; | |
2287 | m->private = trace_types; | |
2288 | } | |
2289 | ||
2290 | return ret; | |
2291 | } | |
2292 | ||
2293 | static struct file_operations tracing_fops = { | |
4bf39a94 IM |
2294 | .open = tracing_open, |
2295 | .read = seq_read, | |
2296 | .llseek = seq_lseek, | |
2297 | .release = tracing_release, | |
bc0c38d1 SR |
2298 | }; |
2299 | ||
2300 | static struct file_operations tracing_lt_fops = { | |
4bf39a94 IM |
2301 | .open = tracing_lt_open, |
2302 | .read = seq_read, | |
2303 | .llseek = seq_lseek, | |
2304 | .release = tracing_release, | |
bc0c38d1 SR |
2305 | }; |
2306 | ||
2307 | static struct file_operations show_traces_fops = { | |
c7078de1 IM |
2308 | .open = show_traces_open, |
2309 | .read = seq_read, | |
2310 | .release = seq_release, | |
2311 | }; | |
2312 | ||
36dfe925 IM |
2313 | /* |
2314 | * Only trace on a CPU if the bitmask is set: | |
2315 | */ | |
2316 | static cpumask_t tracing_cpumask = CPU_MASK_ALL; | |
2317 | ||
2318 | /* | |
2319 | * When tracing/tracing_cpumask is modified, this holds
2320 | * the new bitmask we are about to install: | |
2321 | */ | |
2322 | static cpumask_t tracing_cpumask_new; | |
2323 | ||
2324 | /* | |
2325 | * The tracer itself will not take this lock, but still we want | |
2326 | * to provide a consistent cpumask to user-space: | |
2327 | */ | |
2328 | static DEFINE_MUTEX(tracing_cpumask_update_lock); | |
2329 | ||
2330 | /* | |
2331 | * Temporary storage for the character representation of the | |
2332 | * CPU bitmask (and one more byte for the newline): | |
2333 | */ | |
2334 | static char mask_str[NR_CPUS + 1]; | |
2335 | ||
c7078de1 IM |
2336 | static ssize_t |
2337 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | |
2338 | size_t count, loff_t *ppos) | |
2339 | { | |
36dfe925 | 2340 | int len; |
c7078de1 IM |
2341 | |
2342 | mutex_lock(&tracing_cpumask_update_lock); | |
36dfe925 IM |
2343 | |
2344 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | |
2345 | if (count - len < 2) { | |
2346 | count = -EINVAL; | |
2347 | goto out_err; | |
2348 | } | |
2349 | len += sprintf(mask_str + len, "\n"); | |
2350 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); | |
2351 | ||
2352 | out_err: | |
c7078de1 IM |
2353 | mutex_unlock(&tracing_cpumask_update_lock); |
2354 | ||
2355 | return count; | |
2356 | } | |
2357 | ||
2358 | static ssize_t | |
2359 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |
2360 | size_t count, loff_t *ppos) | |
2361 | { | |
36dfe925 | 2362 | int err, cpu; |
c7078de1 IM |
2363 | |
2364 | mutex_lock(&tracing_cpumask_update_lock); | |
36dfe925 | 2365 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); |
c7078de1 | 2366 | if (err) |
36dfe925 IM |
2367 | goto err_unlock; |
2368 | ||
a5e25883 | 2369 | local_irq_disable(); |
92205c23 | 2370 | __raw_spin_lock(&ftrace_max_lock); |
ab46428c | 2371 | for_each_tracing_cpu(cpu) { |
36dfe925 IM |
2372 | /* |
2373 | * Increase/decrease the disabled counter if we are | |
2374 | * about to flip a bit in the cpumask: | |
2375 | */ | |
2376 | if (cpu_isset(cpu, tracing_cpumask) && | |
2377 | !cpu_isset(cpu, tracing_cpumask_new)) { | |
2378 | atomic_inc(&global_trace.data[cpu]->disabled); | |
2379 | } | |
2380 | if (!cpu_isset(cpu, tracing_cpumask) && | |
2381 | cpu_isset(cpu, tracing_cpumask_new)) { | |
2382 | atomic_dec(&global_trace.data[cpu]->disabled); | |
2383 | } | |
2384 | } | |
92205c23 | 2385 | __raw_spin_unlock(&ftrace_max_lock); |
a5e25883 | 2386 | local_irq_enable(); |
36dfe925 IM |
2387 | |
2388 | tracing_cpumask = tracing_cpumask_new; | |
2389 | ||
2390 | mutex_unlock(&tracing_cpumask_update_lock); | |
c7078de1 IM |
2391 | |
2392 | return count; | |
36dfe925 IM |
2393 | |
2394 | err_unlock: | |
2395 | mutex_unlock(&tracing_cpumask_update_lock); | |
2396 | ||
2397 | return err; | |
c7078de1 IM |
2398 | } |
2399 | ||
2400 | static struct file_operations tracing_cpumask_fops = { | |
2401 | .open = tracing_open_generic, | |
2402 | .read = tracing_cpumask_read, | |
2403 | .write = tracing_cpumask_write, | |
bc0c38d1 SR |
2404 | }; |
2405 | ||
2406 | static ssize_t | |
ee6bce52 | 2407 | tracing_trace_options_read(struct file *filp, char __user *ubuf, |
bc0c38d1 SR |
2408 | size_t cnt, loff_t *ppos) |
2409 | { | |
adf9f195 | 2410 | int i; |
bc0c38d1 SR |
2411 | char *buf; |
2412 | int r = 0; | |
2413 | int len = 0; | |
adf9f195 FW |
2414 | u32 tracer_flags = current_trace->flags->val; |
2415 | struct tracer_opt *trace_opts = current_trace->flags->opts; | |
2416 | ||
bc0c38d1 SR |
2417 | |
2418 | /* calculate max size */
2419 | for (i = 0; trace_options[i]; i++) { | |
2420 | len += strlen(trace_options[i]); | |
2421 | len += 3; /* "no" and space */ | |
2422 | } | |
2423 | ||
adf9f195 FW |
2424 | /* |
2425 | * Increase the size with the names of options specific
2426 | * to the current tracer.
2427 | */ | |
2428 | for (i = 0; trace_opts[i].name; i++) { | |
2429 | len += strlen(trace_opts[i].name); | |
2430 | len += 3; /* "no" and space */ | |
2431 | } | |
2432 | ||
bc0c38d1 SR |
2433 | /* +2 for \n and \0 */ |
2434 | buf = kmalloc(len + 2, GFP_KERNEL); | |
2435 | if (!buf) | |
2436 | return -ENOMEM; | |
2437 | ||
2438 | for (i = 0; trace_options[i]; i++) { | |
2439 | if (trace_flags & (1 << i)) | |
2440 | r += sprintf(buf + r, "%s ", trace_options[i]); | |
2441 | else | |
2442 | r += sprintf(buf + r, "no%s ", trace_options[i]); | |
2443 | } | |
2444 | ||
adf9f195 FW |
2445 | for (i = 0; trace_opts[i].name; i++) { |
2446 | if (tracer_flags & trace_opts[i].bit) | |
2447 | r += sprintf(buf + r, "%s ", | |
2448 | trace_opts[i].name); | |
2449 | else | |
2450 | r += sprintf(buf + r, "no%s ", | |
2451 | trace_opts[i].name); | |
2452 | } | |
2453 | ||
bc0c38d1 SR |
2454 | r += sprintf(buf + r, "\n"); |
2455 | WARN_ON(r >= len + 2); | |
2456 | ||
36dfe925 | 2457 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
bc0c38d1 SR |
2458 | |
2459 | kfree(buf); | |
2460 | ||
2461 | return r; | |
2462 | } | |
2463 | ||
adf9f195 FW |
2464 | /* Try to assign a tracer specific option */ |
2465 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |
2466 | { | |
2467 | struct tracer_flags *trace_flags = trace->flags; | |
2468 | struct tracer_opt *opts = NULL; | |
2469 | int ret = 0, i = 0; | |
2470 | int len; | |
2471 | ||
2472 | for (i = 0; trace_flags->opts[i].name; i++) { | |
2473 | opts = &trace_flags->opts[i]; | |
2474 | len = strlen(opts->name); | |
2475 | ||
2476 | if (strncmp(cmp, opts->name, len) == 0) { | |
2477 | ret = trace->set_flag(trace_flags->val, | |
2478 | opts->bit, !neg); | |
2479 | break; | |
2480 | } | |
2481 | } | |
2482 | /* Not found */ | |
2483 | if (!trace_flags->opts[i].name) | |
2484 | return -EINVAL; | |
2485 | ||
2486 | /* Refused to handle */ | |
2487 | if (ret) | |
2488 | return ret; | |
2489 | ||
2490 | if (neg) | |
2491 | trace_flags->val &= ~opts->bit; | |
2492 | else | |
2493 | trace_flags->val |= opts->bit; | |
2494 | ||
2495 | return 0; | |
2496 | } | |
2497 | ||
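/*
 * Writes follow the convention the read side prints: a bare name
 * sets an option, a "no" prefix clears it. Assuming debugfs is
 * mounted on /debug, as in the README text below:
 *
 *	# echo noprint-parent > /debug/tracing/trace_options
 *
 * Names that match no generic option fall through to the current
 * tracer's private flags.
 */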
bc0c38d1 | 2498 | static ssize_t |
ee6bce52 | 2499 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, |
bc0c38d1 SR |
2500 | size_t cnt, loff_t *ppos) |
2501 | { | |
2502 | char buf[64]; | |
2503 | char *cmp = buf; | |
2504 | int neg = 0; | |
adf9f195 | 2505 | int ret; |
bc0c38d1 SR |
2506 | int i; |
2507 | ||
cffae437 SR |
2508 | if (cnt >= sizeof(buf)) |
2509 | return -EINVAL; | |
bc0c38d1 SR |
2510 | |
2511 | if (copy_from_user(&buf, ubuf, cnt)) | |
2512 | return -EFAULT; | |
2513 | ||
2514 | buf[cnt] = 0; | |
2515 | ||
2516 | if (strncmp(buf, "no", 2) == 0) { | |
2517 | neg = 1; | |
2518 | cmp += 2; | |
2519 | } | |
2520 | ||
2521 | for (i = 0; trace_options[i]; i++) { | |
2522 | int len = strlen(trace_options[i]); | |
2523 | ||
2524 | if (strncmp(cmp, trace_options[i], len) == 0) { | |
2525 | if (neg) | |
2526 | trace_flags &= ~(1 << i); | |
2527 | else | |
2528 | trace_flags |= (1 << i); | |
2529 | break; | |
2530 | } | |
2531 | } | |
adf9f195 FW |
2532 | |
2533 | /* If no option could be set, test the specific tracer options */ | |
2534 | if (!trace_options[i]) { | |
2535 | ret = set_tracer_option(current_trace, cmp, neg); | |
2536 | if (ret) | |
2537 | return ret; | |
2538 | } | |
bc0c38d1 SR |
2539 | |
2540 | filp->f_pos += cnt; | |
2541 | ||
2542 | return cnt; | |
2543 | } | |
2544 | ||
2545 | static struct file_operations tracing_iter_fops = { | |
c7078de1 | 2546 | .open = tracing_open_generic, |
ee6bce52 SR |
2547 | .read = tracing_trace_options_read, |
2548 | .write = tracing_trace_options_write, | |
bc0c38d1 SR |
2549 | }; |
2550 | ||
7bd2f24c IM |
2551 | static const char readme_msg[] = |
2552 | "tracing mini-HOWTO:\n\n" | |
2553 | "# mkdir /debug\n" | |
2554 | "# mount -t debugfs nodev /debug\n\n" | |
2555 | "# cat /debug/tracing/available_tracers\n" | |
2556 | "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n" | |
2557 | "# cat /debug/tracing/current_tracer\n" | |
2558 | "none\n" | |
2559 | "# echo sched_switch > /debug/tracing/current_tracer\n" | |
2560 | "# cat /debug/tracing/current_tracer\n" | |
2561 | "sched_switch\n" | |
ee6bce52 | 2562 | "# cat /debug/tracing/trace_options\n" |
7bd2f24c | 2563 | "noprint-parent nosym-offset nosym-addr noverbose\n" |
ee6bce52 | 2564 | "# echo print-parent > /debug/tracing/trace_options\n" |
7bd2f24c IM |
2565 | "# echo 1 > /debug/tracing/tracing_enabled\n" |
2566 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" | |
2567 | "echo 0 > /debug/tracing/tracing_enabled\n" | |
2568 | ; | |
2569 | ||
2570 | static ssize_t | |
2571 | tracing_readme_read(struct file *filp, char __user *ubuf, | |
2572 | size_t cnt, loff_t *ppos) | |
2573 | { | |
2574 | return simple_read_from_buffer(ubuf, cnt, ppos, | |
2575 | readme_msg, strlen(readme_msg)); | |
2576 | } | |
2577 | ||
2578 | static struct file_operations tracing_readme_fops = { | |
c7078de1 IM |
2579 | .open = tracing_open_generic, |
2580 | .read = tracing_readme_read, | |
7bd2f24c IM |
2581 | }; |
2582 | ||
bc0c38d1 SR |
2583 | static ssize_t |
2584 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | |
2585 | size_t cnt, loff_t *ppos) | |
2586 | { | |
bc0c38d1 SR |
2587 | char buf[64]; |
2588 | int r; | |
2589 | ||
9036990d | 2590 | r = sprintf(buf, "%u\n", tracer_enabled); |
4e3c3333 | 2591 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
bc0c38d1 SR |
2592 | } |
2593 | ||
2594 | static ssize_t | |
2595 | tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |
2596 | size_t cnt, loff_t *ppos) | |
2597 | { | |
2598 | struct trace_array *tr = filp->private_data; | |
bc0c38d1 | 2599 | char buf[64]; |
c6caeeb1 SR |
2600 | long val; |
2601 | int ret; | |
bc0c38d1 | 2602 | |
cffae437 SR |
2603 | if (cnt >= sizeof(buf)) |
2604 | return -EINVAL; | |
bc0c38d1 SR |
2605 | |
2606 | if (copy_from_user(&buf, ubuf, cnt)) | |
2607 | return -EFAULT; | |
2608 | ||
2609 | buf[cnt] = 0; | |
2610 | ||
c6caeeb1 SR |
2611 | ret = strict_strtoul(buf, 10, &val); |
2612 | if (ret < 0) | |
2613 | return ret; | |
bc0c38d1 SR |
2614 | |
2615 | val = !!val; | |
2616 | ||
2617 | mutex_lock(&trace_types_lock); | |
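/* Only act when the requested state differs from the current one. */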
9036990d SR |
2618 | if (tracer_enabled ^ val) { |
2619 | if (val) { | |
bc0c38d1 | 2620 | tracer_enabled = 1; |
9036990d SR |
2621 | if (current_trace->start) |
2622 | current_trace->start(tr); | |
2623 | tracing_start(); | |
2624 | } else { | |
bc0c38d1 | 2625 | tracer_enabled = 0; |
9036990d SR |
2626 | tracing_stop(); |
2627 | if (current_trace->stop) | |
2628 | current_trace->stop(tr); | |
2629 | } | |
bc0c38d1 SR |
2630 | } |
2631 | mutex_unlock(&trace_types_lock); | |
2632 | ||
2633 | filp->f_pos += cnt; | |
2634 | ||
2635 | return cnt; | |
2636 | } | |
2637 | ||
2638 | static ssize_t | |
2639 | tracing_set_trace_read(struct file *filp, char __user *ubuf, | |
2640 | size_t cnt, loff_t *ppos) | |
2641 | { | |
2642 | char buf[max_tracer_type_len+2]; | |
2643 | int r; | |
2644 | ||
2645 | mutex_lock(&trace_types_lock); | |
2646 | if (current_trace) | |
2647 | r = sprintf(buf, "%s\n", current_trace->name); | |
2648 | else | |
2649 | r = sprintf(buf, "\n"); | |
2650 | mutex_unlock(&trace_types_lock); | |
2651 | ||
4bf39a94 | 2652 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
bc0c38d1 SR |
2653 | } |
2654 | ||
d9e54076 | 2655 | static int tracing_set_tracer(char *buf) |
bc0c38d1 SR |
2656 | { |
2657 | struct trace_array *tr = &global_trace; | |
2658 | struct tracer *t; | |
d9e54076 | 2659 | int ret = 0; |
bc0c38d1 SR |
2660 | |
2661 | mutex_lock(&trace_types_lock); | |
2662 | for (t = trace_types; t; t = t->next) { | |
2663 | if (strcmp(t->name, buf) == 0) | |
2664 | break; | |
2665 | } | |
c2931e05 FW |
2666 | if (!t) { |
2667 | ret = -EINVAL; | |
2668 | goto out; | |
2669 | } | |
2670 | if (t == current_trace) | |
bc0c38d1 SR |
2671 | goto out; |
2672 | ||
9f029e83 | 2673 | trace_branch_disable(); |
bc0c38d1 SR |
2674 | if (current_trace && current_trace->reset) |
2675 | current_trace->reset(tr); | |
2676 | ||
2677 | current_trace = t; | |
1c80025a FW |
2678 | if (t->init) { |
2679 | ret = t->init(tr); | |
2680 | if (ret) | |
2681 | goto out; | |
2682 | } | |
bc0c38d1 | 2683 | |
9f029e83 | 2684 | trace_branch_enable(tr); |
bc0c38d1 SR |
2685 | out: |
2686 | mutex_unlock(&trace_types_lock); | |
2687 | ||
d9e54076 PZ |
2688 | return ret; |
2689 | } | |
2690 | ||
2691 | static ssize_t | |
2692 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |
2693 | size_t cnt, loff_t *ppos) | |
2694 | { | |
2695 | char buf[max_tracer_type_len+1]; | |
2696 | int i; | |
2697 | size_t ret; | |
e6e7a65a FW |
2698 | int err; |
2699 | ||
2700 | ret = cnt; | |
d9e54076 PZ |
2701 | |
2702 | if (cnt > max_tracer_type_len) | |
2703 | cnt = max_tracer_type_len; | |
2704 | ||
2705 | if (copy_from_user(&buf, ubuf, cnt)) | |
2706 | return -EFAULT; | |
2707 | ||
2708 | buf[cnt] = 0; | |
2709 | ||
2710 | /* strip ending whitespace. */ | |
2711 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | |
2712 | buf[i] = 0; | |
2713 | ||
e6e7a65a FW |
2714 | err = tracing_set_tracer(buf); |
2715 | if (err) | |
2716 | return err; | |
d9e54076 | 2717 | |
e6e7a65a | 2718 | filp->f_pos += ret; |
bc0c38d1 | 2719 | |
c2931e05 | 2720 | return ret; |
bc0c38d1 SR |
2721 | } |
2722 | ||
2723 | static ssize_t | |
2724 | tracing_max_lat_read(struct file *filp, char __user *ubuf, | |
2725 | size_t cnt, loff_t *ppos) | |
2726 | { | |
2727 | unsigned long *ptr = filp->private_data; | |
2728 | char buf[64]; | |
2729 | int r; | |
2730 | ||
cffae437 | 2731 | r = snprintf(buf, sizeof(buf), "%ld\n", |
bc0c38d1 | 2732 | *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); |
cffae437 SR |
2733 | if (r > sizeof(buf)) |
2734 | r = sizeof(buf); | |
4bf39a94 | 2735 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
bc0c38d1 SR |
2736 | } |
2737 | ||
2738 | static ssize_t | |
2739 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |
2740 | size_t cnt, loff_t *ppos) | |
2741 | { | |
2742 | long *ptr = filp->private_data; | |
bc0c38d1 | 2743 | char buf[64]; |
c6caeeb1 SR |
2744 | long val; |
2745 | int ret; | |
bc0c38d1 | 2746 | |
cffae437 SR |
2747 | if (cnt >= sizeof(buf)) |
2748 | return -EINVAL; | |
bc0c38d1 SR |
2749 | |
2750 | if (copy_from_user(&buf, ubuf, cnt)) | |
2751 | return -EFAULT; | |
2752 | ||
2753 | buf[cnt] = 0; | |
2754 | ||
c6caeeb1 SR |
2755 | ret = strict_strtoul(buf, 10, &val); |
2756 | if (ret < 0) | |
2757 | return ret; | |
bc0c38d1 SR |
2758 | |
2759 | *ptr = val * 1000; | |
2760 | ||
2761 | return cnt; | |
2762 | } | |
2763 | ||
b3806b43 SR |
2764 | static atomic_t tracing_reader; |
2765 | ||
2766 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | |
2767 | { | |
2768 | struct trace_iterator *iter; | |
2769 | ||
2770 | if (tracing_disabled) | |
2771 | return -ENODEV; | |
2772 | ||
2773 | /* We only allow one reader of the pipe */
2774 | if (atomic_inc_return(&tracing_reader) != 1) { | |
2775 | atomic_dec(&tracing_reader); | |
2776 | return -EBUSY; | |
2777 | } | |
2778 | ||
2779 | /* create a buffer to store the information to pass to userspace */ | |
2780 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | |
2781 | if (!iter) | |
2782 | return -ENOMEM; | |
2783 | ||
107bad8b | 2784 | mutex_lock(&trace_types_lock); |
a309720c SR |
2785 | |
2786 | /* trace pipe does not show start of buffer */ | |
2787 | cpus_setall(iter->started); | |
2788 | ||
b3806b43 | 2789 | iter->tr = &global_trace; |
72829bc3 | 2790 | iter->trace = current_trace; |
b3806b43 SR |
2791 | filp->private_data = iter; |
2792 | ||
107bad8b SR |
2793 | if (iter->trace->pipe_open) |
2794 | iter->trace->pipe_open(iter); | |
2795 | mutex_unlock(&trace_types_lock); | |
2796 | ||
b3806b43 SR |
2797 | return 0; |
2798 | } | |
2799 | ||
2800 | static int tracing_release_pipe(struct inode *inode, struct file *file) | |
2801 | { | |
2802 | struct trace_iterator *iter = file->private_data; | |
2803 | ||
2804 | kfree(iter); | |
2805 | atomic_dec(&tracing_reader); | |
2806 | ||
2807 | return 0; | |
2808 | } | |
2809 | ||
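/*
 * In blocking mode always report readable, since the read side
 * itself sleeps until data shows up; otherwise report readable
 * only when the buffers have something to consume.
 */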
2a2cc8f7 SSP |
2810 | static unsigned int |
2811 | tracing_poll_pipe(struct file *filp, poll_table *poll_table) | |
2812 | { | |
2813 | struct trace_iterator *iter = filp->private_data; | |
2814 | ||
2815 | if (trace_flags & TRACE_ITER_BLOCK) { | |
2816 | /* | |
2817 | * Always select as readable when in blocking mode | |
2818 | */ | |
2819 | return POLLIN | POLLRDNORM; | |
afc2abc0 | 2820 | } else { |
2a2cc8f7 SSP |
2821 | if (!trace_empty(iter)) |
2822 | return POLLIN | POLLRDNORM; | |
2823 | poll_wait(filp, &trace_wait, poll_table); | |
2824 | if (!trace_empty(iter)) | |
2825 | return POLLIN | POLLRDNORM; | |
2826 | ||
2827 | return 0; | |
2828 | } | |
2829 | } | |
2830 | ||
b3806b43 SR |
2831 | /* |
2832 | * Consumer reader. | |
2833 | */ | |
2834 | static ssize_t | |
2835 | tracing_read_pipe(struct file *filp, char __user *ubuf, | |
2836 | size_t cnt, loff_t *ppos) | |
2837 | { | |
2838 | struct trace_iterator *iter = filp->private_data; | |
6c6c2796 | 2839 | ssize_t sret; |
b3806b43 SR |
2840 | |
2841 | /* return any leftover data */ | |
6c6c2796 PP |
2842 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
2843 | if (sret != -EBUSY) | |
2844 | return sret; | |
b3806b43 | 2845 | |
6c6c2796 | 2846 | trace_seq_reset(&iter->seq); |
b3806b43 | 2847 | |
107bad8b SR |
2848 | mutex_lock(&trace_types_lock); |
2849 | if (iter->trace->read) { | |
6c6c2796 PP |
2850 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); |
2851 | if (sret) | |
107bad8b | 2852 | goto out; |
107bad8b SR |
2853 | } |
2854 | ||
9ff4b974 PP |
2855 | waitagain: |
2856 | sret = 0; | |
b3806b43 | 2857 | while (trace_empty(iter)) { |
2dc8f095 | 2858 | |
107bad8b | 2859 | if ((filp->f_flags & O_NONBLOCK)) { |
6c6c2796 | 2860 | sret = -EAGAIN; |
107bad8b SR |
2861 | goto out; |
2862 | } | |
2dc8f095 | 2863 | |
b3806b43 SR |
2864 | /* |
2865 | * This is a make-shift waitqueue. The reason we don't use | |
2866 | * an actual wait queue is because: | |
2867 | * 1) we only ever have one waiter | |
2868 | * 2) the tracer traces all functions; we don't want
2869 | * the overhead of calling wake_up and friends
2870 | * (and tracing them too).
2871 | * Anyway, this is a really primitive wakeup.
2872 | */ | |
2873 | set_current_state(TASK_INTERRUPTIBLE); | |
2874 | iter->tr->waiter = current; | |
2875 | ||
107bad8b SR |
2876 | mutex_unlock(&trace_types_lock); |
2877 | ||
9fe068e9 IM |
2878 | /* sleep for 100 msecs, and try again. */ |
2879 | schedule_timeout(HZ/10); | |
b3806b43 | 2880 | |
107bad8b SR |
2881 | mutex_lock(&trace_types_lock); |
2882 | ||
b3806b43 SR |
2883 | iter->tr->waiter = NULL; |
2884 | ||
107bad8b | 2885 | if (signal_pending(current)) { |
6c6c2796 | 2886 | sret = -EINTR; |
107bad8b SR |
2887 | goto out; |
2888 | } | |
b3806b43 | 2889 | |
84527997 | 2890 | if (iter->trace != current_trace) |
107bad8b | 2891 | goto out; |
84527997 | 2892 | |
b3806b43 SR |
2893 | /* |
2894 | * We block until we read something and tracing is disabled. | |
2895 | * We still block if tracing is disabled, but we have never | |
2896 | * read anything. This allows a user to cat this file, and | |
2897 | * then enable tracing. But after we have read something, | |
2898 | * we give an EOF when tracing is again disabled. | |
2899 | * | |
2900 | * iter->pos will be 0 if we haven't read anything. | |
2901 | */ | |
2902 | if (!tracer_enabled && iter->pos) | |
2903 | break; | |
2904 | ||
2905 | continue; | |
2906 | } | |
2907 | ||
2908 | /* stop when tracing is finished */ | |
2909 | if (trace_empty(iter)) | |
107bad8b | 2910 | goto out; |
b3806b43 SR |
2911 | |
2912 | if (cnt >= PAGE_SIZE) | |
2913 | cnt = PAGE_SIZE - 1; | |
2914 | ||
53d0aa77 | 2915 | /* reset all but tr, trace, and overruns */ |
53d0aa77 SR |
2916 | memset(&iter->seq, 0, |
2917 | sizeof(struct trace_iterator) - | |
2918 | offsetof(struct trace_iterator, seq)); | |
4823ed7e | 2919 | iter->pos = -1; |
b3806b43 | 2920 | |
088b1e42 | 2921 | while (find_next_entry_inc(iter) != NULL) { |
2c4f035f | 2922 | enum print_line_t ret; |
088b1e42 SR |
2923 | int len = iter->seq.len; |
2924 | ||
f9896bf3 | 2925 | ret = print_trace_line(iter); |
2c4f035f | 2926 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
088b1e42 SR |
2927 | /* don't print partial lines */ |
2928 | iter->seq.len = len; | |
b3806b43 | 2929 | break; |
088b1e42 | 2930 | } |
b3806b43 SR |
2931 | |
2932 | trace_consume(iter); | |
2933 | ||
2934 | if (iter->seq.len >= cnt) | |
2935 | break; | |
b3806b43 SR |
2936 | } |
2937 | ||
b3806b43 | 2938 | /* Now copy what we have to the user */ |
6c6c2796 PP |
2939 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
2940 | if (iter->seq.readpos >= iter->seq.len) | |
b3806b43 | 2941 | trace_seq_reset(&iter->seq); |
9ff4b974 PP |
2942 | |
2943 | /* | |
2944 | * If there was nothing to send to the user, despite consuming trace
2945 | * entries, go back to wait for more entries. | |
2946 | */ | |
6c6c2796 | 2947 | if (sret == -EBUSY) |
9ff4b974 | 2948 | goto waitagain; |
b3806b43 | 2949 | |
107bad8b SR |
2950 | out: |
2951 | mutex_unlock(&trace_types_lock); | |
2952 | ||
6c6c2796 | 2953 | return sret; |
b3806b43 SR |
2954 | } |
2955 | ||
a98a3c3f SR |
2956 | static ssize_t |
2957 | tracing_entries_read(struct file *filp, char __user *ubuf, | |
2958 | size_t cnt, loff_t *ppos) | |
2959 | { | |
2960 | struct trace_array *tr = filp->private_data; | |
2961 | char buf[64]; | |
2962 | int r; | |
2963 | ||
1696b2b0 | 2964 | r = sprintf(buf, "%lu\n", tr->entries >> 10); |
a98a3c3f SR |
2965 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2966 | } | |
2967 | ||
2968 | static ssize_t | |
2969 | tracing_entries_write(struct file *filp, const char __user *ubuf, | |
2970 | size_t cnt, loff_t *ppos) | |
2971 | { | |
2972 | unsigned long val; | |
2973 | char buf[64]; | |
bf5e6519 | 2974 | int ret, cpu; |
a98a3c3f | 2975 | |
cffae437 SR |
2976 | if (cnt >= sizeof(buf)) |
2977 | return -EINVAL; | |
a98a3c3f SR |
2978 | |
2979 | if (copy_from_user(&buf, ubuf, cnt)) | |
2980 | return -EFAULT; | |
2981 | ||
2982 | buf[cnt] = 0; | |
2983 | ||
c6caeeb1 SR |
2984 | ret = strict_strtoul(buf, 10, &val); |
2985 | if (ret < 0) | |
2986 | return ret; | |
a98a3c3f SR |
2987 | |
2988 | /* must have at least 1 entry */ | |
2989 | if (!val) | |
2990 | return -EINVAL; | |
2991 | ||
2992 | mutex_lock(&trace_types_lock); | |
2993 | ||
c76f0694 | 2994 | tracing_stop(); |
a98a3c3f | 2995 | |
bf5e6519 SR |
2996 | /* disable all cpu buffers */ |
2997 | for_each_tracing_cpu(cpu) { | |
2998 | if (global_trace.data[cpu]) | |
2999 | atomic_inc(&global_trace.data[cpu]->disabled); | |
3000 | if (max_tr.data[cpu]) | |
3001 | atomic_inc(&max_tr.data[cpu]->disabled); | |
3002 | } | |
3003 | ||
1696b2b0 SR |
3004 | /* value is in KB */ |
3005 | val <<= 10; | |
3006 | ||
3928a8a2 SR |
3007 | if (val != global_trace.entries) { |
3008 | ret = ring_buffer_resize(global_trace.buffer, val); | |
3009 | if (ret < 0) { | |
3010 | cnt = ret; | |
3eefae99 SR |
3011 | goto out; |
3012 | } | |
3013 | ||
3928a8a2 SR |
3014 | ret = ring_buffer_resize(max_tr.buffer, val); |
3015 | if (ret < 0) { | |
3016 | int r; | |
3017 | cnt = ret; | |
3018 | r = ring_buffer_resize(global_trace.buffer, | |
3019 | global_trace.entries); | |
3020 | if (r < 0) { | |
3021 | /* AARGH! We are left with different | |
3022 | * size max buffer!!!! */ | |
3023 | WARN_ON(1); | |
3024 | tracing_disabled = 1; | |
a98a3c3f | 3025 | } |
3928a8a2 | 3026 | goto out; |
a98a3c3f | 3027 | } |
3eefae99 | 3028 | |
3928a8a2 | 3029 | global_trace.entries = val; |
a98a3c3f SR |
3030 | } |
3031 | ||
3032 | filp->f_pos += cnt; | |
3033 | ||
19384c03 SR |
3034 | /* If check pages failed, return ENOMEM */ |
3035 | if (tracing_disabled) | |
3036 | cnt = -ENOMEM; | |
a98a3c3f | 3037 | out: |
bf5e6519 SR |
3038 | for_each_tracing_cpu(cpu) { |
3039 | if (global_trace.data[cpu]) | |
3040 | atomic_dec(&global_trace.data[cpu]->disabled); | |
3041 | if (max_tr.data[cpu]) | |
3042 | atomic_dec(&max_tr.data[cpu]->disabled); | |
3043 | } | |
3044 | ||
c76f0694 | 3045 | tracing_start(); |
a98a3c3f SR |
3046 | max_tr.entries = global_trace.entries; |
3047 | mutex_unlock(&trace_types_lock); | |
3048 | ||
3049 | return cnt; | |
3050 | } | |
3051 | ||
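/*
 * Support for the trace_marker file: writes from user-space are
 * injected into the trace as ftrace_printk-style entries. Assuming
 * debugfs is mounted on /debug, as in the README text above:
 *
 *	# echo hello > /debug/tracing/trace_marker
 */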
5bf9a1ee PP |
3052 | static int mark_printk(const char *fmt, ...) |
3053 | { | |
3054 | int ret; | |
3055 | va_list args; | |
3056 | va_start(args, fmt); | |
1fd8f2a3 | 3057 | ret = trace_vprintk(0, -1, fmt, args); |
5bf9a1ee PP |
3058 | va_end(args); |
3059 | return ret; | |
3060 | } | |
3061 | ||
3062 | static ssize_t | |
3063 | tracing_mark_write(struct file *filp, const char __user *ubuf, | |
3064 | size_t cnt, loff_t *fpos) | |
3065 | { | |
3066 | char *buf; | |
3067 | char *end; | |
5bf9a1ee | 3068 | |
c76f0694 | 3069 | if (tracing_disabled) |
5bf9a1ee PP |
3070 | return -EINVAL; |
3071 | ||
3072 | if (cnt > TRACE_BUF_SIZE) | |
3073 | cnt = TRACE_BUF_SIZE; | |
3074 | ||
3075 | buf = kmalloc(cnt + 1, GFP_KERNEL); | |
3076 | if (buf == NULL) | |
3077 | return -ENOMEM; | |
3078 | ||
3079 | if (copy_from_user(buf, ubuf, cnt)) { | |
3080 | kfree(buf); | |
3081 | return -EFAULT; | |
3082 | } | |
3083 | ||
3084 | /* Cut at the first NUL or newline. */
3085 | buf[cnt] = '\0'; | |
3086 | end = strchr(buf, '\n'); | |
3087 | if (end) | |
3088 | *end = '\0'; | |
3089 | ||
3090 | cnt = mark_printk("%s\n", buf); | |
3091 | kfree(buf); | |
3092 | *fpos += cnt; | |
3093 | ||
3094 | return cnt; | |
3095 | } | |
3096 | ||
bc0c38d1 | 3097 | static struct file_operations tracing_max_lat_fops = { |
4bf39a94 IM |
3098 | .open = tracing_open_generic, |
3099 | .read = tracing_max_lat_read, | |
3100 | .write = tracing_max_lat_write, | |
bc0c38d1 SR |
3101 | }; |
3102 | ||
3103 | static struct file_operations tracing_ctrl_fops = { | |
4bf39a94 IM |
3104 | .open = tracing_open_generic, |
3105 | .read = tracing_ctrl_read, | |
3106 | .write = tracing_ctrl_write, | |
bc0c38d1 SR |
3107 | }; |
3108 | ||
3109 | static struct file_operations set_tracer_fops = { | |
4bf39a94 IM |
3110 | .open = tracing_open_generic, |
3111 | .read = tracing_set_trace_read, | |
3112 | .write = tracing_set_trace_write, | |
bc0c38d1 SR |
3113 | }; |
3114 | ||
b3806b43 | 3115 | static struct file_operations tracing_pipe_fops = { |
4bf39a94 | 3116 | .open = tracing_open_pipe, |
2a2cc8f7 | 3117 | .poll = tracing_poll_pipe, |
4bf39a94 IM |
3118 | .read = tracing_read_pipe, |
3119 | .release = tracing_release_pipe, | |
b3806b43 SR |
3120 | }; |
3121 | ||
a98a3c3f SR |
3122 | static struct file_operations tracing_entries_fops = { |
3123 | .open = tracing_open_generic, | |
3124 | .read = tracing_entries_read, | |
3125 | .write = tracing_entries_write, | |
3126 | }; | |
3127 | ||
5bf9a1ee | 3128 | static struct file_operations tracing_mark_fops = { |
43a15386 | 3129 | .open = tracing_open_generic, |
5bf9a1ee PP |
3130 | .write = tracing_mark_write, |
3131 | }; | |
3132 | ||
bc0c38d1 SR |
3133 | #ifdef CONFIG_DYNAMIC_FTRACE |
3134 | ||
b807c3d0 SR |
3135 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) |
3136 | { | |
3137 | return 0; | |
3138 | } | |
3139 | ||
bc0c38d1 | 3140 | static ssize_t |
b807c3d0 | 3141 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
bc0c38d1 SR |
3142 | size_t cnt, loff_t *ppos) |
3143 | { | |
a26a2a27 SR |
3144 | static char ftrace_dyn_info_buffer[1024]; |
3145 | static DEFINE_MUTEX(dyn_info_mutex); | |
bc0c38d1 | 3146 | unsigned long *p = filp->private_data; |
b807c3d0 | 3147 | char *buf = ftrace_dyn_info_buffer; |
a26a2a27 | 3148 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); |
bc0c38d1 SR |
3149 | int r; |
3150 | ||
b807c3d0 SR |
3151 | mutex_lock(&dyn_info_mutex); |
3152 | r = sprintf(buf, "%ld ", *p); | |
4bf39a94 | 3153 | |
a26a2a27 | 3154 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); |
b807c3d0 SR |
3155 | buf[r++] = '\n'; |
3156 | ||
3157 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | |
3158 | ||
3159 | mutex_unlock(&dyn_info_mutex); | |
3160 | ||
3161 | return r; | |
bc0c38d1 SR |
3162 | } |
3163 | ||
b807c3d0 | 3164 | static struct file_operations tracing_dyn_info_fops = { |
4bf39a94 | 3165 | .open = tracing_open_generic, |
b807c3d0 | 3166 | .read = tracing_read_dyn_info, |
bc0c38d1 SR |
3167 | }; |
3168 | #endif | |
3169 | ||
3170 | static struct dentry *d_tracer; | |
3171 | ||
3172 | struct dentry *tracing_init_dentry(void) | |
3173 | { | |
3174 | static int once; | |
3175 | ||
3176 | if (d_tracer) | |
3177 | return d_tracer; | |
3178 | ||
3179 | d_tracer = debugfs_create_dir("tracing", NULL); | |
3180 | ||
3181 | if (!d_tracer && !once) { | |
3182 | once = 1; | |
3183 | pr_warning("Could not create debugfs directory 'tracing'\n"); | |
3184 | return NULL; | |
3185 | } | |
3186 | ||
3187 | return d_tracer; | |
3188 | } | |
3189 | ||
60a11774 SR |
3190 | #ifdef CONFIG_FTRACE_SELFTEST |
3191 | /* Let selftest have access to static functions in this file */ | |
3192 | #include "trace_selftest.c" | |
3193 | #endif | |
3194 | ||
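/*
 * Create the control files under <debugfs>/tracing: the tracer
 * selection and option knobs, the trace/latency_trace/trace_pipe
 * output files, buffer_size_kb, trace_marker and friends. Failure
 * to create any one entry only warns; the rest still work.
 */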
b5ad384e | 3195 | static __init int tracer_init_debugfs(void) |
bc0c38d1 SR |
3196 | { |
3197 | struct dentry *d_tracer; | |
3198 | struct dentry *entry; | |
3199 | ||
3200 | d_tracer = tracing_init_dentry(); | |
3201 | ||
3202 | entry = debugfs_create_file("tracing_enabled", 0644, d_tracer, | |
3203 | &global_trace, &tracing_ctrl_fops); | |
3204 | if (!entry) | |
3205 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | |
3206 | ||
ee6bce52 | 3207 | entry = debugfs_create_file("trace_options", 0644, d_tracer, |
bc0c38d1 SR |
3208 | NULL, &tracing_iter_fops); |
3209 | if (!entry) | |
ee6bce52 | 3210 | pr_warning("Could not create debugfs 'trace_options' entry\n"); |
bc0c38d1 | 3211 | |
c7078de1 IM |
3212 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, |
3213 | NULL, &tracing_cpumask_fops); | |
3214 | if (!entry) | |
3215 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | |
3216 | ||
bc0c38d1 SR |
3217 | entry = debugfs_create_file("latency_trace", 0444, d_tracer, |
3218 | &global_trace, &tracing_lt_fops); | |
3219 | if (!entry) | |
3220 | pr_warning("Could not create debugfs 'latency_trace' entry\n"); | |
3221 | ||
3222 | entry = debugfs_create_file("trace", 0444, d_tracer, | |
3223 | &global_trace, &tracing_fops); | |
3224 | if (!entry) | |
3225 | pr_warning("Could not create debugfs 'trace' entry\n"); | |
3226 | ||
3227 | entry = debugfs_create_file("available_tracers", 0444, d_tracer, | |
3228 | &global_trace, &show_traces_fops); | |
3229 | if (!entry) | |
98a983aa | 3230 | pr_warning("Could not create debugfs 'available_tracers' entry\n"); |
bc0c38d1 SR |
3231 | |
3232 | entry = debugfs_create_file("current_tracer", 0444, d_tracer, | |
3233 | &global_trace, &set_tracer_fops); | |
3234 | if (!entry) | |
98a983aa | 3235 | pr_warning("Could not create debugfs 'current_tracer' entry\n"); |
bc0c38d1 SR |
3236 | |
3237 | entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, | |
3238 | &tracing_max_latency, | |
3239 | &tracing_max_lat_fops); | |
3240 | if (!entry) | |
3241 | pr_warning("Could not create debugfs " | |
3242 | "'tracing_max_latency' entry\n"); | |
3243 | ||
3244 | entry = debugfs_create_file("tracing_thresh", 0644, d_tracer, | |
3245 | &tracing_thresh, &tracing_max_lat_fops); | |
3246 | if (!entry) | |
3247 | pr_warning("Could not create debugfs " | |
98a983aa | 3248 | "'tracing_thresh' entry\n"); |
7bd2f24c IM |
3249 | entry = debugfs_create_file("README", 0644, d_tracer, |
3250 | NULL, &tracing_readme_fops); | |
3251 | if (!entry) | |
3252 | pr_warning("Could not create debugfs 'README' entry\n"); | |
3253 | ||
b3806b43 SR |
3254 | entry = debugfs_create_file("trace_pipe", 0644, d_tracer, |
3255 | NULL, &tracing_pipe_fops); | |
3256 | if (!entry) | |
3257 | pr_warning("Could not create debugfs " | |
98a983aa | 3258 | "'trace_pipe' entry\n"); |
bc0c38d1 | 3259 | |
a94c80e7 | 3260 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, |
a98a3c3f SR |
3261 | &global_trace, &tracing_entries_fops); |
3262 | if (!entry) | |
3263 | pr_warning("Could not create debugfs " | |
a94c80e7 | 3264 | "'buffer_size_kb' entry\n"); |
a98a3c3f | 3265 | |
5bf9a1ee PP |
3266 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, |
3267 | NULL, &tracing_mark_fops); | |
3268 | if (!entry) | |
3269 | pr_warning("Could not create debugfs " | |
3270 | "'trace_marker' entry\n"); | |
3271 | ||
bc0c38d1 SR |
3272 | #ifdef CONFIG_DYNAMIC_FTRACE |
3273 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | |
3274 | &ftrace_update_tot_cnt, | |
b807c3d0 | 3275 | &tracing_dyn_info_fops); |
bc0c38d1 SR |
3276 | if (!entry) |
3277 | pr_warning("Could not create debugfs " | |
3278 | "'dyn_ftrace_total_info' entry\n"); | |
3279 | #endif | |
d618b3e6 IM |
3280 | #ifdef CONFIG_SYSPROF_TRACER |
3281 | init_tracer_sysprof_debugfs(d_tracer); | |
3282 | #endif | |
b5ad384e | 3283 | return 0; |
bc0c38d1 SR |
3284 | } |
3285 | ||
1fd8f2a3 | 3286 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
dd0e545f | 3287 | { |
dd0e545f SR |
3288 | static DEFINE_SPINLOCK(trace_buf_lock); |
3289 | static char trace_buf[TRACE_BUF_SIZE]; | |
f09ce573 | 3290 | |
3928a8a2 | 3291 | struct ring_buffer_event *event; |
f09ce573 | 3292 | struct trace_array *tr = &global_trace; |
dd0e545f | 3293 | struct trace_array_cpu *data; |
38697053 | 3294 | int cpu, len = 0, size, pc; |
e726f5f9 IM |
3295 | struct print_entry *entry; |
3296 | unsigned long irq_flags; | |
dd0e545f | 3297 | |
8e1b82e0 | 3298 | if (tracing_disabled || tracing_selftest_running) |
dd0e545f SR |
3299 | return 0; |
3300 | ||
38697053 SR |
3301 | pc = preempt_count(); |
3302 | preempt_disable_notrace(); | |
dd0e545f SR |
3303 | cpu = raw_smp_processor_id(); |
3304 | data = tr->data[cpu]; | |
dd0e545f | 3305 | |
3ea2e6d7 | 3306 | if (unlikely(atomic_read(&data->disabled))) |
dd0e545f SR |
3307 | goto out; |
3308 | ||
380c4b14 FW |
3309 | pause_graph_tracing(); |
3310 | spin_lock_irqsave(&trace_buf_lock, irq_flags); | |
801fe400 | 3311 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
dd0e545f SR |
3312 | |
3313 | len = min(len, TRACE_BUF_SIZE-1); | |
3314 | trace_buf[len] = 0; | |
3315 | ||
777e208d SR |
3316 | size = sizeof(*entry) + len + 1; |
3317 | event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); | |
3928a8a2 SR |
3318 | if (!event) |
3319 | goto out_unlock; | |
777e208d | 3320 | entry = ring_buffer_event_data(event); |
e726f5f9 | 3321 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); |
777e208d SR |
3322 | entry->ent.type = TRACE_PRINT; |
3323 | entry->ip = ip; | |
1fd8f2a3 | 3324 | entry->depth = depth; |
dd0e545f | 3325 | |
777e208d SR |
3326 | memcpy(&entry->buf, trace_buf, len); |
3327 | entry->buf[len] = 0; | |
3928a8a2 | 3328 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
dd0e545f | 3329 | |
3928a8a2 | 3330 | out_unlock: |
380c4b14 FW |
3331 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); |
3332 | unpause_graph_tracing(); | |
dd0e545f | 3333 | out: |
38697053 | 3334 | preempt_enable_notrace(); |
dd0e545f SR |
3335 | |
3336 | return len; | |
3337 | } | |
801fe400 PP |
3338 | EXPORT_SYMBOL_GPL(trace_vprintk); |
3339 | ||
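The body of trace_vprintk() above is an instance of the general ring-buffer recipe: compute the record size, reserve space, fill in the entry, commit. The same steps condensed into a standalone helper (the helper itself is invented; the calls and struct fields are the ones used above):

/* Condensed sketch of the reserve/fill/commit recipe from
 * trace_vprintk() above; emit_print_event() itself is invented.
 * This era's ring-buffer API passes an irq_flags cookie from
 * reserve to commit. */
static int emit_print_event(struct trace_array *tr, unsigned long ip,
			    const char *text, int len, int pc)
{
	struct ring_buffer_event *event;
	struct print_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(*entry) + len + 1;	/* header + text + NUL */

	event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
	if (!event)
		return 0;	/* buffer disabled or full: drop the event */

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
	entry->ent.type	= TRACE_PRINT;
	entry->ip	= ip;
	entry->depth	= 0;	/* no function-graph nesting */

	memcpy(&entry->buf, text, len);
	entry->buf[len] = 0;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	return len;
}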
3340 | int __ftrace_printk(unsigned long ip, const char *fmt, ...) | |
3341 | { | |
3342 | int ret; | |
3343 | va_list ap; | |
3344 | ||
3345 | if (!(trace_flags & TRACE_ITER_PRINTK)) | |
3346 | return 0; | |
3347 | ||
3348 | va_start(ap, fmt); | |
21a8c466 | 3349 | ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); |
801fe400 PP |
3350 | va_end(ap); |
3351 | return ret; | |
3352 | } | |
dd0e545f SR |
3353 | EXPORT_SYMBOL_GPL(__ftrace_printk); |
3354 | ||
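Callers normally reach __ftrace_printk() through the ftrace_printk() macro from <linux/ftrace.h>, which supplies the caller's instruction pointer; note from the check above that it is a no-op unless the TRACE_ITER_PRINTK bit is set in trace_flags. A hedged usage sketch; the surrounding driver function and message are invented:

/* Hypothetical caller: sprinkle a printk-style record into the
 * trace stream. ftrace_printk() expands to
 * __ftrace_printk(_THIS_IP_, ...), so the record carries the
 * call site's address. */
static void my_driver_poll(int budget)
{
	ftrace_printk("my_driver: polled %d packets\n", budget);
}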
3f5a54e3 SR |
3355 | static int trace_panic_handler(struct notifier_block *this, |
3356 | unsigned long event, void *unused) | |
3357 | { | |
944ac425 SR |
3358 | if (ftrace_dump_on_oops) |
3359 | ftrace_dump(); | |
3f5a54e3 SR |
3360 | return NOTIFY_OK; |
3361 | } | |
3362 | ||
3363 | static struct notifier_block trace_panic_notifier = { | |
3364 | .notifier_call = trace_panic_handler, | |
3365 | .next = NULL, | |
3366 | .priority = 150 /* priority: INT_MAX >= x >= 0 */ | |
3367 | }; | |
3368 | ||
3369 | static int trace_die_handler(struct notifier_block *self, | |
3370 | unsigned long val, | |
3371 | void *data) | |
3372 | { | |
3373 | switch (val) { | |
3374 | case DIE_OOPS: | |
944ac425 SR |
3375 | if (ftrace_dump_on_oops) |
3376 | ftrace_dump(); | |
3f5a54e3 SR |
3377 | break; |
3378 | default: | |
3379 | break; | |
3380 | } | |
3381 | return NOTIFY_OK; | |
3382 | } | |
3383 | ||
3384 | static struct notifier_block trace_die_notifier = { | |
3385 | .notifier_call = trace_die_handler, | |
3386 | .priority = 200 | |
3387 | }; | |
3388 | ||
3389 | /* | |
3390 | * printk is limited to a maximum of 1024 characters; we really don't need it that big. | |
3391 | * Nothing should be printing 1000 characters at once anyway. | |
3392 | */ | |
3393 | #define TRACE_MAX_PRINT 1000 | |
3394 | ||
3395 | /* | |
3396 | * Define KERN_TRACE here so that we have a single place to modify | |
3397 | * it if we decide to change the log level that the ftrace dump | |
3398 | * should be printed at. | |
3399 | */ | |
3400 | #define KERN_TRACE KERN_INFO | |
3401 | ||
3402 | static void | |
3403 | trace_printk_seq(struct trace_seq *s) | |
3404 | { | |
3405 | /* Probably should print a warning here. */ | |
3406 | if (s->len >= TRACE_MAX_PRINT) | |
3407 | s->len = TRACE_MAX_PRINT; | |
3408 | ||
3409 | /* should already be NUL-terminated, but we are paranoid. */ | |
3410 | s->buffer[s->len] = 0; | |
3411 | ||
3412 | printk(KERN_TRACE "%s", s->buffer); | |
3413 | ||
3414 | trace_seq_reset(s); | |
3415 | } | |
3416 | ||
3f5a54e3 SR |
3417 | void ftrace_dump(void) |
3418 | { | |
3419 | static DEFINE_SPINLOCK(ftrace_dump_lock); | |
3420 | /* use static because iter can be a bit big for the stack */ | |
3421 | static struct trace_iterator iter; | |
3f5a54e3 SR |
3422 | static cpumask_t mask; |
3423 | static int dump_ran; | |
d769041f SR |
3424 | unsigned long flags; |
3425 | int cnt = 0, cpu; | |
3f5a54e3 SR |
3426 | |
3427 | /* only one dump */ | |
3428 | spin_lock_irqsave(&ftrace_dump_lock, flags); | |
3429 | if (dump_ran) | |
3430 | goto out; | |
3431 | ||
3432 | dump_ran = 1; | |
3433 | ||
3434 | /* No turning back! */ | |
81adbdc0 | 3435 | ftrace_kill(); |
3f5a54e3 | 3436 | |
d769041f SR |
3437 | for_each_tracing_cpu(cpu) { |
3438 | atomic_inc(&global_trace.data[cpu]->disabled); | |
3439 | } | |
3440 | ||
b54d3de9 TE |
3441 | /* don't look at user memory in panic mode */ |
3442 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | |
3443 | ||
3f5a54e3 SR |
3444 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); |
3445 | ||
3446 | iter.tr = &global_trace; | |
3447 | iter.trace = current_trace; | |
3448 | ||
3449 | /* | |
3450 | * We need to stop all tracing on all CPUS to read | |
3451 | * the next buffer. This is a bit expensive, but is | |
3452 | * not done often. We fill in all that we can read, | |
3453 | * and then release the locks again. | |
3454 | */ | |
3455 | ||
3456 | cpus_clear(mask); | |
3457 | ||
3f5a54e3 SR |
3458 | while (!trace_empty(&iter)) { |
3459 | ||
3460 | if (!cnt) | |
3461 | printk(KERN_TRACE "---------------------------------\n"); | |
3462 | ||
3463 | cnt++; | |
3464 | ||
3465 | /* reset all but tr, trace, and overruns */ | |
3466 | memset(&iter.seq, 0, | |
3467 | sizeof(struct trace_iterator) - | |
3468 | offsetof(struct trace_iterator, seq)); | |
3469 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | |
3470 | iter.pos = -1; | |
3471 | ||
3472 | if (find_next_entry_inc(&iter) != NULL) { | |
3473 | print_trace_line(&iter); | |
3474 | trace_consume(&iter); | |
3475 | } | |
3476 | ||
3477 | trace_printk_seq(&iter.seq); | |
3478 | } | |
3479 | ||
3480 | if (!cnt) | |
3481 | printk(KERN_TRACE " (ftrace buffer empty)\n"); | |
3482 | else | |
3483 | printk(KERN_TRACE "---------------------------------\n"); | |
3484 | ||
3f5a54e3 SR |
3485 | out: |
3486 | spin_unlock_irqrestore(&ftrace_dump_lock, flags); | |
3487 | } | |
3488 | ||
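Besides the panic and die notifiers registered in tracer_alloc_buffers() below (both gated by ftrace_dump_on_oops), ftrace_dump() can be called directly from code that has detected an unrecoverable state. A hedged sketch; the driver and error path are invented:

/* Hypothetical caller: dump the trace buffer by hand on a fatal
 * error. ftrace_dump() calls ftrace_kill(), so tracing is dead
 * afterwards, and the dump_ran latch means it only fires once. */
static void my_driver_fatal(int err)
{
	printk(KERN_ERR "my_driver: fatal error %d, dumping trace\n", err);
	ftrace_dump();
}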
3928a8a2 | 3489 | __init static int tracer_alloc_buffers(void) |
bc0c38d1 | 3490 | { |
4c11d7ae | 3491 | struct trace_array_cpu *data; |
4c11d7ae SR |
3492 | int i; |
3493 | ||
3928a8a2 SR |
3494 | /* TODO: make the number of buffers adjust as CPUs are hot-plugged */ | |
3495 | tracing_buffer_mask = cpu_possible_map; | |
4c11d7ae | 3496 | |
3928a8a2 SR |
3497 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, |
3498 | TRACE_BUFFER_FLAGS); | |
3499 | if (!global_trace.buffer) { | |
3500 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | |
3501 | WARN_ON(1); | |
3502 | return 0; | |
4c11d7ae | 3503 | } |
3928a8a2 | 3504 | global_trace.entries = ring_buffer_size(global_trace.buffer); |
4c11d7ae SR |
3505 | |
3506 | #ifdef CONFIG_TRACER_MAX_TRACE | |
3928a8a2 SR |
3507 | max_tr.buffer = ring_buffer_alloc(trace_buf_size, |
3508 | TRACE_BUFFER_FLAGS); | |
3509 | if (!max_tr.buffer) { | |
3510 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | |
3511 | WARN_ON(1); | |
3512 | ring_buffer_free(global_trace.buffer); | |
3513 | return 0; | |
4c11d7ae | 3514 | } |
3928a8a2 SR |
3515 | max_tr.entries = ring_buffer_size(max_tr.buffer); |
3516 | WARN_ON(max_tr.entries != global_trace.entries); | |
a98a3c3f | 3517 | #endif |
ab46428c | 3518 | |
4c11d7ae | 3519 | /* Set up the per-CPU data pointers for all buffers */ | |
ab46428c | 3520 | for_each_tracing_cpu(i) { |
4c11d7ae | 3521 | data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); |
bc0c38d1 | 3522 | max_tr.data[i] = &per_cpu(max_data, i); |
4c11d7ae | 3523 | } |
bc0c38d1 | 3524 | |
bc0c38d1 SR |
3525 | trace_init_cmdlines(); |
3526 | ||
43a15386 | 3527 | register_tracer(&nop_trace); |
b5ad384e FW |
3528 | #ifdef CONFIG_BOOT_TRACER |
3529 | register_tracer(&boot_tracer); | |
3530 | current_trace = &boot_tracer; | |
3531 | current_trace->init(&global_trace); | |
3532 | #else | |
43a15386 | 3533 | current_trace = &nop_trace; |
b5ad384e | 3534 | #endif |
bc0c38d1 | 3535 | |
60a11774 SR |
3536 | /* All seems OK, enable tracing */ |
3537 | tracing_disabled = 0; | |
3928a8a2 | 3538 | |
3f5a54e3 SR |
3539 | atomic_notifier_chain_register(&panic_notifier_list, |
3540 | &trace_panic_notifier); | |
3541 | ||
3542 | register_die_notifier(&trace_die_notifier); | |
3543 | ||
bc0c38d1 | 3544 | return 0; |
bc0c38d1 | 3545 | } |
b5ad384e FW |
3546 | early_initcall(tracer_alloc_buffers); |
3547 | fs_initcall(tracer_init_debugfs); |